Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Fri, 14 May 2021 14:07:58
Message-Id: 1621001254.55bd8ef17301314122ac11a224fa5d911554f951.alicef@gentoo
1 commit: 55bd8ef17301314122ac11a224fa5d911554f951
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Fri May 14 14:07:18 2021 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Fri May 14 14:07:34 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=55bd8ef1
7
8 Linux patch 5.10.37
9
10 Signed-off-by: Alice Ferrazzi <alicef <AT> gentoo.org>
11
12 0000_README | 4 +
13 1036_linux-5.10.37.patch | 21107 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 21111 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index e9a544e..fc87a37 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -187,6 +187,10 @@ Patch: 1035_linux-5.10.36.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.36
23
24 +Patch: 1036_linux-5.10.37.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.37
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1036_linux-5.10.37.patch b/1036_linux-5.10.37.patch
33 new file mode 100644
34 index 0000000..981d7dd
35 --- /dev/null
36 +++ b/1036_linux-5.10.37.patch
37 @@ -0,0 +1,21107 @@
38 +diff --git a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
39 +index 06d5f251ec880..51f390e5c276c 100644
40 +--- a/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
41 ++++ b/Documentation/devicetree/bindings/serial/st,stm32-uart.yaml
42 +@@ -77,7 +77,8 @@ required:
43 + - interrupts
44 + - clocks
45 +
46 +-additionalProperties: false
47 ++additionalProperties:
48 ++ type: object
49 +
50 + examples:
51 + - |
52 +diff --git a/Documentation/driver-api/xilinx/eemi.rst b/Documentation/driver-api/xilinx/eemi.rst
53 +index 9dcbc6f18d75d..c1bc47b9000dc 100644
54 +--- a/Documentation/driver-api/xilinx/eemi.rst
55 ++++ b/Documentation/driver-api/xilinx/eemi.rst
56 +@@ -16,35 +16,8 @@ components running across different processing clusters on a chip or
57 + device to communicate with a power management controller (PMC) on a
58 + device to issue or respond to power management requests.
59 +
60 +-EEMI ops is a structure containing all eemi APIs supported by Zynq MPSoC.
61 +-The zynqmp-firmware driver maintain all EEMI APIs in zynqmp_eemi_ops
62 +-structure. Any driver who want to communicate with PMC using EEMI APIs
63 +-can call zynqmp_pm_get_eemi_ops().
64 +-
65 +-Example of EEMI ops::
66 +-
67 +- /* zynqmp-firmware driver maintain all EEMI APIs */
68 +- struct zynqmp_eemi_ops {
69 +- int (*get_api_version)(u32 *version);
70 +- int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out);
71 +- };
72 +-
73 +- static const struct zynqmp_eemi_ops eemi_ops = {
74 +- .get_api_version = zynqmp_pm_get_api_version,
75 +- .query_data = zynqmp_pm_query_data,
76 +- };
77 +-
78 +-Example of EEMI ops usage::
79 +-
80 +- static const struct zynqmp_eemi_ops *eemi_ops;
81 +- u32 ret_payload[PAYLOAD_ARG_CNT];
82 +- int ret;
83 +-
84 +- eemi_ops = zynqmp_pm_get_eemi_ops();
85 +- if (IS_ERR(eemi_ops))
86 +- return PTR_ERR(eemi_ops);
87 +-
88 +- ret = eemi_ops->query_data(qdata, ret_payload);
89 ++Any driver who wants to communicate with PMC using EEMI APIs use the
90 ++functions provided for each function.
91 +
92 + IOCTL
93 + ------
94 +diff --git a/Documentation/userspace-api/media/v4l/subdev-formats.rst b/Documentation/userspace-api/media/v4l/subdev-formats.rst
95 +index c9b7bb3ca089d..eff6727c69d30 100644
96 +--- a/Documentation/userspace-api/media/v4l/subdev-formats.rst
97 ++++ b/Documentation/userspace-api/media/v4l/subdev-formats.rst
98 +@@ -1567,8 +1567,8 @@ The following tables list existing packed RGB formats.
99 + - MEDIA_BUS_FMT_RGB101010_1X30
100 + - 0x1018
101 + -
102 +- - 0
103 +- - 0
104 ++ -
105 ++ -
106 + - r\ :sub:`9`
107 + - r\ :sub:`8`
108 + - r\ :sub:`7`
109 +diff --git a/Makefile b/Makefile
110 +index ece5b660dcb06..39f14ad009aef 100644
111 +--- a/Makefile
112 ++++ b/Makefile
113 +@@ -1,7 +1,7 @@
114 + # SPDX-License-Identifier: GPL-2.0
115 + VERSION = 5
116 + PATCHLEVEL = 10
117 +-SUBLEVEL = 36
118 ++SUBLEVEL = 37
119 + EXTRAVERSION =
120 + NAME = Dare mighty things
121 +
122 +diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
123 +index 21ae880c7530f..c76b0046b4028 100644
124 +--- a/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
125 ++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-rainier.dts
126 +@@ -707,9 +707,9 @@
127 + multi-master;
128 + status = "okay";
129 +
130 +- si7021-a20@20 {
131 ++ si7021-a20@40 {
132 + compatible = "silabs,si7020";
133 +- reg = <0x20>;
134 ++ reg = <0x40>;
135 + };
136 +
137 + tmp275@48 {
138 +diff --git a/arch/arm/boot/dts/exynos4210-i9100.dts b/arch/arm/boot/dts/exynos4210-i9100.dts
139 +index 5370ee477186c..7777bf51a6e64 100644
140 +--- a/arch/arm/boot/dts/exynos4210-i9100.dts
141 ++++ b/arch/arm/boot/dts/exynos4210-i9100.dts
142 +@@ -136,7 +136,7 @@
143 + compatible = "maxim,max17042";
144 +
145 + interrupt-parent = <&gpx2>;
146 +- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
147 ++ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
148 +
149 + pinctrl-0 = <&max17042_fuel_irq>;
150 + pinctrl-names = "default";
151 +diff --git a/arch/arm/boot/dts/exynos4412-midas.dtsi b/arch/arm/boot/dts/exynos4412-midas.dtsi
152 +index 7e7c243ff196a..06450066b1787 100644
153 +--- a/arch/arm/boot/dts/exynos4412-midas.dtsi
154 ++++ b/arch/arm/boot/dts/exynos4412-midas.dtsi
155 +@@ -174,7 +174,7 @@
156 + max77693@66 {
157 + compatible = "maxim,max77693";
158 + interrupt-parent = <&gpx1>;
159 +- interrupts = <5 IRQ_TYPE_EDGE_FALLING>;
160 ++ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
161 + pinctrl-names = "default";
162 + pinctrl-0 = <&max77693_irq>;
163 + reg = <0x66>;
164 +@@ -223,7 +223,7 @@
165 + max77693-fuel-gauge@36 {
166 + compatible = "maxim,max17047";
167 + interrupt-parent = <&gpx2>;
168 +- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
169 ++ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
170 + pinctrl-names = "default";
171 + pinctrl-0 = <&max77693_fuel_irq>;
172 + reg = <0x36>;
173 +@@ -668,7 +668,7 @@
174 + max77686: max77686_pmic@9 {
175 + compatible = "maxim,max77686";
176 + interrupt-parent = <&gpx0>;
177 +- interrupts = <7 IRQ_TYPE_NONE>;
178 ++ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
179 + pinctrl-0 = <&max77686_irq>;
180 + pinctrl-names = "default";
181 + reg = <0x09>;
182 +diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
183 +index 2983e91bc7dde..869d80be1b36e 100644
184 +--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
185 ++++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
186 +@@ -279,7 +279,7 @@
187 + max77686: pmic@9 {
188 + compatible = "maxim,max77686";
189 + interrupt-parent = <&gpx3>;
190 +- interrupts = <2 IRQ_TYPE_NONE>;
191 ++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
192 + pinctrl-names = "default";
193 + pinctrl-0 = <&max77686_irq>;
194 + reg = <0x09>;
195 +diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
196 +index 186790f39e4d3..d0e48c10aec2b 100644
197 +--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
198 ++++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
199 +@@ -134,7 +134,7 @@
200 + compatible = "maxim,max77686";
201 + reg = <0x09>;
202 + interrupt-parent = <&gpx3>;
203 +- interrupts = <2 IRQ_TYPE_NONE>;
204 ++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
205 + pinctrl-names = "default";
206 + pinctrl-0 = <&max77686_irq>;
207 + #clock-cells = <1>;
208 +diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
209 +index c952a615148e5..737f0e20a4525 100644
210 +--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
211 ++++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
212 +@@ -292,7 +292,7 @@
213 + max77686: max77686@9 {
214 + compatible = "maxim,max77686";
215 + interrupt-parent = <&gpx3>;
216 +- interrupts = <2 IRQ_TYPE_NONE>;
217 ++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
218 + pinctrl-names = "default";
219 + pinctrl-0 = <&max77686_irq>;
220 + wakeup-source;
221 +diff --git a/arch/arm/boot/dts/r8a7790-lager.dts b/arch/arm/boot/dts/r8a7790-lager.dts
222 +index 09a152b915575..1d6f0c5d02e9a 100644
223 +--- a/arch/arm/boot/dts/r8a7790-lager.dts
224 ++++ b/arch/arm/boot/dts/r8a7790-lager.dts
225 +@@ -53,6 +53,9 @@
226 + i2c11 = &i2cexio1;
227 + i2c12 = &i2chdmi;
228 + i2c13 = &i2cpwr;
229 ++ mmc0 = &mmcif1;
230 ++ mmc1 = &sdhi0;
231 ++ mmc2 = &sdhi2;
232 + };
233 +
234 + chosen {
235 +diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
236 +index f603cba5441fc..6af1727b82690 100644
237 +--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
238 ++++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
239 +@@ -53,6 +53,9 @@
240 + i2c12 = &i2cexio1;
241 + i2c13 = &i2chdmi;
242 + i2c14 = &i2cexio4;
243 ++ mmc0 = &sdhi0;
244 ++ mmc1 = &sdhi1;
245 ++ mmc2 = &sdhi2;
246 + };
247 +
248 + chosen {
249 +diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
250 +index c6d563fb7ec7c..bf51e29c793a3 100644
251 +--- a/arch/arm/boot/dts/r8a7791-porter.dts
252 ++++ b/arch/arm/boot/dts/r8a7791-porter.dts
253 +@@ -28,6 +28,8 @@
254 + serial0 = &scif0;
255 + i2c9 = &gpioi2c2;
256 + i2c10 = &i2chdmi;
257 ++ mmc0 = &sdhi0;
258 ++ mmc1 = &sdhi2;
259 + };
260 +
261 + chosen {
262 +diff --git a/arch/arm/boot/dts/r8a7793-gose.dts b/arch/arm/boot/dts/r8a7793-gose.dts
263 +index abf487e8fe0f3..2b59a04913500 100644
264 +--- a/arch/arm/boot/dts/r8a7793-gose.dts
265 ++++ b/arch/arm/boot/dts/r8a7793-gose.dts
266 +@@ -49,6 +49,9 @@
267 + i2c10 = &gpioi2c4;
268 + i2c11 = &i2chdmi;
269 + i2c12 = &i2cexio4;
270 ++ mmc0 = &sdhi0;
271 ++ mmc1 = &sdhi1;
272 ++ mmc2 = &sdhi2;
273 + };
274 +
275 + chosen {
276 +diff --git a/arch/arm/boot/dts/r8a7794-alt.dts b/arch/arm/boot/dts/r8a7794-alt.dts
277 +index 3f1cc5bbf3297..32025986b3b9b 100644
278 +--- a/arch/arm/boot/dts/r8a7794-alt.dts
279 ++++ b/arch/arm/boot/dts/r8a7794-alt.dts
280 +@@ -19,6 +19,9 @@
281 + i2c10 = &gpioi2c4;
282 + i2c11 = &i2chdmi;
283 + i2c12 = &i2cexio4;
284 ++ mmc0 = &mmcif0;
285 ++ mmc1 = &sdhi0;
286 ++ mmc2 = &sdhi1;
287 + };
288 +
289 + chosen {
290 +diff --git a/arch/arm/boot/dts/r8a7794-silk.dts b/arch/arm/boot/dts/r8a7794-silk.dts
291 +index 677596f6c9c9a..af066ee5e2754 100644
292 +--- a/arch/arm/boot/dts/r8a7794-silk.dts
293 ++++ b/arch/arm/boot/dts/r8a7794-silk.dts
294 +@@ -31,6 +31,8 @@
295 + serial0 = &scif2;
296 + i2c9 = &gpioi2c1;
297 + i2c10 = &i2chdmi;
298 ++ mmc0 = &mmcif0;
299 ++ mmc1 = &sdhi1;
300 + };
301 +
302 + chosen {
303 +diff --git a/arch/arm/boot/dts/s5pv210-fascinate4g.dts b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
304 +index ca064359dd308..b47d8300e536e 100644
305 +--- a/arch/arm/boot/dts/s5pv210-fascinate4g.dts
306 ++++ b/arch/arm/boot/dts/s5pv210-fascinate4g.dts
307 +@@ -115,7 +115,7 @@
308 + compatible = "maxim,max77836-battery";
309 +
310 + interrupt-parent = <&gph3>;
311 +- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
312 ++ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
313 +
314 + pinctrl-names = "default";
315 + pinctrl-0 = <&fg_irq>;
316 +diff --git a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
317 +index d84686e003709..dee4d32ab32c4 100644
318 +--- a/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
319 ++++ b/arch/arm/boot/dts/stm32mp15-pinctrl.dtsi
320 +@@ -1806,10 +1806,15 @@
321 + usart2_idle_pins_c: usart2-idle-2 {
322 + pins1 {
323 + pinmux = <STM32_PINMUX('D', 5, ANALOG)>, /* USART2_TX */
324 +- <STM32_PINMUX('D', 4, ANALOG)>, /* USART2_RTS */
325 + <STM32_PINMUX('D', 3, ANALOG)>; /* USART2_CTS_NSS */
326 + };
327 + pins2 {
328 ++ pinmux = <STM32_PINMUX('D', 4, AF7)>; /* USART2_RTS */
329 ++ bias-disable;
330 ++ drive-push-pull;
331 ++ slew-rate = <3>;
332 ++ };
333 ++ pins3 {
334 + pinmux = <STM32_PINMUX('D', 6, AF7)>; /* USART2_RX */
335 + bias-disable;
336 + };
337 +@@ -1855,10 +1860,15 @@
338 + usart3_idle_pins_b: usart3-idle-1 {
339 + pins1 {
340 + pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
341 +- <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
342 + <STM32_PINMUX('I', 10, ANALOG)>; /* USART3_CTS_NSS */
343 + };
344 + pins2 {
345 ++ pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
346 ++ bias-disable;
347 ++ drive-push-pull;
348 ++ slew-rate = <0>;
349 ++ };
350 ++ pins3 {
351 + pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
352 + bias-disable;
353 + };
354 +@@ -1891,10 +1901,15 @@
355 + usart3_idle_pins_c: usart3-idle-2 {
356 + pins1 {
357 + pinmux = <STM32_PINMUX('B', 10, ANALOG)>, /* USART3_TX */
358 +- <STM32_PINMUX('G', 8, ANALOG)>, /* USART3_RTS */
359 + <STM32_PINMUX('B', 13, ANALOG)>; /* USART3_CTS_NSS */
360 + };
361 + pins2 {
362 ++ pinmux = <STM32_PINMUX('G', 8, AF8)>; /* USART3_RTS */
363 ++ bias-disable;
364 ++ drive-push-pull;
365 ++ slew-rate = <0>;
366 ++ };
367 ++ pins3 {
368 + pinmux = <STM32_PINMUX('B', 12, AF8)>; /* USART3_RX */
369 + bias-disable;
370 + };
371 +diff --git a/arch/arm/boot/dts/uniphier-pxs2.dtsi b/arch/arm/boot/dts/uniphier-pxs2.dtsi
372 +index b0b15c97306b8..e81e5937a60ae 100644
373 +--- a/arch/arm/boot/dts/uniphier-pxs2.dtsi
374 ++++ b/arch/arm/boot/dts/uniphier-pxs2.dtsi
375 +@@ -583,7 +583,7 @@
376 + clocks = <&sys_clk 6>;
377 + reset-names = "ether";
378 + resets = <&sys_rst 6>;
379 +- phy-mode = "rgmii";
380 ++ phy-mode = "rgmii-id";
381 + local-mac-address = [00 00 00 00 00 00];
382 + socionext,syscon-phy-mode = <&soc_glue 0>;
383 +
384 +diff --git a/arch/arm/crypto/poly1305-glue.c b/arch/arm/crypto/poly1305-glue.c
385 +index 3023c1acfa194..c31bd8f7c0927 100644
386 +--- a/arch/arm/crypto/poly1305-glue.c
387 ++++ b/arch/arm/crypto/poly1305-glue.c
388 +@@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit)
389 +
390 + static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
391 +
392 +-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
393 ++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
394 + {
395 + poly1305_init_arm(&dctx->h, key);
396 + dctx->s[0] = get_unaligned_le32(key + 16);
397 +diff --git a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
398 +index 29d8cf6df46bf..99c2d6fd6304a 100644
399 +--- a/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
400 ++++ b/arch/arm64/boot/dts/mediatek/pumpkin-common.dtsi
401 +@@ -56,7 +56,7 @@
402 + tca6416: gpio@20 {
403 + compatible = "ti,tca6416";
404 + reg = <0x20>;
405 +- reset-gpios = <&pio 65 GPIO_ACTIVE_HIGH>;
406 ++ reset-gpios = <&pio 65 GPIO_ACTIVE_LOW>;
407 + pinctrl-names = "default";
408 + pinctrl-0 = <&tca6416_pins>;
409 +
410 +diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
411 +index c4ac6f5dc008d..96d36b38f2696 100644
412 +--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
413 ++++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
414 +@@ -1015,7 +1015,7 @@
415 + left_spkr: wsa8810-left{
416 + compatible = "sdw10217201000";
417 + reg = <0 1>;
418 +- powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
419 ++ powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
420 + #thermal-sensor-cells = <0>;
421 + sound-name-prefix = "SpkrLeft";
422 + #sound-dai-cells = <0>;
423 +@@ -1023,7 +1023,7 @@
424 +
425 + right_spkr: wsa8810-right{
426 + compatible = "sdw10217201000";
427 +- powerdown-gpios = <&wcdgpio 2 GPIO_ACTIVE_HIGH>;
428 ++ powerdown-gpios = <&wcdgpio 1 GPIO_ACTIVE_HIGH>;
429 + reg = <0 2>;
430 + #thermal-sensor-cells = <0>;
431 + sound-name-prefix = "SpkrRight";
432 +diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
433 +index f97f354af86f4..ea6e3a11e641b 100644
434 +--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
435 ++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
436 +@@ -2192,7 +2192,7 @@
437 + #gpio-cells = <2>;
438 + interrupt-controller;
439 + #interrupt-cells = <2>;
440 +- gpio-ranges = <&tlmm 0 0 150>;
441 ++ gpio-ranges = <&tlmm 0 0 151>;
442 + wakeup-parent = <&pdc_intc>;
443 +
444 + cci0_default: cci0-default {
445 +diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi
446 +index f0a872e02686d..1aec54590a11a 100644
447 +--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi
448 ++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi
449 +@@ -748,7 +748,7 @@
450 + <0x0 0x03D00000 0x0 0x300000>;
451 + reg-names = "west", "east", "north", "south";
452 + interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
453 +- gpio-ranges = <&tlmm 0 0 175>;
454 ++ gpio-ranges = <&tlmm 0 0 176>;
455 + gpio-controller;
456 + #gpio-cells = <2>;
457 + interrupt-controller;
458 +diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
459 +index d057d85a19fb2..d4547a192748b 100644
460 +--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
461 ++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
462 +@@ -216,7 +216,7 @@
463 +
464 + pmu {
465 + compatible = "arm,armv8-pmuv3";
466 +- interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
467 ++ interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
468 + };
469 +
470 + psci {
471 +@@ -1555,7 +1555,7 @@
472 + #gpio-cells = <2>;
473 + interrupt-controller;
474 + #interrupt-cells = <2>;
475 +- gpio-ranges = <&tlmm 0 0 180>;
476 ++ gpio-ranges = <&tlmm 0 0 181>;
477 + wakeup-parent = <&pdc>;
478 +
479 + qup_i2c0_default: qup-i2c0-default {
480 +@@ -2379,7 +2379,7 @@
481 + (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
482 + <GIC_PPI 11
483 + (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
484 +- <GIC_PPI 12
485 ++ <GIC_PPI 10
486 + (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
487 + };
488 +
489 +diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
490 +index 2eda9f66ae81d..e8bf6f0c4c400 100644
491 +--- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
492 ++++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
493 +@@ -12,6 +12,9 @@
494 + aliases {
495 + serial0 = &scif2;
496 + serial1 = &hscif0;
497 ++ mmc0 = &sdhi3;
498 ++ mmc1 = &sdhi0;
499 ++ mmc2 = &sdhi2;
500 + };
501 +
502 + chosen {
503 +diff --git a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
504 +index 2c5b057c30c62..ad26f5bf0648d 100644
505 +--- a/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
506 ++++ b/arch/arm64/boot/dts/renesas/r8a774a1-beacon-rzg2m-kit.dts
507 +@@ -21,6 +21,9 @@
508 + serial4 = &hscif2;
509 + serial5 = &scif5;
510 + ethernet0 = &avb;
511 ++ mmc0 = &sdhi3;
512 ++ mmc1 = &sdhi0;
513 ++ mmc2 = &sdhi2;
514 + };
515 +
516 + chosen {
517 +diff --git a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
518 +index 26aee004a44e2..c4b50a5e3d92c 100644
519 +--- a/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
520 ++++ b/arch/arm64/boot/dts/renesas/r8a774c0-cat874.dts
521 +@@ -17,6 +17,8 @@
522 + aliases {
523 + serial0 = &scif2;
524 + serial1 = &hscif2;
525 ++ mmc0 = &sdhi0;
526 ++ mmc1 = &sdhi3;
527 + };
528 +
529 + chosen {
530 +diff --git a/arch/arm64/boot/dts/renesas/r8a77980.dtsi b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
531 +index d6cae90d7fd9e..e6ef837c4a3b3 100644
532 +--- a/arch/arm64/boot/dts/renesas/r8a77980.dtsi
533 ++++ b/arch/arm64/boot/dts/renesas/r8a77980.dtsi
534 +@@ -990,8 +990,8 @@
535 +
536 + reg = <1>;
537 +
538 +- vin4csi41: endpoint@2 {
539 +- reg = <2>;
540 ++ vin4csi41: endpoint@3 {
541 ++ reg = <3>;
542 + remote-endpoint = <&csi41vin4>;
543 + };
544 + };
545 +@@ -1018,8 +1018,8 @@
546 +
547 + reg = <1>;
548 +
549 +- vin5csi41: endpoint@2 {
550 +- reg = <2>;
551 ++ vin5csi41: endpoint@3 {
552 ++ reg = <3>;
553 + remote-endpoint = <&csi41vin5>;
554 + };
555 + };
556 +@@ -1046,8 +1046,8 @@
557 +
558 + reg = <1>;
559 +
560 +- vin6csi41: endpoint@2 {
561 +- reg = <2>;
562 ++ vin6csi41: endpoint@3 {
563 ++ reg = <3>;
564 + remote-endpoint = <&csi41vin6>;
565 + };
566 + };
567 +@@ -1074,8 +1074,8 @@
568 +
569 + reg = <1>;
570 +
571 +- vin7csi41: endpoint@2 {
572 +- reg = <2>;
573 ++ vin7csi41: endpoint@3 {
574 ++ reg = <3>;
575 + remote-endpoint = <&csi41vin7>;
576 + };
577 + };
578 +diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
579 +index e0ccca2222d2d..b9e3b6762ff42 100644
580 +--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
581 ++++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
582 +@@ -16,6 +16,9 @@
583 + aliases {
584 + serial0 = &scif2;
585 + ethernet0 = &avb;
586 ++ mmc0 = &sdhi3;
587 ++ mmc1 = &sdhi0;
588 ++ mmc2 = &sdhi1;
589 + };
590 +
591 + chosen {
592 +diff --git a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
593 +index 6cf77ce9aa937..86ec32a919d29 100644
594 +--- a/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
595 ++++ b/arch/arm64/boot/dts/renesas/r8a779a0.dtsi
596 +@@ -50,10 +50,7 @@
597 +
598 + pmu_a76 {
599 + compatible = "arm,cortex-a76-pmu";
600 +- interrupts-extended = <&gic GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
601 +- <&gic GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
602 +- <&gic GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
603 +- <&gic GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
604 ++ interrupts-extended = <&gic GIC_PPI 7 IRQ_TYPE_LEVEL_LOW>;
605 + };
606 +
607 + /* External SCIF clock - to be overridden by boards that provide it */
608 +diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
609 +index 1bf77957d2c21..08b8525bb7257 100644
610 +--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi
611 ++++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi
612 +@@ -36,6 +36,9 @@
613 + serial0 = &scif2;
614 + serial1 = &hscif1;
615 + ethernet0 = &avb;
616 ++ mmc0 = &sdhi2;
617 ++ mmc1 = &sdhi0;
618 ++ mmc2 = &sdhi3;
619 + };
620 +
621 + chosen {
622 +diff --git a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
623 +index 202177706cdeb..05e64bfad0235 100644
624 +--- a/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
625 ++++ b/arch/arm64/boot/dts/renesas/ulcb-kf.dtsi
626 +@@ -16,6 +16,7 @@
627 + aliases {
628 + serial1 = &hscif0;
629 + serial2 = &scif1;
630 ++ mmc2 = &sdhi3;
631 + };
632 +
633 + clksndsel: clksndsel {
634 +diff --git a/arch/arm64/boot/dts/renesas/ulcb.dtsi b/arch/arm64/boot/dts/renesas/ulcb.dtsi
635 +index a2e085db87c53..e11521b4b9ca4 100644
636 +--- a/arch/arm64/boot/dts/renesas/ulcb.dtsi
637 ++++ b/arch/arm64/boot/dts/renesas/ulcb.dtsi
638 +@@ -23,6 +23,8 @@
639 + aliases {
640 + serial0 = &scif2;
641 + ethernet0 = &avb;
642 ++ mmc0 = &sdhi2;
643 ++ mmc1 = &sdhi0;
644 + };
645 +
646 + chosen {
647 +diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
648 +index a87b8a6787196..8f2c1c1e2c64e 100644
649 +--- a/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
650 ++++ b/arch/arm64/boot/dts/socionext/uniphier-ld20.dtsi
651 +@@ -734,7 +734,7 @@
652 + clocks = <&sys_clk 6>;
653 + reset-names = "ether";
654 + resets = <&sys_rst 6>;
655 +- phy-mode = "rgmii";
656 ++ phy-mode = "rgmii-id";
657 + local-mac-address = [00 00 00 00 00 00];
658 + socionext,syscon-phy-mode = <&soc_glue 0>;
659 +
660 +diff --git a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
661 +index 0e52dadf54b3a..be97da1322580 100644
662 +--- a/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
663 ++++ b/arch/arm64/boot/dts/socionext/uniphier-pxs3.dtsi
664 +@@ -564,7 +564,7 @@
665 + clocks = <&sys_clk 6>;
666 + reset-names = "ether";
667 + resets = <&sys_rst 6>;
668 +- phy-mode = "rgmii";
669 ++ phy-mode = "rgmii-id";
670 + local-mac-address = [00 00 00 00 00 00];
671 + socionext,syscon-phy-mode = <&soc_glue 0>;
672 +
673 +@@ -585,7 +585,7 @@
674 + clocks = <&sys_clk 7>;
675 + reset-names = "ether";
676 + resets = <&sys_rst 7>;
677 +- phy-mode = "rgmii";
678 ++ phy-mode = "rgmii-id";
679 + local-mac-address = [00 00 00 00 00 00];
680 + socionext,syscon-phy-mode = <&soc_glue 1>;
681 +
682 +diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
683 +index f33ada70c4ed8..01e22fe408235 100644
684 +--- a/arch/arm64/crypto/poly1305-glue.c
685 ++++ b/arch/arm64/crypto/poly1305-glue.c
686 +@@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce);
687 +
688 + static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon);
689 +
690 +-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
691 ++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
692 + {
693 + poly1305_init_arm64(&dctx->h, key);
694 + dctx->s[0] = get_unaligned_le32(key + 16);
695 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
696 +index cc060c41adaab..912b83e784bbf 100644
697 +--- a/arch/arm64/include/asm/kvm_host.h
698 ++++ b/arch/arm64/include/asm/kvm_host.h
699 +@@ -601,6 +601,7 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
700 + static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
701 +
702 + void kvm_arm_init_debug(void);
703 ++void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
704 + void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
705 + void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
706 + void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
707 +diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
708 +index a884d77739895..fce8cbecd6bc7 100644
709 +--- a/arch/arm64/include/asm/processor.h
710 ++++ b/arch/arm64/include/asm/processor.h
711 +@@ -96,8 +96,7 @@
712 + #endif /* CONFIG_ARM64_FORCE_52BIT */
713 +
714 + extern phys_addr_t arm64_dma_phys_limit;
715 +-extern phys_addr_t arm64_dma32_phys_limit;
716 +-#define ARCH_LOW_ADDRESS_LIMIT ((arm64_dma_phys_limit ? : arm64_dma32_phys_limit) - 1)
717 ++#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1)
718 +
719 + struct debug_info {
720 + #ifdef CONFIG_HAVE_HW_BREAKPOINT
721 +diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
722 +index a1c2c955474e9..5e5dd99e8cee8 100644
723 +--- a/arch/arm64/kvm/arm.c
724 ++++ b/arch/arm64/kvm/arm.c
725 +@@ -547,6 +547,8 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
726 +
727 + vcpu->arch.has_run_once = true;
728 +
729 ++ kvm_arm_vcpu_init_debug(vcpu);
730 ++
731 + if (likely(irqchip_in_kernel(kvm))) {
732 + /*
733 + * Map the VGIC hardware resources before running a vcpu the
734 +diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
735 +index dbc8905116311..2484b2cca74bc 100644
736 +--- a/arch/arm64/kvm/debug.c
737 ++++ b/arch/arm64/kvm/debug.c
738 +@@ -68,6 +68,64 @@ void kvm_arm_init_debug(void)
739 + __this_cpu_write(mdcr_el2, kvm_call_hyp_ret(__kvm_get_mdcr_el2));
740 + }
741 +
742 ++/**
743 ++ * kvm_arm_setup_mdcr_el2 - configure vcpu mdcr_el2 value
744 ++ *
745 ++ * @vcpu: the vcpu pointer
746 ++ *
747 ++ * This ensures we will trap access to:
748 ++ * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
749 ++ * - Debug ROM Address (MDCR_EL2_TDRA)
750 ++ * - OS related registers (MDCR_EL2_TDOSA)
751 ++ * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
752 ++ * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
753 ++ */
754 ++static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu)
755 ++{
756 ++ /*
757 ++ * This also clears MDCR_EL2_E2PB_MASK to disable guest access
758 ++ * to the profiling buffer.
759 ++ */
760 ++ vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
761 ++ vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
762 ++ MDCR_EL2_TPMS |
763 ++ MDCR_EL2_TTRF |
764 ++ MDCR_EL2_TPMCR |
765 ++ MDCR_EL2_TDRA |
766 ++ MDCR_EL2_TDOSA);
767 ++
768 ++ /* Is the VM being debugged by userspace? */
769 ++ if (vcpu->guest_debug)
770 ++ /* Route all software debug exceptions to EL2 */
771 ++ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
772 ++
773 ++ /*
774 ++ * Trap debug register access when one of the following is true:
775 ++ * - Userspace is using the hardware to debug the guest
776 ++ * (KVM_GUESTDBG_USE_HW is set).
777 ++ * - The guest is not using debug (KVM_ARM64_DEBUG_DIRTY is clear).
778 ++ */
779 ++ if ((vcpu->guest_debug & KVM_GUESTDBG_USE_HW) ||
780 ++ !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
781 ++ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
782 ++
783 ++ trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
784 ++}
785 ++
786 ++/**
787 ++ * kvm_arm_vcpu_init_debug - setup vcpu debug traps
788 ++ *
789 ++ * @vcpu: the vcpu pointer
790 ++ *
791 ++ * Set vcpu initial mdcr_el2 value.
792 ++ */
793 ++void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu)
794 ++{
795 ++ preempt_disable();
796 ++ kvm_arm_setup_mdcr_el2(vcpu);
797 ++ preempt_enable();
798 ++}
799 ++
800 + /**
801 + * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
802 + */
803 +@@ -83,13 +141,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
804 + * @vcpu: the vcpu pointer
805 + *
806 + * This is called before each entry into the hypervisor to setup any
807 +- * debug related registers. Currently this just ensures we will trap
808 +- * access to:
809 +- * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
810 +- * - Debug ROM Address (MDCR_EL2_TDRA)
811 +- * - OS related registers (MDCR_EL2_TDOSA)
812 +- * - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
813 +- * - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
814 ++ * debug related registers.
815 + *
816 + * Additionally, KVM only traps guest accesses to the debug registers if
817 + * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
818 +@@ -101,28 +153,14 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
819 +
820 + void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
821 + {
822 +- bool trap_debug = !(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY);
823 + unsigned long mdscr, orig_mdcr_el2 = vcpu->arch.mdcr_el2;
824 +
825 + trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
826 +
827 +- /*
828 +- * This also clears MDCR_EL2_E2PB_MASK to disable guest access
829 +- * to the profiling buffer.
830 +- */
831 +- vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
832 +- vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
833 +- MDCR_EL2_TPMS |
834 +- MDCR_EL2_TTRF |
835 +- MDCR_EL2_TPMCR |
836 +- MDCR_EL2_TDRA |
837 +- MDCR_EL2_TDOSA);
838 ++ kvm_arm_setup_mdcr_el2(vcpu);
839 +
840 + /* Is Guest debugging in effect? */
841 + if (vcpu->guest_debug) {
842 +- /* Route all software debug exceptions to EL2 */
843 +- vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
844 +-
845 + /* Save guest debug state */
846 + save_guest_debug_regs(vcpu);
847 +
848 +@@ -176,7 +214,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
849 +
850 + vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
851 + vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
852 +- trap_debug = true;
853 +
854 + trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
855 + &vcpu->arch.debug_ptr->dbg_bcr[0],
856 +@@ -191,10 +228,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
857 + BUG_ON(!vcpu->guest_debug &&
858 + vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
859 +
860 +- /* Trap debug register access */
861 +- if (trap_debug)
862 +- vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
863 +-
864 + /* If KDE or MDE are set, perform a full save/restore cycle. */
865 + if (vcpu_read_sys_reg(vcpu, MDSCR_EL1) & (DBG_MDSCR_KDE | DBG_MDSCR_MDE))
866 + vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
867 +@@ -203,7 +236,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
868 + if (has_vhe() && orig_mdcr_el2 != vcpu->arch.mdcr_el2)
869 + write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
870 +
871 +- trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
872 + trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_read_sys_reg(vcpu, MDSCR_EL1));
873 + }
874 +
875 +diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
876 +index e911eea36eb0e..53a127d3e460b 100644
877 +--- a/arch/arm64/kvm/reset.c
878 ++++ b/arch/arm64/kvm/reset.c
879 +@@ -291,6 +291,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
880 +
881 + /* Reset core registers */
882 + memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
883 ++ memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
884 ++ vcpu->arch.ctxt.spsr_abt = 0;
885 ++ vcpu->arch.ctxt.spsr_und = 0;
886 ++ vcpu->arch.ctxt.spsr_irq = 0;
887 ++ vcpu->arch.ctxt.spsr_fiq = 0;
888 + vcpu_gp_regs(vcpu)->pstate = pstate;
889 +
890 + /* Reset system registers */
891 +diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
892 +index 44419679f91ad..7740995de982e 100644
893 +--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
894 ++++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
895 +@@ -87,8 +87,8 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
896 + r = vgic_v3_set_redist_base(kvm, 0, *addr, 0);
897 + goto out;
898 + }
899 +- rdreg = list_first_entry(&vgic->rd_regions,
900 +- struct vgic_redist_region, list);
901 ++ rdreg = list_first_entry_or_null(&vgic->rd_regions,
902 ++ struct vgic_redist_region, list);
903 + if (!rdreg)
904 + addr_ptr = &undef_value;
905 + else
906 +@@ -226,6 +226,9 @@ static int vgic_get_common_attr(struct kvm_device *dev,
907 + u64 addr;
908 + unsigned long type = (unsigned long)attr->attr;
909 +
910 ++ if (copy_from_user(&addr, uaddr, sizeof(addr)))
911 ++ return -EFAULT;
912 ++
913 + r = kvm_vgic_addr(dev->kvm, type, &addr, false);
914 + if (r)
915 + return (r == -ENODEV) ? -ENXIO : r;
916 +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
917 +index 916e0547fdccf..a985d292e8203 100644
918 +--- a/arch/arm64/mm/init.c
919 ++++ b/arch/arm64/mm/init.c
920 +@@ -53,13 +53,13 @@ s64 memstart_addr __ro_after_init = -1;
921 + EXPORT_SYMBOL(memstart_addr);
922 +
923 + /*
924 +- * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
925 +- * memory as some devices, namely the Raspberry Pi 4, have peripherals with
926 +- * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
927 +- * bit addressable memory area.
928 ++ * If the corresponding config options are enabled, we create both ZONE_DMA
929 ++ * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
930 ++ * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
931 ++ * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
932 ++ * otherwise it is empty.
933 + */
934 + phys_addr_t arm64_dma_phys_limit __ro_after_init;
935 +-phys_addr_t arm64_dma32_phys_limit __ro_after_init;
936 +
937 + #ifdef CONFIG_KEXEC_CORE
938 + /*
939 +@@ -84,7 +84,7 @@ static void __init reserve_crashkernel(void)
940 +
941 + if (crash_base == 0) {
942 + /* Current arm64 boot protocol requires 2MB alignment */
943 +- crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
944 ++ crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
945 + crash_size, SZ_2M);
946 + if (crash_base == 0) {
947 + pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
948 +@@ -189,6 +189,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
949 + unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
950 + unsigned int __maybe_unused acpi_zone_dma_bits;
951 + unsigned int __maybe_unused dt_zone_dma_bits;
952 ++ phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
953 +
954 + #ifdef CONFIG_ZONE_DMA
955 + acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
956 +@@ -198,8 +199,12 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
957 + max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
958 + #endif
959 + #ifdef CONFIG_ZONE_DMA32
960 +- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
961 ++ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
962 ++ if (!arm64_dma_phys_limit)
963 ++ arm64_dma_phys_limit = dma32_phys_limit;
964 + #endif
965 ++ if (!arm64_dma_phys_limit)
966 ++ arm64_dma_phys_limit = PHYS_MASK + 1;
967 + max_zone_pfns[ZONE_NORMAL] = max;
968 +
969 + free_area_init(max_zone_pfns);
970 +@@ -393,16 +398,9 @@ void __init arm64_memblock_init(void)
971 +
972 + early_init_fdt_scan_reserved_mem();
973 +
974 +- if (IS_ENABLED(CONFIG_ZONE_DMA32))
975 +- arm64_dma32_phys_limit = max_zone_phys(32);
976 +- else
977 +- arm64_dma32_phys_limit = PHYS_MASK + 1;
978 +-
979 + reserve_elfcorehdr();
980 +
981 + high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
982 +-
983 +- dma_contiguous_reserve(arm64_dma32_phys_limit);
984 + }
985 +
986 + void __init bootmem_init(void)
987 +@@ -437,6 +435,11 @@ void __init bootmem_init(void)
988 + sparse_init();
989 + zone_sizes_init(min, max);
990 +
991 ++ /*
992 ++ * Reserve the CMA area after arm64_dma_phys_limit was initialised.
993 ++ */
994 ++ dma_contiguous_reserve(arm64_dma_phys_limit);
995 ++
996 + /*
997 + * request_standard_resources() depends on crashkernel's memory being
998 + * reserved, so do it here.
999 +@@ -519,7 +522,7 @@ static void __init free_unused_memmap(void)
1000 + void __init mem_init(void)
1001 + {
1002 + if (swiotlb_force == SWIOTLB_FORCE ||
1003 +- max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
1004 ++ max_pfn > PFN_DOWN(arm64_dma_phys_limit))
1005 + swiotlb_init(1);
1006 + else
1007 + swiotlb_force = SWIOTLB_NO_FORCE;
1008 +diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
1009 +index f932b25fb817a..33282f33466e7 100644
1010 +--- a/arch/ia64/kernel/efi.c
1011 ++++ b/arch/ia64/kernel/efi.c
1012 +@@ -413,10 +413,10 @@ efi_get_pal_addr (void)
1013 + mask = ~((1 << IA64_GRANULE_SHIFT) - 1);
1014 +
1015 + printk(KERN_INFO "CPU %d: mapping PAL code "
1016 +- "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n",
1017 +- smp_processor_id(), md->phys_addr,
1018 +- md->phys_addr + efi_md_size(md),
1019 +- vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
1020 ++ "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n",
1021 ++ smp_processor_id(), md->phys_addr,
1022 ++ md->phys_addr + efi_md_size(md),
1023 ++ vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE);
1024 + #endif
1025 + return __va(md->phys_addr);
1026 + }
1027 +@@ -558,6 +558,7 @@ efi_init (void)
1028 + {
1029 + efi_memory_desc_t *md;
1030 + void *p;
1031 ++ unsigned int i;
1032 +
1033 + for (i = 0, p = efi_map_start; p < efi_map_end;
1034 + ++i, p += efi_desc_size)
1035 +@@ -584,7 +585,7 @@ efi_init (void)
1036 + }
1037 +
1038 + printk("mem%02d: %s "
1039 +- "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
1040 ++ "range=[0x%016llx-0x%016llx) (%4lu%s)\n",
1041 + i, efi_md_typeattr_format(buf, sizeof(buf), md),
1042 + md->phys_addr,
1043 + md->phys_addr + efi_md_size(md), size, unit);
1044 +diff --git a/arch/m68k/include/asm/mvme147hw.h b/arch/m68k/include/asm/mvme147hw.h
1045 +index 257b29184af91..e28eb1c0e0bfb 100644
1046 +--- a/arch/m68k/include/asm/mvme147hw.h
1047 ++++ b/arch/m68k/include/asm/mvme147hw.h
1048 +@@ -66,6 +66,9 @@ struct pcc_regs {
1049 + #define PCC_INT_ENAB 0x08
1050 +
1051 + #define PCC_TIMER_INT_CLR 0x80
1052 ++
1053 ++#define PCC_TIMER_TIC_EN 0x01
1054 ++#define PCC_TIMER_COC_EN 0x02
1055 + #define PCC_TIMER_CLR_OVF 0x04
1056 +
1057 + #define PCC_LEVEL_ABORT 0x07
1058 +diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
1059 +index 1c235d8f53f36..f55bdcb8e4f15 100644
1060 +--- a/arch/m68k/kernel/sys_m68k.c
1061 ++++ b/arch/m68k/kernel/sys_m68k.c
1062 +@@ -388,6 +388,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
1063 + ret = -EPERM;
1064 + if (!capable(CAP_SYS_ADMIN))
1065 + goto out;
1066 ++
1067 ++ mmap_read_lock(current->mm);
1068 + } else {
1069 + struct vm_area_struct *vma;
1070 +
1071 +diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
1072 +index 490700aa2212e..aab7880e078df 100644
1073 +--- a/arch/m68k/mvme147/config.c
1074 ++++ b/arch/m68k/mvme147/config.c
1075 +@@ -116,8 +116,10 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
1076 + unsigned long flags;
1077 +
1078 + local_irq_save(flags);
1079 +- m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
1080 +- m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF;
1081 ++ m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
1082 ++ PCC_TIMER_TIC_EN;
1083 ++ m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
1084 ++ PCC_LEVEL_TIMER1;
1085 + clk_total += PCC_TIMER_CYCLES;
1086 + timer_routine(0, NULL);
1087 + local_irq_restore(flags);
1088 +@@ -135,10 +137,10 @@ void mvme147_sched_init (irq_handler_t timer_routine)
1089 + /* Init the clock with a value */
1090 + /* The clock counter increments until 0xFFFF then reloads */
1091 + m147_pcc->t1_preload = PCC_TIMER_PRELOAD;
1092 +- m147_pcc->t1_cntrl = 0x0; /* clear timer */
1093 +- m147_pcc->t1_cntrl = 0x3; /* start timer */
1094 +- m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR; /* clear pending ints */
1095 +- m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
1096 ++ m147_pcc->t1_cntrl = PCC_TIMER_CLR_OVF | PCC_TIMER_COC_EN |
1097 ++ PCC_TIMER_TIC_EN;
1098 ++ m147_pcc->t1_int_cntrl = PCC_INT_ENAB | PCC_TIMER_INT_CLR |
1099 ++ PCC_LEVEL_TIMER1;
1100 +
1101 + clocksource_register_hz(&mvme147_clk, PCC_TIMER_CLOCK_FREQ);
1102 + }
1103 +diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
1104 +index 5b86d10e0f84e..d43d128b77471 100644
1105 +--- a/arch/m68k/mvme16x/config.c
1106 ++++ b/arch/m68k/mvme16x/config.c
1107 +@@ -367,6 +367,7 @@ static u32 clk_total;
1108 + #define PCCTOVR1_COC_EN 0x02
1109 + #define PCCTOVR1_OVR_CLR 0x04
1110 +
1111 ++#define PCCTIC1_INT_LEVEL 6
1112 + #define PCCTIC1_INT_CLR 0x08
1113 + #define PCCTIC1_INT_EN 0x10
1114 +
1115 +@@ -376,8 +377,8 @@ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
1116 + unsigned long flags;
1117 +
1118 + local_irq_save(flags);
1119 +- out_8(PCCTIC1, in_8(PCCTIC1) | PCCTIC1_INT_CLR);
1120 +- out_8(PCCTOVR1, PCCTOVR1_OVR_CLR);
1121 ++ out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
1122 ++ out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
1123 + clk_total += PCC_TIMER_CYCLES;
1124 + timer_routine(0, NULL);
1125 + local_irq_restore(flags);
1126 +@@ -391,14 +392,15 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
1127 + int irq;
1128 +
1129 + /* Using PCCchip2 or MC2 chip tick timer 1 */
1130 +- out_be32(PCCTCNT1, 0);
1131 +- out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
1132 +- out_8(PCCTOVR1, in_8(PCCTOVR1) | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
1133 +- out_8(PCCTIC1, PCCTIC1_INT_EN | 6);
1134 + if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, IRQF_TIMER, "timer",
1135 + timer_routine))
1136 + panic ("Couldn't register timer int");
1137 +
1138 ++ out_be32(PCCTCNT1, 0);
1139 ++ out_be32(PCCTCMP1, PCC_TIMER_CYCLES);
1140 ++ out_8(PCCTOVR1, PCCTOVR1_OVR_CLR | PCCTOVR1_TIC_EN | PCCTOVR1_COC_EN);
1141 ++ out_8(PCCTIC1, PCCTIC1_INT_EN | PCCTIC1_INT_CLR | PCCTIC1_INT_LEVEL);
1142 ++
1143 + clocksource_register_hz(&mvme16x_clk, PCC_TIMER_CLOCK_FREQ);
1144 +
1145 + if (brdno == 0x0162 || brdno == 0x172)
1146 +diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
1147 +index 2000bb2b0220d..1917ccd392564 100644
1148 +--- a/arch/mips/Kconfig
1149 ++++ b/arch/mips/Kconfig
1150 +@@ -6,6 +6,7 @@ config MIPS
1151 + select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT
1152 + select ARCH_HAS_FORTIFY_SOURCE
1153 + select ARCH_HAS_KCOV
1154 ++ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE if !EVA
1155 + select ARCH_HAS_PTE_SPECIAL if !(32BIT && CPU_HAS_RIXI)
1156 + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
1157 + select ARCH_HAS_UBSAN_SANITIZE_ALL
1158 +diff --git a/arch/mips/boot/dts/brcm/bcm3368.dtsi b/arch/mips/boot/dts/brcm/bcm3368.dtsi
1159 +index 69cbef4723775..d4b2b430dad01 100644
1160 +--- a/arch/mips/boot/dts/brcm/bcm3368.dtsi
1161 ++++ b/arch/mips/boot/dts/brcm/bcm3368.dtsi
1162 +@@ -59,7 +59,7 @@
1163 +
1164 + periph_cntl: syscon@fff8c008 {
1165 + compatible = "syscon";
1166 +- reg = <0xfff8c000 0x4>;
1167 ++ reg = <0xfff8c008 0x4>;
1168 + native-endian;
1169 + };
1170 +
1171 +diff --git a/arch/mips/boot/dts/brcm/bcm63268.dtsi b/arch/mips/boot/dts/brcm/bcm63268.dtsi
1172 +index 5acb49b618678..365fa75cd9ac5 100644
1173 +--- a/arch/mips/boot/dts/brcm/bcm63268.dtsi
1174 ++++ b/arch/mips/boot/dts/brcm/bcm63268.dtsi
1175 +@@ -59,7 +59,7 @@
1176 +
1177 + periph_cntl: syscon@10000008 {
1178 + compatible = "syscon";
1179 +- reg = <0x10000000 0xc>;
1180 ++ reg = <0x10000008 0x4>;
1181 + native-endian;
1182 + };
1183 +
1184 +diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
1185 +index f21176cac0381..89a3107cad28e 100644
1186 +--- a/arch/mips/boot/dts/brcm/bcm6358.dtsi
1187 ++++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
1188 +@@ -59,7 +59,7 @@
1189 +
1190 + periph_cntl: syscon@fffe0008 {
1191 + compatible = "syscon";
1192 +- reg = <0xfffe0000 0x4>;
1193 ++ reg = <0xfffe0008 0x4>;
1194 + native-endian;
1195 + };
1196 +
1197 +diff --git a/arch/mips/boot/dts/brcm/bcm6362.dtsi b/arch/mips/boot/dts/brcm/bcm6362.dtsi
1198 +index c98f9111e3c8b..0b2adefd75cec 100644
1199 +--- a/arch/mips/boot/dts/brcm/bcm6362.dtsi
1200 ++++ b/arch/mips/boot/dts/brcm/bcm6362.dtsi
1201 +@@ -59,7 +59,7 @@
1202 +
1203 + periph_cntl: syscon@10000008 {
1204 + compatible = "syscon";
1205 +- reg = <0x10000000 0xc>;
1206 ++ reg = <0x10000008 0x4>;
1207 + native-endian;
1208 + };
1209 +
1210 +diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
1211 +index 449c167dd8921..b84a3bfe8c51e 100644
1212 +--- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
1213 ++++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
1214 +@@ -59,7 +59,7 @@
1215 +
1216 + periph_cntl: syscon@100000008 {
1217 + compatible = "syscon";
1218 +- reg = <0x10000000 0xc>;
1219 ++ reg = <0x10000008 0x4>;
1220 + native-endian;
1221 + };
1222 +
1223 +diff --git a/arch/mips/crypto/poly1305-glue.c b/arch/mips/crypto/poly1305-glue.c
1224 +index fc881b46d9111..bc6110fb98e0a 100644
1225 +--- a/arch/mips/crypto/poly1305-glue.c
1226 ++++ b/arch/mips/crypto/poly1305-glue.c
1227 +@@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void *state, const u8 *key);
1228 + asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit);
1229 + asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce);
1230 +
1231 +-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
1232 ++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
1233 + {
1234 + poly1305_init_mips(&dctx->h, key);
1235 + dctx->s[0] = get_unaligned_le32(key + 16);
1236 +diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
1237 +index 86f2323ebe6bc..ca83ada7015f5 100644
1238 +--- a/arch/mips/include/asm/asmmacro.h
1239 ++++ b/arch/mips/include/asm/asmmacro.h
1240 +@@ -44,8 +44,7 @@
1241 + .endm
1242 + #endif
1243 +
1244 +-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
1245 +- defined(CONFIG_CPU_MIPSR6)
1246 ++#ifdef CONFIG_CPU_HAS_DIEI
1247 + .macro local_irq_enable reg=t0
1248 + ei
1249 + irq_enable_hazard
1250 +diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
1251 +index ed75f7971261b..052cce6a8a998 100644
1252 +--- a/arch/mips/loongson64/init.c
1253 ++++ b/arch/mips/loongson64/init.c
1254 +@@ -82,7 +82,7 @@ static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_
1255 + return -ENOMEM;
1256 +
1257 + range->fwnode = fwnode;
1258 +- range->size = size;
1259 ++ range->size = size = round_up(size, PAGE_SIZE);
1260 + range->hw_start = hw_start;
1261 + range->flags = LOGIC_PIO_CPU_MMIO;
1262 +
1263 +diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
1264 +index 39052de915f34..3a909194284a6 100644
1265 +--- a/arch/mips/pci/pci-legacy.c
1266 ++++ b/arch/mips/pci/pci-legacy.c
1267 +@@ -166,8 +166,13 @@ void pci_load_of_ranges(struct pci_controller *hose, struct device_node *node)
1268 + res = hose->mem_resource;
1269 + break;
1270 + }
1271 +- if (res != NULL)
1272 +- of_pci_range_to_resource(&range, node, res);
1273 ++ if (res != NULL) {
1274 ++ res->name = node->full_name;
1275 ++ res->flags = range.flags;
1276 ++ res->start = range.cpu_addr;
1277 ++ res->end = range.cpu_addr + range.size - 1;
1278 ++ res->parent = res->child = res->sibling = NULL;
1279 ++ }
1280 + }
1281 + }
1282 +
1283 +diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
1284 +index d360616037525..e032932348d6f 100644
1285 +--- a/arch/mips/pci/pci-mt7620.c
1286 ++++ b/arch/mips/pci/pci-mt7620.c
1287 +@@ -30,6 +30,7 @@
1288 + #define RALINK_GPIOMODE 0x60
1289 +
1290 + #define PPLL_CFG1 0x9c
1291 ++#define PPLL_LD BIT(23)
1292 +
1293 + #define PPLL_DRV 0xa0
1294 + #define PDRV_SW_SET BIT(31)
1295 +@@ -239,8 +240,8 @@ static int mt7620_pci_hw_init(struct platform_device *pdev)
1296 + rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
1297 + mdelay(100);
1298 +
1299 +- if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
1300 +- dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
1301 ++ if (!(rt_sysc_r32(PPLL_CFG1) & PPLL_LD)) {
1302 ++ dev_err(&pdev->dev, "pcie PLL not locked, aborting init\n");
1303 + reset_control_assert(rstpcie0);
1304 + rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
1305 + return -1;
1306 +diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
1307 +index e1f12e3981363..f1538d2be89e5 100644
1308 +--- a/arch/mips/pci/pci-rt2880.c
1309 ++++ b/arch/mips/pci/pci-rt2880.c
1310 +@@ -180,7 +180,6 @@ static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
1311 +
1312 + int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1313 + {
1314 +- u16 cmd;
1315 + int irq = -1;
1316 +
1317 + if (dev->bus->number != 0)
1318 +@@ -188,8 +187,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1319 +
1320 + switch (PCI_SLOT(dev->devfn)) {
1321 + case 0x00:
1322 +- rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
1323 +- (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
1324 + break;
1325 + case 0x11:
1326 + irq = RT288X_CPU_IRQ_PCI;
1327 +@@ -201,16 +198,6 @@ int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
1328 + break;
1329 + }
1330 +
1331 +- pci_write_config_byte((struct pci_dev *) dev,
1332 +- PCI_CACHE_LINE_SIZE, 0x14);
1333 +- pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
1334 +- pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
1335 +- cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
1336 +- PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
1337 +- PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
1338 +- pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
1339 +- pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
1340 +- dev->irq);
1341 + return irq;
1342 + }
1343 +
1344 +@@ -251,6 +238,30 @@ static int rt288x_pci_probe(struct platform_device *pdev)
1345 +
1346 + int pcibios_plat_dev_init(struct pci_dev *dev)
1347 + {
1348 ++ static bool slot0_init;
1349 ++
1350 ++ /*
1351 ++ * Nobody seems to initialize slot 0, but this platform requires it, so
1352 ++ * do it once when some other slot is being enabled. The PCI subsystem
1353 ++ * should configure other slots properly, so no need to do anything
1354 ++ * special for those.
1355 ++ */
1356 ++ if (!slot0_init && dev->bus->number == 0) {
1357 ++ u16 cmd;
1358 ++ u32 bar0;
1359 ++
1360 ++ slot0_init = true;
1361 ++
1362 ++ pci_bus_write_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
1363 ++ 0x08000000);
1364 ++ pci_bus_read_config_dword(dev->bus, 0, PCI_BASE_ADDRESS_0,
1365 ++ &bar0);
1366 ++
1367 ++ pci_bus_read_config_word(dev->bus, 0, PCI_COMMAND, &cmd);
1368 ++ cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
1369 ++ pci_bus_write_config_word(dev->bus, 0, PCI_COMMAND, cmd);
1370 ++ }
1371 ++
1372 + return 0;
1373 + }
1374 +
1375 +diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
1376 +index 31ed8083571ff..5afa0ebd78ca5 100644
1377 +--- a/arch/powerpc/Kconfig
1378 ++++ b/arch/powerpc/Kconfig
1379 +@@ -222,7 +222,7 @@ config PPC
1380 + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
1381 + select HAVE_MOD_ARCH_SPECIFIC
1382 + select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
1383 +- select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S)
1384 ++ select HAVE_HARDLOCKUP_DETECTOR_ARCH if PPC64 && PPC_BOOK3S && SMP
1385 + select HAVE_OPROFILE
1386 + select HAVE_OPTPROBES if PPC64
1387 + select HAVE_PERF_EVENTS
1388 +diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
1389 +index b88900f4832fd..52abca88b5b2b 100644
1390 +--- a/arch/powerpc/Kconfig.debug
1391 ++++ b/arch/powerpc/Kconfig.debug
1392 +@@ -352,6 +352,7 @@ config PPC_EARLY_DEBUG_CPM_ADDR
1393 + config FAIL_IOMMU
1394 + bool "Fault-injection capability for IOMMU"
1395 + depends on FAULT_INJECTION
1396 ++ depends on PCI || IBMVIO
1397 + help
1398 + Provide fault-injection capability for IOMMU. Each device can
1399 + be selectively enabled via the fail_iommu property.
1400 +diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
1401 +index cd3feeac6e87c..4a3dca0271f1e 100644
1402 +--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
1403 ++++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
1404 +@@ -7,6 +7,7 @@
1405 + #ifndef __ASSEMBLY__
1406 + #include <linux/mmdebug.h>
1407 + #include <linux/bug.h>
1408 ++#include <linux/sizes.h>
1409 + #endif
1410 +
1411 + /*
1412 +@@ -323,7 +324,8 @@ extern unsigned long pci_io_base;
1413 + #define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
1414 + #define IOREMAP_BASE (PHB_IO_END)
1415 + #define IOREMAP_START (ioremap_bot)
1416 +-#define IOREMAP_END (KERN_IO_END)
1417 ++#define IOREMAP_END (KERN_IO_END - FIXADDR_SIZE)
1418 ++#define FIXADDR_SIZE SZ_32M
1419 +
1420 + /* Advertise special mapping type for AGP */
1421 + #define HAVE_PAGE_AGP
1422 +diff --git a/arch/powerpc/include/asm/book3s/64/radix.h b/arch/powerpc/include/asm/book3s/64/radix.h
1423 +index c7813dc628fc9..59cab558e2f05 100644
1424 +--- a/arch/powerpc/include/asm/book3s/64/radix.h
1425 ++++ b/arch/powerpc/include/asm/book3s/64/radix.h
1426 +@@ -222,8 +222,10 @@ static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
1427 + * from ptesync, it should probably go into update_mmu_cache, rather
1428 + * than set_pte_at (which is used to set ptes unrelated to faults).
1429 + *
1430 +- * Spurious faults to vmalloc region are not tolerated, so there is
1431 +- * a ptesync in flush_cache_vmap.
1432 ++ * Spurious faults from the kernel memory are not tolerated, so there
1433 ++ * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
1434 ++ * the pte update sequence from ISA Book III 6.10 Translation Table
1435 ++ * Update Synchronization Requirements.
1436 + */
1437 + }
1438 +
1439 +diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
1440 +index 6bfc87915d5db..591b2f4deed53 100644
1441 +--- a/arch/powerpc/include/asm/fixmap.h
1442 ++++ b/arch/powerpc/include/asm/fixmap.h
1443 +@@ -23,12 +23,17 @@
1444 + #include <asm/kmap_types.h>
1445 + #endif
1446 +
1447 ++#ifdef CONFIG_PPC64
1448 ++#define FIXADDR_TOP (IOREMAP_END + FIXADDR_SIZE)
1449 ++#else
1450 ++#define FIXADDR_SIZE 0
1451 + #ifdef CONFIG_KASAN
1452 + #include <asm/kasan.h>
1453 + #define FIXADDR_TOP (KASAN_SHADOW_START - PAGE_SIZE)
1454 + #else
1455 + #define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
1456 + #endif
1457 ++#endif
1458 +
1459 + /*
1460 + * Here we define all the compile-time 'special' virtual
1461 +@@ -50,6 +55,7 @@
1462 + */
1463 + enum fixed_addresses {
1464 + FIX_HOLE,
1465 ++#ifdef CONFIG_PPC32
1466 + /* reserve the top 128K for early debugging purposes */
1467 + FIX_EARLY_DEBUG_TOP = FIX_HOLE,
1468 + FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
1469 +@@ -72,6 +78,7 @@ enum fixed_addresses {
1470 + FIX_IMMR_SIZE,
1471 + #endif
1472 + /* FIX_PCIE_MCFG, */
1473 ++#endif /* CONFIG_PPC32 */
1474 + __end_of_permanent_fixed_addresses,
1475 +
1476 + #define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
1477 +@@ -98,6 +105,8 @@ enum fixed_addresses {
1478 + static inline void __set_fixmap(enum fixed_addresses idx,
1479 + phys_addr_t phys, pgprot_t flags)
1480 + {
1481 ++ BUILD_BUG_ON(IS_ENABLED(CONFIG_PPC64) && __FIXADDR_SIZE > FIXADDR_SIZE);
1482 ++
1483 + if (__builtin_constant_p(idx))
1484 + BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
1485 + else if (WARN_ON(idx >= __end_of_fixed_addresses))
1486 +diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
1487 +index 6cb8aa3571917..57cd3892bfe05 100644
1488 +--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
1489 ++++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
1490 +@@ -6,6 +6,8 @@
1491 + * the ppc64 non-hashed page table.
1492 + */
1493 +
1494 ++#include <linux/sizes.h>
1495 ++
1496 + #include <asm/nohash/64/pgtable-4k.h>
1497 + #include <asm/barrier.h>
1498 + #include <asm/asm-const.h>
1499 +@@ -54,7 +56,8 @@
1500 + #define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE)
1501 + #define IOREMAP_BASE (PHB_IO_END)
1502 + #define IOREMAP_START (ioremap_bot)
1503 +-#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE)
1504 ++#define IOREMAP_END (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
1505 ++#define FIXADDR_SIZE SZ_32M
1506 +
1507 +
1508 + /*
1509 +diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
1510 +index b2035b2f57ce3..635bdf947105c 100644
1511 +--- a/arch/powerpc/include/asm/smp.h
1512 ++++ b/arch/powerpc/include/asm/smp.h
1513 +@@ -121,6 +121,11 @@ static inline struct cpumask *cpu_sibling_mask(int cpu)
1514 + return per_cpu(cpu_sibling_map, cpu);
1515 + }
1516 +
1517 ++static inline struct cpumask *cpu_core_mask(int cpu)
1518 ++{
1519 ++ return per_cpu(cpu_core_map, cpu);
1520 ++}
1521 ++
1522 + static inline struct cpumask *cpu_l2_cache_mask(int cpu)
1523 + {
1524 + return per_cpu(cpu_l2_cache_map, cpu);
1525 +diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
1526 +index 8482739d42f38..eddf362caedce 100644
1527 +--- a/arch/powerpc/kernel/fadump.c
1528 ++++ b/arch/powerpc/kernel/fadump.c
1529 +@@ -292,7 +292,7 @@ static void fadump_show_config(void)
1530 + * that is required for a kernel to boot successfully.
1531 + *
1532 + */
1533 +-static inline u64 fadump_calculate_reserve_size(void)
1534 ++static __init u64 fadump_calculate_reserve_size(void)
1535 + {
1536 + u64 base, size, bootmem_min;
1537 + int ret;
1538 +diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
1539 +index c1545f22c077d..7a14a094be8ac 100644
1540 +--- a/arch/powerpc/kernel/prom.c
1541 ++++ b/arch/powerpc/kernel/prom.c
1542 +@@ -268,7 +268,7 @@ static struct feature_property {
1543 + };
1544 +
1545 + #if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
1546 +-static inline void identical_pvr_fixup(unsigned long node)
1547 ++static __init void identical_pvr_fixup(unsigned long node)
1548 + {
1549 + unsigned int pvr;
1550 + const char *model = of_get_flat_dt_prop(node, "model", NULL);
1551 +diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
1552 +index 7d6cf75a7fd80..dd34ea6744965 100644
1553 +--- a/arch/powerpc/kernel/smp.c
1554 ++++ b/arch/powerpc/kernel/smp.c
1555 +@@ -975,17 +975,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
1556 + local_memory_node(numa_cpu_lookup_table[cpu]));
1557 + }
1558 + #endif
1559 +- /*
1560 +- * cpu_core_map is now more updated and exists only since
1561 +- * its been exported for long. It only will have a snapshot
1562 +- * of cpu_cpu_mask.
1563 +- */
1564 +- cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
1565 + }
1566 +
1567 + /* Init the cpumasks so the boot CPU is related to itself */
1568 + cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
1569 + cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1570 ++ cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
1571 +
1572 + if (has_coregroup_support())
1573 + cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid));
1574 +@@ -1304,6 +1299,9 @@ static void remove_cpu_from_masks(int cpu)
1575 + set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
1576 + }
1577 +
1578 ++ for_each_cpu(i, cpu_core_mask(cpu))
1579 ++ set_cpus_unrelated(cpu, i, cpu_core_mask);
1580 ++
1581 + if (has_coregroup_support()) {
1582 + for_each_cpu(i, cpu_coregroup_mask(cpu))
1583 + set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1584 +@@ -1364,8 +1362,11 @@ static void update_coregroup_mask(int cpu, cpumask_var_t *mask)
1585 +
1586 + static void add_cpu_to_masks(int cpu)
1587 + {
1588 ++ struct cpumask *(*submask_fn)(int) = cpu_sibling_mask;
1589 + int first_thread = cpu_first_thread_sibling(cpu);
1590 ++ int chip_id = cpu_to_chip_id(cpu);
1591 + cpumask_var_t mask;
1592 ++ bool ret;
1593 + int i;
1594 +
1595 + /*
1596 +@@ -1381,12 +1382,36 @@ static void add_cpu_to_masks(int cpu)
1597 + add_cpu_to_smallcore_masks(cpu);
1598 +
1599 + /* In CPU-hotplug path, hence use GFP_ATOMIC */
1600 +- alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1601 ++ ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
1602 + update_mask_by_l2(cpu, &mask);
1603 +
1604 + if (has_coregroup_support())
1605 + update_coregroup_mask(cpu, &mask);
1606 +
1607 ++ if (chip_id == -1 || !ret) {
1608 ++ cpumask_copy(per_cpu(cpu_core_map, cpu), cpu_cpu_mask(cpu));
1609 ++ goto out;
1610 ++ }
1611 ++
1612 ++ if (shared_caches)
1613 ++ submask_fn = cpu_l2_cache_mask;
1614 ++
1615 ++ /* Update core_mask with all the CPUs that are part of submask */
1616 ++ or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);
1617 ++
1618 ++ /* Skip all CPUs already part of current CPU core mask */
1619 ++ cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));
1620 ++
1621 ++ for_each_cpu(i, mask) {
1622 ++ if (chip_id == cpu_to_chip_id(i)) {
1623 ++ or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
1624 ++ cpumask_andnot(mask, mask, submask_fn(i));
1625 ++ } else {
1626 ++ cpumask_andnot(mask, mask, cpu_core_mask(i));
1627 ++ }
1628 ++ }
1629 ++
1630 ++out:
1631 + free_cpumask_var(mask);
1632 + }
1633 +
1634 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
1635 +index e3b1839fc2519..280f7992ae993 100644
1636 +--- a/arch/powerpc/kvm/book3s_hv.c
1637 ++++ b/arch/powerpc/kvm/book3s_hv.c
1638 +@@ -3697,7 +3697,10 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
1639 + vcpu->arch.dec_expires = dec + tb;
1640 + vcpu->cpu = -1;
1641 + vcpu->arch.thread_cpu = -1;
1642 ++ /* Save guest CTRL register, set runlatch to 1 */
1643 + vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
1644 ++ if (!(vcpu->arch.ctrl & 1))
1645 ++ mtspr(SPRN_CTRLT, vcpu->arch.ctrl | 1);
1646 +
1647 + vcpu->arch.iamr = mfspr(SPRN_IAMR);
1648 + vcpu->arch.pspb = mfspr(SPRN_PSPB);
1649 +diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
1650 +index 3adcf730f4784..1d5eec847b883 100644
1651 +--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
1652 ++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
1653 +@@ -108,7 +108,7 @@ static int early_map_kernel_page(unsigned long ea, unsigned long pa,
1654 +
1655 + set_the_pte:
1656 + set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
1657 +- smp_wmb();
1658 ++ asm volatile("ptesync": : :"memory");
1659 + return 0;
1660 + }
1661 +
1662 +@@ -168,7 +168,7 @@ static int __map_kernel_page(unsigned long ea, unsigned long pa,
1663 +
1664 + set_the_pte:
1665 + set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
1666 +- smp_wmb();
1667 ++ asm volatile("ptesync": : :"memory");
1668 + return 0;
1669 + }
1670 +
1671 +diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
1672 +index e1a21d34c6e49..5e8eedda45d39 100644
1673 +--- a/arch/powerpc/perf/isa207-common.c
1674 ++++ b/arch/powerpc/perf/isa207-common.c
1675 +@@ -400,8 +400,8 @@ ebb_bhrb:
1676 + * EBB events are pinned & exclusive, so this should never actually
1677 + * hit, but we leave it as a fallback in case.
1678 + */
1679 +- mask |= CNST_EBB_VAL(ebb);
1680 +- value |= CNST_EBB_MASK;
1681 ++ mask |= CNST_EBB_MASK;
1682 ++ value |= CNST_EBB_VAL(ebb);
1683 +
1684 + *maskp = mask;
1685 + *valp = value;
1686 +diff --git a/arch/powerpc/perf/power10-events-list.h b/arch/powerpc/perf/power10-events-list.h
1687 +index 60c1b8111082d..e66487804a599 100644
1688 +--- a/arch/powerpc/perf/power10-events-list.h
1689 ++++ b/arch/powerpc/perf/power10-events-list.h
1690 +@@ -66,5 +66,5 @@ EVENT(PM_RUN_INST_CMPL_ALT, 0x00002);
1691 + * thresh end (TE)
1692 + */
1693 +
1694 +-EVENT(MEM_LOADS, 0x34340401e0);
1695 +-EVENT(MEM_STORES, 0x343c0401e0);
1696 ++EVENT(MEM_LOADS, 0x35340401e0);
1697 ++EVENT(MEM_STORES, 0x353c0401e0);
1698 +diff --git a/arch/powerpc/platforms/52xx/lite5200_sleep.S b/arch/powerpc/platforms/52xx/lite5200_sleep.S
1699 +index 11475c58ea431..afee8b1515a8e 100644
1700 +--- a/arch/powerpc/platforms/52xx/lite5200_sleep.S
1701 ++++ b/arch/powerpc/platforms/52xx/lite5200_sleep.S
1702 +@@ -181,7 +181,7 @@ sram_code:
1703 + udelay: /* r11 - tb_ticks_per_usec, r12 - usecs, overwrites r13 */
1704 + mullw r12, r12, r11
1705 + mftb r13 /* start */
1706 +- addi r12, r13, r12 /* end */
1707 ++ add r12, r13, r12 /* end */
1708 + 1:
1709 + mftb r13 /* current */
1710 + cmp cr0, r13, r12
1711 +diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
1712 +index f9ae17e8a0f46..a8f9140a24fa3 100644
1713 +--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
1714 ++++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
1715 +@@ -50,6 +50,7 @@ EXPORT_SYMBOL_GPL(init_phb_dynamic);
1716 + int remove_phb_dynamic(struct pci_controller *phb)
1717 + {
1718 + struct pci_bus *b = phb->bus;
1719 ++ struct pci_host_bridge *host_bridge = to_pci_host_bridge(b->bridge);
1720 + struct resource *res;
1721 + int rc, i;
1722 +
1723 +@@ -76,7 +77,8 @@ int remove_phb_dynamic(struct pci_controller *phb)
1724 + /* Remove the PCI bus and unregister the bridge device from sysfs */
1725 + phb->bus = NULL;
1726 + pci_remove_bus(b);
1727 +- device_unregister(b->bridge);
1728 ++ host_bridge->bus = NULL;
1729 ++ device_unregister(&host_bridge->dev);
1730 +
1731 + /* Now release the IO resource */
1732 + if (res->flags & IORESOURCE_IO)
1733 +diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
1734 +index b2797cfe4e2b0..68276e05502b9 100644
1735 +--- a/arch/powerpc/platforms/pseries/vio.c
1736 ++++ b/arch/powerpc/platforms/pseries/vio.c
1737 +@@ -1286,6 +1286,10 @@ static int vio_bus_remove(struct device *dev)
1738 + int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
1739 + const char *mod_name)
1740 + {
1741 ++ // vio_bus_type is only initialised for pseries
1742 ++ if (!machine_is(pseries))
1743 ++ return -ENODEV;
1744 ++
1745 + pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
1746 +
1747 + /* fill in 'struct driver' fields */
1748 +diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
1749 +index a80440af491a2..5b0f6b6278e3d 100644
1750 +--- a/arch/powerpc/sysdev/xive/common.c
1751 ++++ b/arch/powerpc/sysdev/xive/common.c
1752 +@@ -261,17 +261,20 @@ notrace void xmon_xive_do_dump(int cpu)
1753 + xmon_printf("\n");
1754 + }
1755 +
1756 ++static struct irq_data *xive_get_irq_data(u32 hw_irq)
1757 ++{
1758 ++ unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq);
1759 ++
1760 ++ return irq ? irq_get_irq_data(irq) : NULL;
1761 ++}
1762 ++
1763 + int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
1764 + {
1765 +- struct irq_chip *chip = irq_data_get_irq_chip(d);
1766 + int rc;
1767 + u32 target;
1768 + u8 prio;
1769 + u32 lirq;
1770 +
1771 +- if (!is_xive_irq(chip))
1772 +- return -EINVAL;
1773 +-
1774 + rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
1775 + if (rc) {
1776 + xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc);
1777 +@@ -281,6 +284,9 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
1778 + xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
1779 + hw_irq, target, prio, lirq);
1780 +
1781 ++ if (!d)
1782 ++ d = xive_get_irq_data(hw_irq);
1783 ++
1784 + if (d) {
1785 + struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1786 + u64 val = xive_esb_read(xd, XIVE_ESB_GET);
1787 +@@ -1606,6 +1612,8 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
1788 + u32 target;
1789 + u8 prio;
1790 + u32 lirq;
1791 ++ struct xive_irq_data *xd;
1792 ++ u64 val;
1793 +
1794 + if (!is_xive_irq(chip))
1795 + return;
1796 +@@ -1619,17 +1627,14 @@ static void xive_debug_show_irq(struct seq_file *m, u32 hw_irq, struct irq_data
1797 + seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
1798 + hw_irq, target, prio, lirq);
1799 +
1800 +- if (d) {
1801 +- struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
1802 +- u64 val = xive_esb_read(xd, XIVE_ESB_GET);
1803 +-
1804 +- seq_printf(m, "flags=%c%c%c PQ=%c%c",
1805 +- xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
1806 +- xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
1807 +- xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
1808 +- val & XIVE_ESB_VAL_P ? 'P' : '-',
1809 +- val & XIVE_ESB_VAL_Q ? 'Q' : '-');
1810 +- }
1811 ++ xd = irq_data_get_irq_handler_data(d);
1812 ++ val = xive_esb_read(xd, XIVE_ESB_GET);
1813 ++ seq_printf(m, "flags=%c%c%c PQ=%c%c",
1814 ++ xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
1815 ++ xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
1816 ++ xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
1817 ++ val & XIVE_ESB_VAL_P ? 'P' : '-',
1818 ++ val & XIVE_ESB_VAL_Q ? 'Q' : '-');
1819 + seq_puts(m, "\n");
1820 + }
1821 +
1822 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1823 +index 4d843e64496f4..e83ce909686c5 100644
1824 +--- a/arch/s390/kernel/setup.c
1825 ++++ b/arch/s390/kernel/setup.c
1826 +@@ -925,9 +925,9 @@ static int __init setup_hwcaps(void)
1827 + if (MACHINE_HAS_VX) {
1828 + elf_hwcap |= HWCAP_S390_VXRS;
1829 + if (test_facility(134))
1830 +- elf_hwcap |= HWCAP_S390_VXRS_EXT;
1831 +- if (test_facility(135))
1832 + elf_hwcap |= HWCAP_S390_VXRS_BCD;
1833 ++ if (test_facility(135))
1834 ++ elf_hwcap |= HWCAP_S390_VXRS_EXT;
1835 + if (test_facility(148))
1836 + elf_hwcap |= HWCAP_S390_VXRS_EXT2;
1837 + if (test_facility(152))
1838 +diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
1839 +index 6d6b57059493e..b9f85b2dc053f 100644
1840 +--- a/arch/s390/kvm/gaccess.c
1841 ++++ b/arch/s390/kvm/gaccess.c
1842 +@@ -976,7 +976,9 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
1843 + * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
1844 + * @sg: pointer to the shadow guest address space structure
1845 + * @saddr: faulting address in the shadow gmap
1846 +- * @pgt: pointer to the page table address result
1847 ++ * @pgt: pointer to the beginning of the page table for the given address if
1848 ++ * successful (return value 0), or to the first invalid DAT entry in
1849 ++ * case of exceptions (return value > 0)
1850 + * @fake: pgt references contiguous guest memory block, not a pgtable
1851 + */
1852 + static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
1853 +@@ -1034,6 +1036,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
1854 + rfte.val = ptr;
1855 + goto shadow_r2t;
1856 + }
1857 ++ *pgt = ptr + vaddr.rfx * 8;
1858 + rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
1859 + if (rc)
1860 + return rc;
1861 +@@ -1060,6 +1063,7 @@ shadow_r2t:
1862 + rste.val = ptr;
1863 + goto shadow_r3t;
1864 + }
1865 ++ *pgt = ptr + vaddr.rsx * 8;
1866 + rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
1867 + if (rc)
1868 + return rc;
1869 +@@ -1087,6 +1091,7 @@ shadow_r3t:
1870 + rtte.val = ptr;
1871 + goto shadow_sgt;
1872 + }
1873 ++ *pgt = ptr + vaddr.rtx * 8;
1874 + rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
1875 + if (rc)
1876 + return rc;
1877 +@@ -1123,6 +1128,7 @@ shadow_sgt:
1878 + ste.val = ptr;
1879 + goto shadow_pgt;
1880 + }
1881 ++ *pgt = ptr + vaddr.sx * 8;
1882 + rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
1883 + if (rc)
1884 + return rc;
1885 +@@ -1157,6 +1163,8 @@ shadow_pgt:
1886 + * @vcpu: virtual cpu
1887 + * @sg: pointer to the shadow guest address space structure
1888 + * @saddr: faulting address in the shadow gmap
1889 ++ * @datptr: will contain the address of the faulting DAT table entry, or of
1890 ++ * the valid leaf, plus some flags
1891 + *
1892 + * Returns: - 0 if the shadow fault was successfully resolved
1893 + * - > 0 (pgm exception code) on exceptions while faulting
1894 +@@ -1165,11 +1173,11 @@ shadow_pgt:
1895 + * - -ENOMEM if out of memory
1896 + */
1897 + int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
1898 +- unsigned long saddr)
1899 ++ unsigned long saddr, unsigned long *datptr)
1900 + {
1901 + union vaddress vaddr;
1902 + union page_table_entry pte;
1903 +- unsigned long pgt;
1904 ++ unsigned long pgt = 0;
1905 + int dat_protection, fake;
1906 + int rc;
1907 +
1908 +@@ -1191,8 +1199,20 @@ int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
1909 + pte.val = pgt + vaddr.px * PAGE_SIZE;
1910 + goto shadow_page;
1911 + }
1912 +- if (!rc)
1913 +- rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
1914 ++
1915 ++ switch (rc) {
1916 ++ case PGM_SEGMENT_TRANSLATION:
1917 ++ case PGM_REGION_THIRD_TRANS:
1918 ++ case PGM_REGION_SECOND_TRANS:
1919 ++ case PGM_REGION_FIRST_TRANS:
1920 ++ pgt |= PEI_NOT_PTE;
1921 ++ break;
1922 ++ case 0:
1923 ++ pgt += vaddr.px * 8;
1924 ++ rc = gmap_read_table(sg->parent, pgt, &pte.val);
1925 ++ }
1926 ++ if (datptr)
1927 ++ *datptr = pgt | dat_protection * PEI_DAT_PROT;
1928 + if (!rc && pte.i)
1929 + rc = PGM_PAGE_TRANSLATION;
1930 + if (!rc && pte.z)
1931 +diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
1932 +index f4c51756c4623..7c72a5e3449f8 100644
1933 +--- a/arch/s390/kvm/gaccess.h
1934 ++++ b/arch/s390/kvm/gaccess.h
1935 +@@ -18,17 +18,14 @@
1936 +
1937 + /**
1938 + * kvm_s390_real_to_abs - convert guest real address to guest absolute address
1939 +- * @vcpu - guest virtual cpu
1940 ++ * @prefix - guest prefix
1941 + * @gra - guest real address
1942 + *
1943 + * Returns the guest absolute address that corresponds to the passed guest real
1944 +- * address @gra of a virtual guest cpu by applying its prefix.
1945 ++ * address @gra of by applying the given prefix.
1946 + */
1947 +-static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
1948 +- unsigned long gra)
1949 ++static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
1950 + {
1951 +- unsigned long prefix = kvm_s390_get_prefix(vcpu);
1952 +-
1953 + if (gra < 2 * PAGE_SIZE)
1954 + gra += prefix;
1955 + else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
1956 +@@ -36,6 +33,43 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
1957 + return gra;
1958 + }
1959 +
1960 ++/**
1961 ++ * kvm_s390_real_to_abs - convert guest real address to guest absolute address
1962 ++ * @vcpu - guest virtual cpu
1963 ++ * @gra - guest real address
1964 ++ *
1965 ++ * Returns the guest absolute address that corresponds to the passed guest real
1966 ++ * address @gra of a virtual guest cpu by applying its prefix.
1967 ++ */
1968 ++static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
1969 ++ unsigned long gra)
1970 ++{
1971 ++ return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
1972 ++}
1973 ++
1974 ++/**
1975 ++ * _kvm_s390_logical_to_effective - convert guest logical to effective address
1976 ++ * @psw: psw of the guest
1977 ++ * @ga: guest logical address
1978 ++ *
1979 ++ * Convert a guest logical address to an effective address by applying the
1980 ++ * rules of the addressing mode defined by bits 31 and 32 of the given PSW
1981 ++ * (extendended/basic addressing mode).
1982 ++ *
1983 ++ * Depending on the addressing mode, the upper 40 bits (24 bit addressing
1984 ++ * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
1985 ++ * mode) of @ga will be zeroed and the remaining bits will be returned.
1986 ++ */
1987 ++static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
1988 ++ unsigned long ga)
1989 ++{
1990 ++ if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
1991 ++ return ga;
1992 ++ if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
1993 ++ return ga & ((1UL << 31) - 1);
1994 ++ return ga & ((1UL << 24) - 1);
1995 ++}
1996 ++
1997 + /**
1998 + * kvm_s390_logical_to_effective - convert guest logical to effective address
1999 + * @vcpu: guest virtual cpu
2000 +@@ -52,13 +86,7 @@ static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
2001 + static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
2002 + unsigned long ga)
2003 + {
2004 +- psw_t *psw = &vcpu->arch.sie_block->gpsw;
2005 +-
2006 +- if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
2007 +- return ga;
2008 +- if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
2009 +- return ga & ((1UL << 31) - 1);
2010 +- return ga & ((1UL << 24) - 1);
2011 ++ return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
2012 + }
2013 +
2014 + /*
2015 +@@ -359,7 +387,11 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
2016 + int ipte_lock_held(struct kvm_vcpu *vcpu);
2017 + int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
2018 +
2019 ++/* MVPG PEI indication bits */
2020 ++#define PEI_DAT_PROT 2
2021 ++#define PEI_NOT_PTE 4
2022 ++
2023 + int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
2024 +- unsigned long saddr);
2025 ++ unsigned long saddr, unsigned long *datptr);
2026 +
2027 + #endif /* __KVM_S390_GACCESS_H */
2028 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
2029 +index 425d3d75320bf..20afffd6b9820 100644
2030 +--- a/arch/s390/kvm/kvm-s390.c
2031 ++++ b/arch/s390/kvm/kvm-s390.c
2032 +@@ -4308,16 +4308,16 @@ static void store_regs_fmt2(struct kvm_vcpu *vcpu)
2033 + kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
2034 + kvm_run->s.regs.diag318 = vcpu->arch.diag318_info.val;
2035 + if (MACHINE_HAS_GS) {
2036 ++ preempt_disable();
2037 + __ctl_set_bit(2, 4);
2038 + if (vcpu->arch.gs_enabled)
2039 + save_gs_cb(current->thread.gs_cb);
2040 +- preempt_disable();
2041 + current->thread.gs_cb = vcpu->arch.host_gscb;
2042 + restore_gs_cb(vcpu->arch.host_gscb);
2043 +- preempt_enable();
2044 + if (!vcpu->arch.host_gscb)
2045 + __ctl_clear_bit(2, 4);
2046 + vcpu->arch.host_gscb = NULL;
2047 ++ preempt_enable();
2048 + }
2049 + /* SIE will save etoken directly into SDNX and therefore kvm_run */
2050 + }
2051 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
2052 +index 4f3cbf6003a9d..3fbf7081c000c 100644
2053 +--- a/arch/s390/kvm/vsie.c
2054 ++++ b/arch/s390/kvm/vsie.c
2055 +@@ -416,11 +416,6 @@ static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
2056 + memcpy((void *)((u64)scb_o + 0xc0),
2057 + (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
2058 + break;
2059 +- case ICPT_PARTEXEC:
2060 +- /* MVPG only */
2061 +- memcpy((void *)((u64)scb_o + 0xc0),
2062 +- (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
2063 +- break;
2064 + }
2065 +
2066 + if (scb_s->ihcpu != 0xffffU)
2067 +@@ -619,10 +614,10 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
2068 + /* with mso/msl, the prefix lies at offset *mso* */
2069 + prefix += scb_s->mso;
2070 +
2071 +- rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
2072 ++ rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix, NULL);
2073 + if (!rc && (scb_s->ecb & ECB_TE))
2074 + rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
2075 +- prefix + PAGE_SIZE);
2076 ++ prefix + PAGE_SIZE, NULL);
2077 + /*
2078 + * We don't have to mprotect, we will be called for all unshadows.
2079 + * SIE will detect if protection applies and trigger a validity.
2080 +@@ -913,7 +908,7 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
2081 + current->thread.gmap_addr, 1);
2082 +
2083 + rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
2084 +- current->thread.gmap_addr);
2085 ++ current->thread.gmap_addr, NULL);
2086 + if (rc > 0) {
2087 + rc = inject_fault(vcpu, rc,
2088 + current->thread.gmap_addr,
2089 +@@ -935,7 +930,7 @@ static void handle_last_fault(struct kvm_vcpu *vcpu,
2090 + {
2091 + if (vsie_page->fault_addr)
2092 + kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
2093 +- vsie_page->fault_addr);
2094 ++ vsie_page->fault_addr, NULL);
2095 + vsie_page->fault_addr = 0;
2096 + }
2097 +
2098 +@@ -982,6 +977,98 @@ static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
2099 + return 0;
2100 + }
2101 +
2102 ++/*
2103 ++ * Get a register for a nested guest.
2104 ++ * @vcpu the vcpu of the guest
2105 ++ * @vsie_page the vsie_page for the nested guest
2106 ++ * @reg the register number, the upper 4 bits are ignored.
2107 ++ * returns: the value of the register.
2108 ++ */
2109 ++static u64 vsie_get_register(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page, u8 reg)
2110 ++{
2111 ++ /* no need to validate the parameter and/or perform error handling */
2112 ++ reg &= 0xf;
2113 ++ switch (reg) {
2114 ++ case 15:
2115 ++ return vsie_page->scb_s.gg15;
2116 ++ case 14:
2117 ++ return vsie_page->scb_s.gg14;
2118 ++ default:
2119 ++ return vcpu->run->s.regs.gprs[reg];
2120 ++ }
2121 ++}
2122 ++
2123 ++static int vsie_handle_mvpg(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
2124 ++{
2125 ++ struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
2126 ++ unsigned long pei_dest, pei_src, src, dest, mask, prefix;
2127 ++ u64 *pei_block = &vsie_page->scb_o->mcic;
2128 ++ int edat, rc_dest, rc_src;
2129 ++ union ctlreg0 cr0;
2130 ++
2131 ++ cr0.val = vcpu->arch.sie_block->gcr[0];
2132 ++ edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
2133 ++ mask = _kvm_s390_logical_to_effective(&scb_s->gpsw, PAGE_MASK);
2134 ++ prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
2135 ++
2136 ++ dest = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 20) & mask;
2137 ++ dest = _kvm_s390_real_to_abs(prefix, dest) + scb_s->mso;
2138 ++ src = vsie_get_register(vcpu, vsie_page, scb_s->ipb >> 16) & mask;
2139 ++ src = _kvm_s390_real_to_abs(prefix, src) + scb_s->mso;
2140 ++
2141 ++ rc_dest = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, dest, &pei_dest);
2142 ++ rc_src = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, src, &pei_src);
2143 ++ /*
2144 ++ * Either everything went well, or something non-critical went wrong
2145 ++ * e.g. because of a race. In either case, simply retry.
2146 ++ */
2147 ++ if (rc_dest == -EAGAIN || rc_src == -EAGAIN || (!rc_dest && !rc_src)) {
2148 ++ retry_vsie_icpt(vsie_page);
2149 ++ return -EAGAIN;
2150 ++ }
2151 ++ /* Something more serious went wrong, propagate the error */
2152 ++ if (rc_dest < 0)
2153 ++ return rc_dest;
2154 ++ if (rc_src < 0)
2155 ++ return rc_src;
2156 ++
2157 ++ /* The only possible suppressing exception: just deliver it */
2158 ++ if (rc_dest == PGM_TRANSLATION_SPEC || rc_src == PGM_TRANSLATION_SPEC) {
2159 ++ clear_vsie_icpt(vsie_page);
2160 ++ rc_dest = kvm_s390_inject_program_int(vcpu, PGM_TRANSLATION_SPEC);
2161 ++ WARN_ON_ONCE(rc_dest);
2162 ++ return 1;
2163 ++ }
2164 ++
2165 ++ /*
2166 ++ * Forward the PEI intercept to the guest if it was a page fault, or
2167 ++ * also for segment and region table faults if EDAT applies.
2168 ++ */
2169 ++ if (edat) {
2170 ++ rc_dest = rc_dest == PGM_ASCE_TYPE ? rc_dest : 0;
2171 ++ rc_src = rc_src == PGM_ASCE_TYPE ? rc_src : 0;
2172 ++ } else {
2173 ++ rc_dest = rc_dest != PGM_PAGE_TRANSLATION ? rc_dest : 0;
2174 ++ rc_src = rc_src != PGM_PAGE_TRANSLATION ? rc_src : 0;
2175 ++ }
2176 ++ if (!rc_dest && !rc_src) {
2177 ++ pei_block[0] = pei_dest;
2178 ++ pei_block[1] = pei_src;
2179 ++ return 1;
2180 ++ }
2181 ++
2182 ++ retry_vsie_icpt(vsie_page);
2183 ++
2184 ++ /*
2185 ++ * The host has edat, and the guest does not, or it was an ASCE type
2186 ++ * exception. The host needs to inject the appropriate DAT interrupts
2187 ++ * into the guest.
2188 ++ */
2189 ++ if (rc_dest)
2190 ++ return inject_fault(vcpu, rc_dest, dest, 1);
2191 ++ return inject_fault(vcpu, rc_src, src, 0);
2192 ++}
2193 ++
2194 + /*
2195 + * Run the vsie on a shadow scb and a shadow gmap, without any further
2196 + * sanity checks, handling SIE faults.
2197 +@@ -1068,6 +1155,10 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
2198 + if ((scb_s->ipa & 0xf000) != 0xf000)
2199 + scb_s->ipa += 0x1000;
2200 + break;
2201 ++ case ICPT_PARTEXEC:
2202 ++ if (scb_s->ipa == 0xb254)
2203 ++ rc = vsie_handle_mvpg(vcpu, vsie_page);
2204 ++ break;
2205 + }
2206 + return rc;
2207 + }
2208 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
2209 +index 183ee73d9019f..f3c8a8110f60c 100644
2210 +--- a/arch/x86/Kconfig
2211 ++++ b/arch/x86/Kconfig
2212 +@@ -562,6 +562,7 @@ config X86_UV
2213 + depends on X86_EXTENDED_PLATFORM
2214 + depends on NUMA
2215 + depends on EFI
2216 ++ depends on KEXEC_CORE
2217 + depends on X86_X2APIC
2218 + depends on PCI
2219 + help
2220 +diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
2221 +index c44aba290fbb8..2bc312ffee52b 100644
2222 +--- a/arch/x86/crypto/poly1305_glue.c
2223 ++++ b/arch/x86/crypto/poly1305_glue.c
2224 +@@ -16,7 +16,7 @@
2225 + #include <asm/simd.h>
2226 +
2227 + asmlinkage void poly1305_init_x86_64(void *ctx,
2228 +- const u8 key[POLY1305_KEY_SIZE]);
2229 ++ const u8 key[POLY1305_BLOCK_SIZE]);
2230 + asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp,
2231 + const size_t len, const u32 padbit);
2232 + asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
2233 +@@ -81,7 +81,7 @@ static void convert_to_base2_64(void *ctx)
2234 + state->is_base2_26 = 0;
2235 + }
2236 +
2237 +-static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE])
2238 ++static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE])
2239 + {
2240 + poly1305_init_x86_64(ctx, key);
2241 + }
2242 +@@ -129,7 +129,7 @@ static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE],
2243 + poly1305_emit_avx(ctx, mac, nonce);
2244 + }
2245 +
2246 +-void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key)
2247 ++void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE])
2248 + {
2249 + poly1305_simd_init(&dctx->h, key);
2250 + dctx->s[0] = get_unaligned_le32(&key[16]);
2251 +diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c
2252 +index be50ef8572cce..6a98a76516214 100644
2253 +--- a/arch/x86/events/amd/iommu.c
2254 ++++ b/arch/x86/events/amd/iommu.c
2255 +@@ -81,12 +81,12 @@ static struct attribute_group amd_iommu_events_group = {
2256 + };
2257 +
2258 + struct amd_iommu_event_desc {
2259 +- struct kobj_attribute attr;
2260 ++ struct device_attribute attr;
2261 + const char *event;
2262 + };
2263 +
2264 +-static ssize_t _iommu_event_show(struct kobject *kobj,
2265 +- struct kobj_attribute *attr, char *buf)
2266 ++static ssize_t _iommu_event_show(struct device *dev,
2267 ++ struct device_attribute *attr, char *buf)
2268 + {
2269 + struct amd_iommu_event_desc *event =
2270 + container_of(attr, struct amd_iommu_event_desc, attr);
2271 +diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
2272 +index 7f014d450bc28..582c0ffb5e983 100644
2273 +--- a/arch/x86/events/amd/uncore.c
2274 ++++ b/arch/x86/events/amd/uncore.c
2275 +@@ -275,14 +275,14 @@ static struct attribute_group amd_uncore_attr_group = {
2276 + };
2277 +
2278 + #define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \
2279 +-static ssize_t __uncore_##_var##_show(struct kobject *kobj, \
2280 +- struct kobj_attribute *attr, \
2281 ++static ssize_t __uncore_##_var##_show(struct device *dev, \
2282 ++ struct device_attribute *attr, \
2283 + char *page) \
2284 + { \
2285 + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
2286 + return sprintf(page, _format "\n"); \
2287 + } \
2288 +-static struct kobj_attribute format_attr_##_var = \
2289 ++static struct device_attribute format_attr_##_var = \
2290 + __ATTR(_name, 0444, __uncore_##_var##_show, NULL)
2291 +
2292 + DEFINE_UNCORE_FORMAT_ATTR(event12, event, "config:0-7,32-35");
2293 +diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
2294 +index 235f5cde06fc3..40f466de89242 100644
2295 +--- a/arch/x86/kernel/apic/x2apic_uv_x.c
2296 ++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
2297 +@@ -1652,6 +1652,9 @@ static __init int uv_system_init_hubless(void)
2298 + if (rc < 0)
2299 + return rc;
2300 +
2301 ++ /* Set section block size for current node memory */
2302 ++ set_block_size();
2303 ++
2304 + /* Create user access node */
2305 + if (rc >= 0)
2306 + uv_setup_proc_files(1);
2307 +diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
2308 +index ec6f0415bc6d1..bbbd248fe9132 100644
2309 +--- a/arch/x86/kernel/cpu/microcode/core.c
2310 ++++ b/arch/x86/kernel/cpu/microcode/core.c
2311 +@@ -629,16 +629,16 @@ static ssize_t reload_store(struct device *dev,
2312 + if (val != 1)
2313 + return size;
2314 +
2315 +- tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
2316 +- if (tmp_ret != UCODE_NEW)
2317 +- return size;
2318 +-
2319 + get_online_cpus();
2320 +
2321 + ret = check_online_cpus();
2322 + if (ret)
2323 + goto put;
2324 +
2325 ++ tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
2326 ++ if (tmp_ret != UCODE_NEW)
2327 ++ goto put;
2328 ++
2329 + mutex_lock(&microcode_mutex);
2330 + ret = microcode_reload_late();
2331 + mutex_unlock(&microcode_mutex);
2332 +diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
2333 +index 22aad412f965e..629c4994f1654 100644
2334 +--- a/arch/x86/kernel/e820.c
2335 ++++ b/arch/x86/kernel/e820.c
2336 +@@ -31,8 +31,8 @@
2337 + * - inform the user about the firmware's notion of memory layout
2338 + * via /sys/firmware/memmap
2339 + *
2340 +- * - the hibernation code uses it to generate a kernel-independent MD5
2341 +- * fingerprint of the physical memory layout of a system.
2342 ++ * - the hibernation code uses it to generate a kernel-independent CRC32
2343 ++ * checksum of the physical memory layout of a system.
2344 + *
2345 + * - 'e820_table_kexec': a slightly modified (by the kernel) firmware version
2346 + * passed to us by the bootloader - the major difference between
2347 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
2348 +index 39f7d8c3c064b..535da74c124e4 100644
2349 +--- a/arch/x86/kernel/kprobes/core.c
2350 ++++ b/arch/x86/kernel/kprobes/core.c
2351 +@@ -159,6 +159,8 @@ NOKPROBE_SYMBOL(skip_prefixes);
2352 + int can_boost(struct insn *insn, void *addr)
2353 + {
2354 + kprobe_opcode_t opcode;
2355 ++ insn_byte_t prefix;
2356 ++ int i;
2357 +
2358 + if (search_exception_tables((unsigned long)addr))
2359 + return 0; /* Page fault may occur on this address. */
2360 +@@ -171,9 +173,14 @@ int can_boost(struct insn *insn, void *addr)
2361 + if (insn->opcode.nbytes != 1)
2362 + return 0;
2363 +
2364 +- /* Can't boost Address-size override prefix */
2365 +- if (unlikely(inat_is_address_size_prefix(insn->attr)))
2366 +- return 0;
2367 ++ for_each_insn_prefix(insn, i, prefix) {
2368 ++ insn_attr_t attr;
2369 ++
2370 ++ attr = inat_get_opcode_attribute(prefix);
2371 ++ /* Can't boost Address-size override prefix and CS override prefix */
2372 ++ if (prefix == 0x2e || inat_is_address_size_prefix(attr))
2373 ++ return 0;
2374 ++ }
2375 +
2376 + opcode = insn->opcode.bytes[0];
2377 +
2378 +@@ -198,8 +205,8 @@ int can_boost(struct insn *insn, void *addr)
2379 + /* clear and set flags are boostable */
2380 + return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
2381 + default:
2382 +- /* CS override prefix and call are not boostable */
2383 +- return (opcode != 0x2e && opcode != 0x9a);
2384 ++ /* call is not boostable */
2385 ++ return opcode != 0x9a;
2386 + }
2387 + }
2388 +
2389 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
2390 +index b95d1c533fef5..582387fc939f4 100644
2391 +--- a/arch/x86/kernel/smpboot.c
2392 ++++ b/arch/x86/kernel/smpboot.c
2393 +@@ -452,29 +452,52 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
2394 + return false;
2395 + }
2396 +
2397 ++static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
2398 ++{
2399 ++ if (c->phys_proc_id == o->phys_proc_id &&
2400 ++ c->cpu_die_id == o->cpu_die_id)
2401 ++ return true;
2402 ++ return false;
2403 ++}
2404 ++
2405 + /*
2406 +- * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs.
2407 ++ * Unlike the other levels, we do not enforce keeping a
2408 ++ * multicore group inside a NUMA node. If this happens, we will
2409 ++ * discard the MC level of the topology later.
2410 ++ */
2411 ++static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
2412 ++{
2413 ++ if (c->phys_proc_id == o->phys_proc_id)
2414 ++ return true;
2415 ++ return false;
2416 ++}
2417 ++
2418 ++/*
2419 ++ * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs.
2420 + *
2421 +- * These are Intel CPUs that enumerate an LLC that is shared by
2422 +- * multiple NUMA nodes. The LLC on these systems is shared for
2423 +- * off-package data access but private to the NUMA node (half
2424 +- * of the package) for on-package access.
2425 ++ * Any Intel CPU that has multiple nodes per package and does not
2426 ++ * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology.
2427 + *
2428 +- * CPUID (the source of the information about the LLC) can only
2429 +- * enumerate the cache as being shared *or* unshared, but not
2430 +- * this particular configuration. The CPU in this case enumerates
2431 +- * the cache to be shared across the entire package (spanning both
2432 +- * NUMA nodes).
2433 ++ * When in SNC mode, these CPUs enumerate an LLC that is shared
2434 ++ * by multiple NUMA nodes. The LLC is shared for off-package data
2435 ++ * access but private to the NUMA node (half of the package) for
2436 ++ * on-package access. CPUID (the source of the information about
2437 ++ * the LLC) can only enumerate the cache as shared or unshared,
2438 ++ * but not this particular configuration.
2439 + */
2440 +
2441 +-static const struct x86_cpu_id snc_cpu[] = {
2442 +- X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, NULL),
2443 ++static const struct x86_cpu_id intel_cod_cpu[] = {
2444 ++ X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0), /* COD */
2445 ++ X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0), /* COD */
2446 ++ X86_MATCH_INTEL_FAM6_MODEL(ANY, 1), /* SNC */
2447 + {}
2448 + };
2449 +
2450 + static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
2451 + {
2452 ++ const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
2453 + int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
2454 ++ bool intel_snc = id && id->driver_data;
2455 +
2456 + /* Do not match if we do not have a valid APICID for cpu: */
2457 + if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID)
2458 +@@ -489,32 +512,12 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
2459 + * means 'c' does not share the LLC of 'o'. This will be
2460 + * reflected to userspace.
2461 + */
2462 +- if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu))
2463 ++ if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc)
2464 + return false;
2465 +
2466 + return topology_sane(c, o, "llc");
2467 + }
2468 +
2469 +-/*
2470 +- * Unlike the other levels, we do not enforce keeping a
2471 +- * multicore group inside a NUMA node. If this happens, we will
2472 +- * discard the MC level of the topology later.
2473 +- */
2474 +-static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
2475 +-{
2476 +- if (c->phys_proc_id == o->phys_proc_id)
2477 +- return true;
2478 +- return false;
2479 +-}
2480 +-
2481 +-static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
2482 +-{
2483 +- if ((c->phys_proc_id == o->phys_proc_id) &&
2484 +- (c->cpu_die_id == o->cpu_die_id))
2485 +- return true;
2486 +- return false;
2487 +-}
2488 +-
2489 +
2490 + #if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC)
2491 + static inline int x86_sched_itmt_flags(void)
2492 +@@ -586,14 +589,23 @@ void set_cpu_sibling_map(int cpu)
2493 + for_each_cpu(i, cpu_sibling_setup_mask) {
2494 + o = &cpu_data(i);
2495 +
2496 ++ if (match_pkg(c, o) && !topology_same_node(c, o))
2497 ++ x86_has_numa_in_package = true;
2498 ++
2499 + if ((i == cpu) || (has_smt && match_smt(c, o)))
2500 + link_mask(topology_sibling_cpumask, cpu, i);
2501 +
2502 + if ((i == cpu) || (has_mp && match_llc(c, o)))
2503 + link_mask(cpu_llc_shared_mask, cpu, i);
2504 +
2505 ++ if ((i == cpu) || (has_mp && match_die(c, o)))
2506 ++ link_mask(topology_die_cpumask, cpu, i);
2507 + }
2508 +
2509 ++ threads = cpumask_weight(topology_sibling_cpumask(cpu));
2510 ++ if (threads > __max_smt_threads)
2511 ++ __max_smt_threads = threads;
2512 ++
2513 + /*
2514 + * This needs a separate iteration over the cpus because we rely on all
2515 + * topology_sibling_cpumask links to be set-up.
2516 +@@ -607,8 +619,7 @@ void set_cpu_sibling_map(int cpu)
2517 + /*
2518 + * Does this new cpu bringup a new core?
2519 + */
2520 +- if (cpumask_weight(
2521 +- topology_sibling_cpumask(cpu)) == 1) {
2522 ++ if (threads == 1) {
2523 + /*
2524 + * for each core in package, increment
2525 + * the booted_cores for this new cpu
2526 +@@ -625,16 +636,7 @@ void set_cpu_sibling_map(int cpu)
2527 + } else if (i != cpu && !c->booted_cores)
2528 + c->booted_cores = cpu_data(i).booted_cores;
2529 + }
2530 +- if (match_pkg(c, o) && !topology_same_node(c, o))
2531 +- x86_has_numa_in_package = true;
2532 +-
2533 +- if ((i == cpu) || (has_mp && match_die(c, o)))
2534 +- link_mask(topology_die_cpumask, cpu, i);
2535 + }
2536 +-
2537 +- threads = cpumask_weight(topology_sibling_cpumask(cpu));
2538 +- if (threads > __max_smt_threads)
2539 +- __max_smt_threads = threads;
2540 + }
2541 +
2542 + /* maps the cpu to the sched domain representing multi-core */
2543 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
2544 +index 1453b9b794425..d3f2b63167451 100644
2545 +--- a/arch/x86/kvm/emulate.c
2546 ++++ b/arch/x86/kvm/emulate.c
2547 +@@ -4220,7 +4220,7 @@ static bool valid_cr(int nr)
2548 + }
2549 + }
2550 +
2551 +-static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2552 ++static int check_cr_access(struct x86_emulate_ctxt *ctxt)
2553 + {
2554 + if (!valid_cr(ctxt->modrm_reg))
2555 + return emulate_ud(ctxt);
2556 +@@ -4228,80 +4228,6 @@ static int check_cr_read(struct x86_emulate_ctxt *ctxt)
2557 + return X86EMUL_CONTINUE;
2558 + }
2559 +
2560 +-static int check_cr_write(struct x86_emulate_ctxt *ctxt)
2561 +-{
2562 +- u64 new_val = ctxt->src.val64;
2563 +- int cr = ctxt->modrm_reg;
2564 +- u64 efer = 0;
2565 +-
2566 +- static u64 cr_reserved_bits[] = {
2567 +- 0xffffffff00000000ULL,
2568 +- 0, 0, 0, /* CR3 checked later */
2569 +- CR4_RESERVED_BITS,
2570 +- 0, 0, 0,
2571 +- CR8_RESERVED_BITS,
2572 +- };
2573 +-
2574 +- if (!valid_cr(cr))
2575 +- return emulate_ud(ctxt);
2576 +-
2577 +- if (new_val & cr_reserved_bits[cr])
2578 +- return emulate_gp(ctxt, 0);
2579 +-
2580 +- switch (cr) {
2581 +- case 0: {
2582 +- u64 cr4;
2583 +- if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
2584 +- ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
2585 +- return emulate_gp(ctxt, 0);
2586 +-
2587 +- cr4 = ctxt->ops->get_cr(ctxt, 4);
2588 +- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2589 +-
2590 +- if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
2591 +- !(cr4 & X86_CR4_PAE))
2592 +- return emulate_gp(ctxt, 0);
2593 +-
2594 +- break;
2595 +- }
2596 +- case 3: {
2597 +- u64 rsvd = 0;
2598 +-
2599 +- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2600 +- if (efer & EFER_LMA) {
2601 +- u64 maxphyaddr;
2602 +- u32 eax, ebx, ecx, edx;
2603 +-
2604 +- eax = 0x80000008;
2605 +- ecx = 0;
2606 +- if (ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx,
2607 +- &edx, true))
2608 +- maxphyaddr = eax & 0xff;
2609 +- else
2610 +- maxphyaddr = 36;
2611 +- rsvd = rsvd_bits(maxphyaddr, 63);
2612 +- if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PCIDE)
2613 +- rsvd &= ~X86_CR3_PCID_NOFLUSH;
2614 +- }
2615 +-
2616 +- if (new_val & rsvd)
2617 +- return emulate_gp(ctxt, 0);
2618 +-
2619 +- break;
2620 +- }
2621 +- case 4: {
2622 +- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
2623 +-
2624 +- if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
2625 +- return emulate_gp(ctxt, 0);
2626 +-
2627 +- break;
2628 +- }
2629 +- }
2630 +-
2631 +- return X86EMUL_CONTINUE;
2632 +-}
2633 +-
2634 + static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
2635 + {
2636 + unsigned long dr7;
2637 +@@ -4841,10 +4767,10 @@ static const struct opcode twobyte_table[256] = {
2638 + D(ImplicitOps | ModRM | SrcMem | NoAccess), /* 8 * reserved NOP */
2639 + D(ImplicitOps | ModRM | SrcMem | NoAccess), /* NOP + 7 * reserved NOP */
2640 + /* 0x20 - 0x2F */
2641 +- DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
2642 ++ DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_access),
2643 + DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
2644 + IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
2645 +- check_cr_write),
2646 ++ check_cr_access),
2647 + IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
2648 + check_dr_write),
2649 + N, N, N, N,
2650 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
2651 +index 15717a28b212e..2f2576fd343e6 100644
2652 +--- a/arch/x86/kvm/mmu/mmu.c
2653 ++++ b/arch/x86/kvm/mmu/mmu.c
2654 +@@ -3195,14 +3195,14 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
2655 + if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
2656 + (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) {
2657 + mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list);
2658 +- } else {
2659 ++ } else if (mmu->pae_root) {
2660 + for (i = 0; i < 4; ++i)
2661 + if (mmu->pae_root[i] != 0)
2662 + mmu_free_root_page(kvm,
2663 + &mmu->pae_root[i],
2664 + &invalid_list);
2665 +- mmu->root_hpa = INVALID_PAGE;
2666 + }
2667 ++ mmu->root_hpa = INVALID_PAGE;
2668 + mmu->root_pgd = 0;
2669 + }
2670 +
2671 +@@ -3314,9 +3314,23 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
2672 + * the shadow page table may be a PAE or a long mode page table.
2673 + */
2674 + pm_mask = PT_PRESENT_MASK;
2675 +- if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
2676 ++ if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
2677 + pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
2678 +
2679 ++ /*
2680 ++ * Allocate the page for the PDPTEs when shadowing 32-bit NPT
2681 ++ * with 64-bit only when needed. Unlike 32-bit NPT, it doesn't
2682 ++ * need to be in low mem. See also lm_root below.
2683 ++ */
2684 ++ if (!vcpu->arch.mmu->pae_root) {
2685 ++ WARN_ON_ONCE(!tdp_enabled);
2686 ++
2687 ++ vcpu->arch.mmu->pae_root = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2688 ++ if (!vcpu->arch.mmu->pae_root)
2689 ++ return -ENOMEM;
2690 ++ }
2691 ++ }
2692 ++
2693 + for (i = 0; i < 4; ++i) {
2694 + MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
2695 + if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
2696 +@@ -3339,21 +3353,19 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
2697 + vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
2698 +
2699 + /*
2700 +- * If we shadow a 32 bit page table with a long mode page
2701 +- * table we enter this path.
2702 ++ * When shadowing 32-bit or PAE NPT with 64-bit NPT, the PML4 and PDP
2703 ++ * tables are allocated and initialized at MMU creation as there is no
2704 ++ * equivalent level in the guest's NPT to shadow. Allocate the tables
2705 ++ * on demand, as running a 32-bit L1 VMM is very rare. The PDP is
2706 ++ * handled above (to share logic with PAE), deal with the PML4 here.
2707 + */
2708 + if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
2709 + if (vcpu->arch.mmu->lm_root == NULL) {
2710 +- /*
2711 +- * The additional page necessary for this is only
2712 +- * allocated on demand.
2713 +- */
2714 +-
2715 + u64 *lm_root;
2716 +
2717 + lm_root = (void*)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2718 +- if (lm_root == NULL)
2719 +- return 1;
2720 ++ if (!lm_root)
2721 ++ return -ENOMEM;
2722 +
2723 + lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
2724 +
2725 +@@ -3651,6 +3663,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
2726 + struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2727 + bool async;
2728 +
2729 ++ /*
2730 ++ * Retry the page fault if the gfn hit a memslot that is being deleted
2731 ++ * or moved. This ensures any existing SPTEs for the old memslot will
2732 ++ * be zapped before KVM inserts a new MMIO SPTE for the gfn.
2733 ++ */
2734 ++ if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
2735 ++ return true;
2736 ++
2737 + /* Don't expose private memslots to L2. */
2738 + if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
2739 + *pfn = KVM_PFN_NOSLOT;
2740 +@@ -4605,12 +4625,17 @@ void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
2741 + struct kvm_mmu *context = &vcpu->arch.guest_mmu;
2742 + union kvm_mmu_role new_role = kvm_calc_shadow_npt_root_page_role(vcpu);
2743 +
2744 +- context->shadow_root_level = new_role.base.level;
2745 +-
2746 + __kvm_mmu_new_pgd(vcpu, nested_cr3, new_role.base, false, false);
2747 +
2748 +- if (new_role.as_u64 != context->mmu_role.as_u64)
2749 ++ if (new_role.as_u64 != context->mmu_role.as_u64) {
2750 + shadow_mmu_init_context(vcpu, context, cr0, cr4, efer, new_role);
2751 ++
2752 ++ /*
2753 ++ * Override the level set by the common init helper, nested TDP
2754 ++ * always uses the host's TDP configuration.
2755 ++ */
2756 ++ context->shadow_root_level = new_role.base.level;
2757 ++ }
2758 + }
2759 + EXPORT_SYMBOL_GPL(kvm_init_shadow_npt_mmu);
2760 +
2761 +@@ -5297,9 +5322,11 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
2762 + * while the PDP table is a per-vCPU construct that's allocated at MMU
2763 + * creation. When emulating 32-bit mode, cr3 is only 32 bits even on
2764 + * x86_64. Therefore we need to allocate the PDP table in the first
2765 +- * 4GB of memory, which happens to fit the DMA32 zone. Except for
2766 +- * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
2767 +- * skip allocating the PDP table.
2768 ++ * 4GB of memory, which happens to fit the DMA32 zone. TDP paging
2769 ++ * generally doesn't use PAE paging and can skip allocating the PDP
2770 ++ * table. The main exception, handled here, is SVM's 32-bit NPT. The
2771 ++ * other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
2772 ++ * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
2773 + */
2774 + if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
2775 + return 0;
2776 +diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
2777 +index e3e04988fdabe..16b10b9436dc5 100644
2778 +--- a/arch/x86/kvm/svm/sev.c
2779 ++++ b/arch/x86/kvm/svm/sev.c
2780 +@@ -168,6 +168,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
2781 + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
2782 + int asid, ret;
2783 +
2784 ++ if (kvm->created_vcpus)
2785 ++ return -EINVAL;
2786 ++
2787 + ret = -EBUSY;
2788 + if (unlikely(sev->active))
2789 + return ret;
2790 +diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
2791 +index 642f0da31ac4f..ca7a717477e70 100644
2792 +--- a/arch/x86/kvm/svm/svm.c
2793 ++++ b/arch/x86/kvm/svm/svm.c
2794 +@@ -1805,7 +1805,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
2795 +
2796 + static int pf_interception(struct vcpu_svm *svm)
2797 + {
2798 +- u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
2799 ++ u64 fault_address = svm->vmcb->control.exit_info_2;
2800 + u64 error_code = svm->vmcb->control.exit_info_1;
2801 +
2802 + return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
2803 +@@ -2519,6 +2519,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2804 + case MSR_TSC_AUX:
2805 + if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2806 + return 1;
2807 ++ if (!msr_info->host_initiated &&
2808 ++ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
2809 ++ return 1;
2810 + msr_info->data = svm->tsc_aux;
2811 + break;
2812 + /*
2813 +@@ -2713,6 +2716,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
2814 + if (!boot_cpu_has(X86_FEATURE_RDTSCP))
2815 + return 1;
2816 +
2817 ++ if (!msr->host_initiated &&
2818 ++ !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP))
2819 ++ return 1;
2820 ++
2821 + /*
2822 + * This is rare, so we update the MSR here instead of using
2823 + * direct_access_msrs. Doing that would require a rdmsr in
2824 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
2825 +index 15532feb19f10..e8882715735ae 100644
2826 +--- a/arch/x86/kvm/vmx/nested.c
2827 ++++ b/arch/x86/kvm/vmx/nested.c
2828 +@@ -618,6 +618,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
2829 + }
2830 +
2831 + /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
2832 ++#ifdef CONFIG_X86_64
2833 + nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
2834 + MSR_FS_BASE, MSR_TYPE_RW);
2835 +
2836 +@@ -626,6 +627,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
2837 +
2838 + nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
2839 + MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
2840 ++#endif
2841 +
2842 + /*
2843 + * Checking the L0->L1 bitmap is trying to verify two things:
2844 +@@ -4613,9 +4615,9 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
2845 + else if (addr_size == 0)
2846 + off = (gva_t)sign_extend64(off, 15);
2847 + if (base_is_valid)
2848 +- off += kvm_register_read(vcpu, base_reg);
2849 ++ off += kvm_register_readl(vcpu, base_reg);
2850 + if (index_is_valid)
2851 +- off += kvm_register_read(vcpu, index_reg) << scaling;
2852 ++ off += kvm_register_readl(vcpu, index_reg) << scaling;
2853 + vmx_get_segment(vcpu, &s, seg_reg);
2854 +
2855 + /*
2856 +@@ -5491,16 +5493,11 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
2857 + if (!nested_vmx_check_eptp(vcpu, new_eptp))
2858 + return 1;
2859 +
2860 +- kvm_mmu_unload(vcpu);
2861 + mmu->ept_ad = accessed_dirty;
2862 + mmu->mmu_role.base.ad_disabled = !accessed_dirty;
2863 + vmcs12->ept_pointer = new_eptp;
2864 +- /*
2865 +- * TODO: Check what's the correct approach in case
2866 +- * mmu reload fails. Currently, we just let the next
2867 +- * reload potentially fail
2868 +- */
2869 +- kvm_mmu_reload(vcpu);
2870 ++
2871 ++ kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
2872 + }
2873 +
2874 + return 0;
2875 +@@ -5729,7 +5726,7 @@ static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
2876 +
2877 + /* Decode instruction info and find the field to access */
2878 + vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
2879 +- field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
2880 ++ field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
2881 +
2882 + /* Out-of-range fields always cause a VM exit from L2 to L1 */
2883 + if (field >> 15)
2884 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
2885 +index f8835cabf29f3..fca4f452827b7 100644
2886 +--- a/arch/x86/kvm/vmx/vmx.c
2887 ++++ b/arch/x86/kvm/vmx/vmx.c
2888 +@@ -156,9 +156,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
2889 + MSR_IA32_SPEC_CTRL,
2890 + MSR_IA32_PRED_CMD,
2891 + MSR_IA32_TSC,
2892 ++#ifdef CONFIG_X86_64
2893 + MSR_FS_BASE,
2894 + MSR_GS_BASE,
2895 + MSR_KERNEL_GS_BASE,
2896 ++#endif
2897 + MSR_IA32_SYSENTER_CS,
2898 + MSR_IA32_SYSENTER_ESP,
2899 + MSR_IA32_SYSENTER_EIP,
2900 +@@ -5779,7 +5781,6 @@ void dump_vmcs(void)
2901 + u32 vmentry_ctl, vmexit_ctl;
2902 + u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
2903 + unsigned long cr4;
2904 +- u64 efer;
2905 +
2906 + if (!dump_invalid_vmcs) {
2907 + pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
2908 +@@ -5791,7 +5792,6 @@ void dump_vmcs(void)
2909 + cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2910 + pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
2911 + cr4 = vmcs_readl(GUEST_CR4);
2912 +- efer = vmcs_read64(GUEST_IA32_EFER);
2913 + secondary_exec_control = 0;
2914 + if (cpu_has_secondary_exec_ctrls())
2915 + secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
2916 +@@ -5803,9 +5803,7 @@ void dump_vmcs(void)
2917 + pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
2918 + cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
2919 + pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
2920 +- if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
2921 +- (cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
2922 +- {
2923 ++ if (cpu_has_vmx_ept()) {
2924 + pr_err("PDPTR0 = 0x%016llx PDPTR1 = 0x%016llx\n",
2925 + vmcs_read64(GUEST_PDPTR0), vmcs_read64(GUEST_PDPTR1));
2926 + pr_err("PDPTR2 = 0x%016llx PDPTR3 = 0x%016llx\n",
2927 +@@ -5831,7 +5829,8 @@ void dump_vmcs(void)
2928 + if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
2929 + (vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
2930 + pr_err("EFER = 0x%016llx PAT = 0x%016llx\n",
2931 +- efer, vmcs_read64(GUEST_IA32_PAT));
2932 ++ vmcs_read64(GUEST_IA32_EFER),
2933 ++ vmcs_read64(GUEST_IA32_PAT));
2934 + pr_err("DebugCtl = 0x%016llx DebugExceptions = 0x%016lx\n",
2935 + vmcs_read64(GUEST_IA32_DEBUGCTL),
2936 + vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
2937 +@@ -6907,9 +6906,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
2938 + bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
2939 +
2940 + vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
2941 ++#ifdef CONFIG_X86_64
2942 + vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
2943 + vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
2944 + vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
2945 ++#endif
2946 + vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
2947 + vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
2948 + vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
2949 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2950 +index 0d8383b82bca4..0a5dd7568ebc8 100644
2951 +--- a/arch/x86/kvm/x86.c
2952 ++++ b/arch/x86/kvm/x86.c
2953 +@@ -11290,7 +11290,7 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva)
2954 +
2955 + fallthrough;
2956 + case INVPCID_TYPE_ALL_INCL_GLOBAL:
2957 +- kvm_mmu_unload(vcpu);
2958 ++ kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
2959 + return kvm_skip_emulated_instruction(vcpu);
2960 +
2961 + default:
2962 +diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c
2963 +index cd3914fc9f3d4..e94e0050a583a 100644
2964 +--- a/arch/x86/power/hibernate.c
2965 ++++ b/arch/x86/power/hibernate.c
2966 +@@ -13,8 +13,8 @@
2967 + #include <linux/kdebug.h>
2968 + #include <linux/cpu.h>
2969 + #include <linux/pgtable.h>
2970 +-
2971 +-#include <crypto/hash.h>
2972 ++#include <linux/types.h>
2973 ++#include <linux/crc32.h>
2974 +
2975 + #include <asm/e820/api.h>
2976 + #include <asm/init.h>
2977 +@@ -54,95 +54,33 @@ int pfn_is_nosave(unsigned long pfn)
2978 + return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
2979 + }
2980 +
2981 +-
2982 +-#define MD5_DIGEST_SIZE 16
2983 +-
2984 + struct restore_data_record {
2985 + unsigned long jump_address;
2986 + unsigned long jump_address_phys;
2987 + unsigned long cr3;
2988 + unsigned long magic;
2989 +- u8 e820_digest[MD5_DIGEST_SIZE];
2990 ++ unsigned long e820_checksum;
2991 + };
2992 +
2993 +-#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
2994 + /**
2995 +- * get_e820_md5 - calculate md5 according to given e820 table
2996 ++ * compute_e820_crc32 - calculate crc32 of a given e820 table
2997 + *
2998 + * @table: the e820 table to be calculated
2999 +- * @buf: the md5 result to be stored to
3000 ++ *
3001 ++ * Return: the resulting checksum
3002 + */
3003 +-static int get_e820_md5(struct e820_table *table, void *buf)
3004 ++static inline u32 compute_e820_crc32(struct e820_table *table)
3005 + {
3006 +- struct crypto_shash *tfm;
3007 +- struct shash_desc *desc;
3008 +- int size;
3009 +- int ret = 0;
3010 +-
3011 +- tfm = crypto_alloc_shash("md5", 0, 0);
3012 +- if (IS_ERR(tfm))
3013 +- return -ENOMEM;
3014 +-
3015 +- desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
3016 +- GFP_KERNEL);
3017 +- if (!desc) {
3018 +- ret = -ENOMEM;
3019 +- goto free_tfm;
3020 +- }
3021 +-
3022 +- desc->tfm = tfm;
3023 +-
3024 +- size = offsetof(struct e820_table, entries) +
3025 ++ int size = offsetof(struct e820_table, entries) +
3026 + sizeof(struct e820_entry) * table->nr_entries;
3027 +
3028 +- if (crypto_shash_digest(desc, (u8 *)table, size, buf))
3029 +- ret = -EINVAL;
3030 +-
3031 +- kfree_sensitive(desc);
3032 +-
3033 +-free_tfm:
3034 +- crypto_free_shash(tfm);
3035 +- return ret;
3036 +-}
3037 +-
3038 +-static int hibernation_e820_save(void *buf)
3039 +-{
3040 +- return get_e820_md5(e820_table_firmware, buf);
3041 +-}
3042 +-
3043 +-static bool hibernation_e820_mismatch(void *buf)
3044 +-{
3045 +- int ret;
3046 +- u8 result[MD5_DIGEST_SIZE];
3047 +-
3048 +- memset(result, 0, MD5_DIGEST_SIZE);
3049 +- /* If there is no digest in suspend kernel, let it go. */
3050 +- if (!memcmp(result, buf, MD5_DIGEST_SIZE))
3051 +- return false;
3052 +-
3053 +- ret = get_e820_md5(e820_table_firmware, result);
3054 +- if (ret)
3055 +- return true;
3056 +-
3057 +- return memcmp(result, buf, MD5_DIGEST_SIZE) ? true : false;
3058 +-}
3059 +-#else
3060 +-static int hibernation_e820_save(void *buf)
3061 +-{
3062 +- return 0;
3063 +-}
3064 +-
3065 +-static bool hibernation_e820_mismatch(void *buf)
3066 +-{
3067 +- /* If md5 is not builtin for restore kernel, let it go. */
3068 +- return false;
3069 ++ return ~crc32_le(~0, (unsigned char const *)table, size);
3070 + }
3071 +-#endif
3072 +
3073 + #ifdef CONFIG_X86_64
3074 +-#define RESTORE_MAGIC 0x23456789ABCDEF01UL
3075 ++#define RESTORE_MAGIC 0x23456789ABCDEF02UL
3076 + #else
3077 +-#define RESTORE_MAGIC 0x12345678UL
3078 ++#define RESTORE_MAGIC 0x12345679UL
3079 + #endif
3080 +
3081 + /**
3082 +@@ -179,7 +117,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
3083 + */
3084 + rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
3085 +
3086 +- return hibernation_e820_save(rdr->e820_digest);
3087 ++ rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
3088 ++ return 0;
3089 + }
3090 +
3091 + /**
3092 +@@ -200,7 +139,7 @@ int arch_hibernation_header_restore(void *addr)
3093 + jump_address_phys = rdr->jump_address_phys;
3094 + restore_cr3 = rdr->cr3;
3095 +
3096 +- if (hibernation_e820_mismatch(rdr->e820_digest)) {
3097 ++ if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
3098 + pr_crit("Hibernate inconsistent memory map detected!\n");
3099 + return -ENODEV;
3100 + }
3101 +diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
3102 +index a057ecb1288d2..6cd7f7025df47 100644
3103 +--- a/crypto/async_tx/async_xor.c
3104 ++++ b/crypto/async_tx/async_xor.c
3105 +@@ -233,6 +233,7 @@ async_xor_offs(struct page *dest, unsigned int offset,
3106 + if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
3107 + src_cnt--;
3108 + src_list++;
3109 ++ src_offs++;
3110 + }
3111 +
3112 + /* wait for any prerequisite operations */
3113 +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
3114 +index 7a99b19bb893d..0a2da06e9d8bf 100644
3115 +--- a/drivers/acpi/cppc_acpi.c
3116 ++++ b/drivers/acpi/cppc_acpi.c
3117 +@@ -118,23 +118,15 @@ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
3118 + */
3119 + #define NUM_RETRIES 500ULL
3120 +
3121 +-struct cppc_attr {
3122 +- struct attribute attr;
3123 +- ssize_t (*show)(struct kobject *kobj,
3124 +- struct attribute *attr, char *buf);
3125 +- ssize_t (*store)(struct kobject *kobj,
3126 +- struct attribute *attr, const char *c, ssize_t count);
3127 +-};
3128 +-
3129 + #define define_one_cppc_ro(_name) \
3130 +-static struct cppc_attr _name = \
3131 ++static struct kobj_attribute _name = \
3132 + __ATTR(_name, 0444, show_##_name, NULL)
3133 +
3134 + #define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)
3135 +
3136 + #define show_cppc_data(access_fn, struct_name, member_name) \
3137 + static ssize_t show_##member_name(struct kobject *kobj, \
3138 +- struct attribute *attr, char *buf) \
3139 ++ struct kobj_attribute *attr, char *buf) \
3140 + { \
3141 + struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); \
3142 + struct struct_name st_name = {0}; \
3143 +@@ -160,7 +152,7 @@ show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
3144 + show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
3145 +
3146 + static ssize_t show_feedback_ctrs(struct kobject *kobj,
3147 +- struct attribute *attr, char *buf)
3148 ++ struct kobj_attribute *attr, char *buf)
3149 + {
3150 + struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
3151 + struct cppc_perf_fb_ctrs fb_ctrs = {0};
3152 +diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
3153 +index de638dafce21e..b2f5520882918 100644
3154 +--- a/drivers/ata/libahci_platform.c
3155 ++++ b/drivers/ata/libahci_platform.c
3156 +@@ -582,11 +582,13 @@ int ahci_platform_init_host(struct platform_device *pdev,
3157 + int i, irq, n_ports, rc;
3158 +
3159 + irq = platform_get_irq(pdev, 0);
3160 +- if (irq <= 0) {
3161 ++ if (irq < 0) {
3162 + if (irq != -EPROBE_DEFER)
3163 + dev_err(dev, "no irq\n");
3164 + return irq;
3165 + }
3166 ++ if (!irq)
3167 ++ return -EINVAL;
3168 +
3169 + hpriv->irq = irq;
3170 +
3171 +diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
3172 +index e9cf31f384506..63f39440a9b42 100644
3173 +--- a/drivers/ata/pata_arasan_cf.c
3174 ++++ b/drivers/ata/pata_arasan_cf.c
3175 +@@ -818,12 +818,19 @@ static int arasan_cf_probe(struct platform_device *pdev)
3176 + else
3177 + quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */
3178 +
3179 +- /* if irq is 0, support only PIO */
3180 +- acdev->irq = platform_get_irq(pdev, 0);
3181 +- if (acdev->irq)
3182 ++ /*
3183 ++ * If there's an error getting IRQ (or we do get IRQ0),
3184 ++ * support only PIO
3185 ++ */
3186 ++ ret = platform_get_irq(pdev, 0);
3187 ++ if (ret > 0) {
3188 ++ acdev->irq = ret;
3189 + irq_handler = arasan_cf_interrupt;
3190 +- else
3191 ++ } else if (ret == -EPROBE_DEFER) {
3192 ++ return ret;
3193 ++ } else {
3194 + quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
3195 ++ }
3196 +
3197 + acdev->pbase = res->start;
3198 + acdev->vbase = devm_ioremap(&pdev->dev, res->start,
3199 +diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
3200 +index d1644a8ef9fa6..abc0e87ca1a8b 100644
3201 +--- a/drivers/ata/pata_ixp4xx_cf.c
3202 ++++ b/drivers/ata/pata_ixp4xx_cf.c
3203 +@@ -165,8 +165,12 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
3204 + return -ENOMEM;
3205 +
3206 + irq = platform_get_irq(pdev, 0);
3207 +- if (irq)
3208 ++ if (irq > 0)
3209 + irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
3210 ++ else if (irq < 0)
3211 ++ return irq;
3212 ++ else
3213 ++ return -EINVAL;
3214 +
3215 + /* Setup expansion bus chip selects */
3216 + *data->cs0_cfg = data->cs0_bits;
3217 +diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
3218 +index 664ef658a955f..b62446ea5f408 100644
3219 +--- a/drivers/ata/sata_mv.c
3220 ++++ b/drivers/ata/sata_mv.c
3221 +@@ -4097,6 +4097,10 @@ static int mv_platform_probe(struct platform_device *pdev)
3222 + n_ports = mv_platform_data->n_ports;
3223 + irq = platform_get_irq(pdev, 0);
3224 + }
3225 ++ if (irq < 0)
3226 ++ return irq;
3227 ++ if (!irq)
3228 ++ return -EINVAL;
3229 +
3230 + host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3231 + hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3232 +diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
3233 +index eac184e6d6577..a71d141179439 100644
3234 +--- a/drivers/base/devtmpfs.c
3235 ++++ b/drivers/base/devtmpfs.c
3236 +@@ -416,7 +416,6 @@ static int __init devtmpfs_setup(void *p)
3237 + init_chroot(".");
3238 + out:
3239 + *(int *)p = err;
3240 +- complete(&setup_done);
3241 + return err;
3242 + }
3243 +
3244 +@@ -429,6 +428,7 @@ static int __ref devtmpfsd(void *p)
3245 + {
3246 + int err = devtmpfs_setup(p);
3247 +
3248 ++ complete(&setup_done);
3249 + if (err)
3250 + return err;
3251 + devtmpfs_work_loop();
3252 +diff --git a/drivers/base/node.c b/drivers/base/node.c
3253 +index 6ffa470e29840..21965de8538be 100644
3254 +--- a/drivers/base/node.c
3255 ++++ b/drivers/base/node.c
3256 +@@ -268,21 +268,20 @@ static void node_init_cache_dev(struct node *node)
3257 + if (!dev)
3258 + return;
3259 +
3260 ++ device_initialize(dev);
3261 + dev->parent = &node->dev;
3262 + dev->release = node_cache_release;
3263 + if (dev_set_name(dev, "memory_side_cache"))
3264 +- goto free_dev;
3265 ++ goto put_device;
3266 +
3267 +- if (device_register(dev))
3268 +- goto free_name;
3269 ++ if (device_add(dev))
3270 ++ goto put_device;
3271 +
3272 + pm_runtime_no_callbacks(dev);
3273 + node->cache_dev = dev;
3274 + return;
3275 +-free_name:
3276 +- kfree_const(dev->kobj.name);
3277 +-free_dev:
3278 +- kfree(dev);
3279 ++put_device:
3280 ++ put_device(dev);
3281 + }
3282 +
3283 + /**
3284 +@@ -319,25 +318,24 @@ void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
3285 + return;
3286 +
3287 + dev = &info->dev;
3288 ++ device_initialize(dev);
3289 + dev->parent = node->cache_dev;
3290 + dev->release = node_cacheinfo_release;
3291 + dev->groups = cache_groups;
3292 + if (dev_set_name(dev, "index%d", cache_attrs->level))
3293 +- goto free_cache;
3294 ++ goto put_device;
3295 +
3296 + info->cache_attrs = *cache_attrs;
3297 +- if (device_register(dev)) {
3298 ++ if (device_add(dev)) {
3299 + dev_warn(&node->dev, "failed to add cache level:%d\n",
3300 + cache_attrs->level);
3301 +- goto free_name;
3302 ++ goto put_device;
3303 + }
3304 + pm_runtime_no_callbacks(dev);
3305 + list_add_tail(&info->node, &node->cache_attrs);
3306 + return;
3307 +-free_name:
3308 +- kfree_const(dev->kobj.name);
3309 +-free_cache:
3310 +- kfree(info);
3311 ++put_device:
3312 ++ put_device(dev);
3313 + }
3314 +
3315 + static void node_remove_caches(struct node *node)
3316 +diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
3317 +index ff2ee87987c7e..211a335a608d7 100644
3318 +--- a/drivers/base/regmap/regmap-debugfs.c
3319 ++++ b/drivers/base/regmap/regmap-debugfs.c
3320 +@@ -660,6 +660,7 @@ void regmap_debugfs_exit(struct regmap *map)
3321 + regmap_debugfs_free_dump_cache(map);
3322 + mutex_unlock(&map->cache_lock);
3323 + kfree(map->debugfs_name);
3324 ++ map->debugfs_name = NULL;
3325 + } else {
3326 + struct regmap_debugfs_node *node, *tmp;
3327 +
3328 +diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
3329 +index 172f720b8d637..f5df82c26c16f 100644
3330 +--- a/drivers/block/null_blk_zoned.c
3331 ++++ b/drivers/block/null_blk_zoned.c
3332 +@@ -149,6 +149,7 @@ void null_free_zoned_dev(struct nullb_device *dev)
3333 + {
3334 + bitmap_free(dev->zone_locks);
3335 + kvfree(dev->zones);
3336 ++ dev->zones = NULL;
3337 + }
3338 +
3339 + static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno)
3340 +diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
3341 +index a1b9df2c4ef1a..040829e2d0162 100644
3342 +--- a/drivers/block/xen-blkback/common.h
3343 ++++ b/drivers/block/xen-blkback/common.h
3344 +@@ -313,6 +313,7 @@ struct xen_blkif {
3345 +
3346 + struct work_struct free_work;
3347 + unsigned int nr_ring_pages;
3348 ++ bool multi_ref;
3349 + /* All rings for this device. */
3350 + struct xen_blkif_ring *rings;
3351 + unsigned int nr_rings;
3352 +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
3353 +index 9860d4842f36c..6c5e9373e91c3 100644
3354 +--- a/drivers/block/xen-blkback/xenbus.c
3355 ++++ b/drivers/block/xen-blkback/xenbus.c
3356 +@@ -998,14 +998,17 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
3357 + for (i = 0; i < nr_grefs; i++) {
3358 + char ring_ref_name[RINGREF_NAME_LEN];
3359 +
3360 +- snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
3361 ++ if (blkif->multi_ref)
3362 ++ snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
3363 ++ else {
3364 ++ WARN_ON(i != 0);
3365 ++ snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref");
3366 ++ }
3367 ++
3368 + err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
3369 + "%u", &ring_ref[i]);
3370 +
3371 + if (err != 1) {
3372 +- if (nr_grefs == 1)
3373 +- break;
3374 +-
3375 + err = -EINVAL;
3376 + xenbus_dev_fatal(dev, err, "reading %s/%s",
3377 + dir, ring_ref_name);
3378 +@@ -1013,18 +1016,6 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
3379 + }
3380 + }
3381 +
3382 +- if (err != 1) {
3383 +- WARN_ON(nr_grefs != 1);
3384 +-
3385 +- err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
3386 +- &ring_ref[0]);
3387 +- if (err != 1) {
3388 +- err = -EINVAL;
3389 +- xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
3390 +- return err;
3391 +- }
3392 +- }
3393 +-
3394 + err = -ENOMEM;
3395 + for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
3396 + req = kzalloc(sizeof(*req), GFP_KERNEL);
3397 +@@ -1129,10 +1120,15 @@ static int connect_ring(struct backend_info *be)
3398 + blkif->nr_rings, blkif->blk_protocol, protocol,
3399 + blkif->vbd.feature_gnt_persistent ? "persistent grants" : "");
3400 +
3401 +- ring_page_order = xenbus_read_unsigned(dev->otherend,
3402 +- "ring-page-order", 0);
3403 +-
3404 +- if (ring_page_order > xen_blkif_max_ring_order) {
3405 ++ err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u",
3406 ++ &ring_page_order);
3407 ++ if (err != 1) {
3408 ++ blkif->nr_ring_pages = 1;
3409 ++ blkif->multi_ref = false;
3410 ++ } else if (ring_page_order <= xen_blkif_max_ring_order) {
3411 ++ blkif->nr_ring_pages = 1 << ring_page_order;
3412 ++ blkif->multi_ref = true;
3413 ++ } else {
3414 + err = -EINVAL;
3415 + xenbus_dev_fatal(dev, err,
3416 + "requested ring page order %d exceed max:%d",
3417 +@@ -1141,8 +1137,6 @@ static int connect_ring(struct backend_info *be)
3418 + return err;
3419 + }
3420 +
3421 +- blkif->nr_ring_pages = 1 << ring_page_order;
3422 +-
3423 + if (blkif->nr_rings == 1)
3424 + return read_per_ring_refs(&blkif->rings[0], dev->otherend);
3425 + else {
3426 +diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c
3427 +index 03ddcf426887b..0b8f53a688b8a 100644
3428 +--- a/drivers/bus/qcom-ebi2.c
3429 ++++ b/drivers/bus/qcom-ebi2.c
3430 +@@ -353,8 +353,10 @@ static int qcom_ebi2_probe(struct platform_device *pdev)
3431 +
3432 + /* Figure out the chipselect */
3433 + ret = of_property_read_u32(child, "reg", &csindex);
3434 +- if (ret)
3435 ++ if (ret) {
3436 ++ of_node_put(child);
3437 + return ret;
3438 ++ }
3439 +
3440 + if (csindex > 5) {
3441 + dev_err(dev,
3442 +diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
3443 +index 6a0059e508e38..93f5d11c830b7 100644
3444 +--- a/drivers/char/ttyprintk.c
3445 ++++ b/drivers/char/ttyprintk.c
3446 +@@ -158,12 +158,23 @@ static int tpk_ioctl(struct tty_struct *tty,
3447 + return 0;
3448 + }
3449 +
3450 ++/*
3451 ++ * TTY operations hangup function.
3452 ++ */
3453 ++static void tpk_hangup(struct tty_struct *tty)
3454 ++{
3455 ++ struct ttyprintk_port *tpkp = tty->driver_data;
3456 ++
3457 ++ tty_port_hangup(&tpkp->port);
3458 ++}
3459 ++
3460 + static const struct tty_operations ttyprintk_ops = {
3461 + .open = tpk_open,
3462 + .close = tpk_close,
3463 + .write = tpk_write,
3464 + .write_room = tpk_write_room,
3465 + .ioctl = tpk_ioctl,
3466 ++ .hangup = tpk_hangup,
3467 + };
3468 +
3469 + static const struct tty_port_operations null_ops = { };
3470 +diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
3471 +index a55b37fc2c8bd..bc3be5f3eae15 100644
3472 +--- a/drivers/clk/clk-ast2600.c
3473 ++++ b/drivers/clk/clk-ast2600.c
3474 +@@ -61,10 +61,10 @@ static void __iomem *scu_g6_base;
3475 + static const struct aspeed_gate_data aspeed_g6_gates[] = {
3476 + /* clk rst name parent flags */
3477 + [ASPEED_CLK_GATE_MCLK] = { 0, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
3478 +- [ASPEED_CLK_GATE_ECLK] = { 1, -1, "eclk-gate", "eclk", 0 }, /* Video Engine */
3479 ++ [ASPEED_CLK_GATE_ECLK] = { 1, 6, "eclk-gate", "eclk", 0 }, /* Video Engine */
3480 + [ASPEED_CLK_GATE_GCLK] = { 2, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
3481 + /* vclk parent - dclk/d1clk/hclk/mclk */
3482 +- [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
3483 ++ [ASPEED_CLK_GATE_VCLK] = { 3, -1, "vclk-gate", NULL, 0 }, /* Video Capture */
3484 + [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
3485 + /* From dpll */
3486 + [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
3487 +diff --git a/drivers/clk/imx/clk-imx25.c b/drivers/clk/imx/clk-imx25.c
3488 +index a66cabfbf94f1..66192fe0a898c 100644
3489 +--- a/drivers/clk/imx/clk-imx25.c
3490 ++++ b/drivers/clk/imx/clk-imx25.c
3491 +@@ -73,16 +73,6 @@ enum mx25_clks {
3492 +
3493 + static struct clk *clk[clk_max];
3494 +
3495 +-static struct clk ** const uart_clks[] __initconst = {
3496 +- &clk[uart_ipg_per],
3497 +- &clk[uart1_ipg],
3498 +- &clk[uart2_ipg],
3499 +- &clk[uart3_ipg],
3500 +- &clk[uart4_ipg],
3501 +- &clk[uart5_ipg],
3502 +- NULL
3503 +-};
3504 +-
3505 + static int __init __mx25_clocks_init(void __iomem *ccm_base)
3506 + {
3507 + BUG_ON(!ccm_base);
3508 +@@ -228,7 +218,7 @@ static int __init __mx25_clocks_init(void __iomem *ccm_base)
3509 + */
3510 + clk_set_parent(clk[cko_sel], clk[ipg]);
3511 +
3512 +- imx_register_uart_clocks(uart_clks);
3513 ++ imx_register_uart_clocks(6);
3514 +
3515 + return 0;
3516 + }
3517 +diff --git a/drivers/clk/imx/clk-imx27.c b/drivers/clk/imx/clk-imx27.c
3518 +index 5585ded8b8c6f..56a5fc402b10c 100644
3519 +--- a/drivers/clk/imx/clk-imx27.c
3520 ++++ b/drivers/clk/imx/clk-imx27.c
3521 +@@ -49,17 +49,6 @@ static const char *ssi_sel_clks[] = { "spll_gate", "mpll", };
3522 + static struct clk *clk[IMX27_CLK_MAX];
3523 + static struct clk_onecell_data clk_data;
3524 +
3525 +-static struct clk ** const uart_clks[] __initconst = {
3526 +- &clk[IMX27_CLK_PER1_GATE],
3527 +- &clk[IMX27_CLK_UART1_IPG_GATE],
3528 +- &clk[IMX27_CLK_UART2_IPG_GATE],
3529 +- &clk[IMX27_CLK_UART3_IPG_GATE],
3530 +- &clk[IMX27_CLK_UART4_IPG_GATE],
3531 +- &clk[IMX27_CLK_UART5_IPG_GATE],
3532 +- &clk[IMX27_CLK_UART6_IPG_GATE],
3533 +- NULL
3534 +-};
3535 +-
3536 + static void __init _mx27_clocks_init(unsigned long fref)
3537 + {
3538 + BUG_ON(!ccm);
3539 +@@ -176,7 +165,7 @@ static void __init _mx27_clocks_init(unsigned long fref)
3540 +
3541 + clk_prepare_enable(clk[IMX27_CLK_EMI_AHB_GATE]);
3542 +
3543 +- imx_register_uart_clocks(uart_clks);
3544 ++ imx_register_uart_clocks(7);
3545 +
3546 + imx_print_silicon_rev("i.MX27", mx27_revision());
3547 + }
3548 +diff --git a/drivers/clk/imx/clk-imx35.c b/drivers/clk/imx/clk-imx35.c
3549 +index c1df03665c09a..0fe5ac2101566 100644
3550 +--- a/drivers/clk/imx/clk-imx35.c
3551 ++++ b/drivers/clk/imx/clk-imx35.c
3552 +@@ -82,14 +82,6 @@ enum mx35_clks {
3553 +
3554 + static struct clk *clk[clk_max];
3555 +
3556 +-static struct clk ** const uart_clks[] __initconst = {
3557 +- &clk[ipg],
3558 +- &clk[uart1_gate],
3559 +- &clk[uart2_gate],
3560 +- &clk[uart3_gate],
3561 +- NULL
3562 +-};
3563 +-
3564 + static void __init _mx35_clocks_init(void)
3565 + {
3566 + void __iomem *base;
3567 +@@ -243,7 +235,7 @@ static void __init _mx35_clocks_init(void)
3568 + */
3569 + clk_prepare_enable(clk[scc_gate]);
3570 +
3571 +- imx_register_uart_clocks(uart_clks);
3572 ++ imx_register_uart_clocks(4);
3573 +
3574 + imx_print_silicon_rev("i.MX35", mx35_revision());
3575 + }
3576 +diff --git a/drivers/clk/imx/clk-imx5.c b/drivers/clk/imx/clk-imx5.c
3577 +index 01e079b810261..e4493846454dd 100644
3578 +--- a/drivers/clk/imx/clk-imx5.c
3579 ++++ b/drivers/clk/imx/clk-imx5.c
3580 +@@ -128,30 +128,6 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
3581 + static struct clk *clk[IMX5_CLK_END];
3582 + static struct clk_onecell_data clk_data;
3583 +
3584 +-static struct clk ** const uart_clks_mx51[] __initconst = {
3585 +- &clk[IMX5_CLK_UART1_IPG_GATE],
3586 +- &clk[IMX5_CLK_UART1_PER_GATE],
3587 +- &clk[IMX5_CLK_UART2_IPG_GATE],
3588 +- &clk[IMX5_CLK_UART2_PER_GATE],
3589 +- &clk[IMX5_CLK_UART3_IPG_GATE],
3590 +- &clk[IMX5_CLK_UART3_PER_GATE],
3591 +- NULL
3592 +-};
3593 +-
3594 +-static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
3595 +- &clk[IMX5_CLK_UART1_IPG_GATE],
3596 +- &clk[IMX5_CLK_UART1_PER_GATE],
3597 +- &clk[IMX5_CLK_UART2_IPG_GATE],
3598 +- &clk[IMX5_CLK_UART2_PER_GATE],
3599 +- &clk[IMX5_CLK_UART3_IPG_GATE],
3600 +- &clk[IMX5_CLK_UART3_PER_GATE],
3601 +- &clk[IMX5_CLK_UART4_IPG_GATE],
3602 +- &clk[IMX5_CLK_UART4_PER_GATE],
3603 +- &clk[IMX5_CLK_UART5_IPG_GATE],
3604 +- &clk[IMX5_CLK_UART5_PER_GATE],
3605 +- NULL
3606 +-};
3607 +-
3608 + static void __init mx5_clocks_common_init(void __iomem *ccm_base)
3609 + {
3610 + clk[IMX5_CLK_DUMMY] = imx_clk_fixed("dummy", 0);
3611 +@@ -382,7 +358,7 @@ static void __init mx50_clocks_init(struct device_node *np)
3612 + r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
3613 + clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
3614 +
3615 +- imx_register_uart_clocks(uart_clks_mx50_mx53);
3616 ++ imx_register_uart_clocks(5);
3617 + }
3618 + CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
3619 +
3620 +@@ -488,7 +464,7 @@ static void __init mx51_clocks_init(struct device_node *np)
3621 + val |= 1 << 23;
3622 + writel(val, MXC_CCM_CLPCR);
3623 +
3624 +- imx_register_uart_clocks(uart_clks_mx51);
3625 ++ imx_register_uart_clocks(3);
3626 + }
3627 + CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
3628 +
3629 +@@ -633,6 +609,6 @@ static void __init mx53_clocks_init(struct device_node *np)
3630 + r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
3631 + clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
3632 +
3633 +- imx_register_uart_clocks(uart_clks_mx50_mx53);
3634 ++ imx_register_uart_clocks(5);
3635 + }
3636 + CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
3637 +diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
3638 +index b2ff187cedabc..f444bbe8244c2 100644
3639 +--- a/drivers/clk/imx/clk-imx6q.c
3640 ++++ b/drivers/clk/imx/clk-imx6q.c
3641 +@@ -140,13 +140,6 @@ static inline int clk_on_imx6dl(void)
3642 + return of_machine_is_compatible("fsl,imx6dl");
3643 + }
3644 +
3645 +-static const int uart_clk_ids[] __initconst = {
3646 +- IMX6QDL_CLK_UART_IPG,
3647 +- IMX6QDL_CLK_UART_SERIAL,
3648 +-};
3649 +-
3650 +-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
3651 +-
3652 + static int ldb_di_sel_by_clock_id(int clock_id)
3653 + {
3654 + switch (clock_id) {
3655 +@@ -440,7 +433,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
3656 + struct device_node *np;
3657 + void __iomem *anatop_base, *base;
3658 + int ret;
3659 +- int i;
3660 +
3661 + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
3662 + IMX6QDL_CLK_END), GFP_KERNEL);
3663 +@@ -982,12 +974,6 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
3664 + hws[IMX6QDL_CLK_PLL3_USB_OTG]->clk);
3665 + }
3666 +
3667 +- for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
3668 +- int index = uart_clk_ids[i];
3669 +-
3670 +- uart_clks[i] = &hws[index]->clk;
3671 +- }
3672 +-
3673 +- imx_register_uart_clocks(uart_clks);
3674 ++ imx_register_uart_clocks(1);
3675 + }
3676 + CLK_OF_DECLARE(imx6q, "fsl,imx6q-ccm", imx6q_clocks_init);
3677 +diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
3678 +index 2f9361946a0e1..d997b5b078183 100644
3679 +--- a/drivers/clk/imx/clk-imx6sl.c
3680 ++++ b/drivers/clk/imx/clk-imx6sl.c
3681 +@@ -178,19 +178,11 @@ void imx6sl_set_wait_clk(bool enter)
3682 + imx6sl_enable_pll_arm(false);
3683 + }
3684 +
3685 +-static const int uart_clk_ids[] __initconst = {
3686 +- IMX6SL_CLK_UART,
3687 +- IMX6SL_CLK_UART_SERIAL,
3688 +-};
3689 +-
3690 +-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
3691 +-
3692 + static void __init imx6sl_clocks_init(struct device_node *ccm_node)
3693 + {
3694 + struct device_node *np;
3695 + void __iomem *base;
3696 + int ret;
3697 +- int i;
3698 +
3699 + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
3700 + IMX6SL_CLK_END), GFP_KERNEL);
3701 +@@ -447,12 +439,6 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
3702 + clk_set_parent(hws[IMX6SL_CLK_LCDIF_AXI_SEL]->clk,
3703 + hws[IMX6SL_CLK_PLL2_PFD2]->clk);
3704 +
3705 +- for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
3706 +- int index = uart_clk_ids[i];
3707 +-
3708 +- uart_clks[i] = &hws[index]->clk;
3709 +- }
3710 +-
3711 +- imx_register_uart_clocks(uart_clks);
3712 ++ imx_register_uart_clocks(2);
3713 + }
3714 + CLK_OF_DECLARE(imx6sl, "fsl,imx6sl-ccm", imx6sl_clocks_init);
3715 +diff --git a/drivers/clk/imx/clk-imx6sll.c b/drivers/clk/imx/clk-imx6sll.c
3716 +index 8e8288bda4d0b..31d777f300395 100644
3717 +--- a/drivers/clk/imx/clk-imx6sll.c
3718 ++++ b/drivers/clk/imx/clk-imx6sll.c
3719 +@@ -76,26 +76,10 @@ static u32 share_count_ssi1;
3720 + static u32 share_count_ssi2;
3721 + static u32 share_count_ssi3;
3722 +
3723 +-static const int uart_clk_ids[] __initconst = {
3724 +- IMX6SLL_CLK_UART1_IPG,
3725 +- IMX6SLL_CLK_UART1_SERIAL,
3726 +- IMX6SLL_CLK_UART2_IPG,
3727 +- IMX6SLL_CLK_UART2_SERIAL,
3728 +- IMX6SLL_CLK_UART3_IPG,
3729 +- IMX6SLL_CLK_UART3_SERIAL,
3730 +- IMX6SLL_CLK_UART4_IPG,
3731 +- IMX6SLL_CLK_UART4_SERIAL,
3732 +- IMX6SLL_CLK_UART5_IPG,
3733 +- IMX6SLL_CLK_UART5_SERIAL,
3734 +-};
3735 +-
3736 +-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
3737 +-
3738 + static void __init imx6sll_clocks_init(struct device_node *ccm_node)
3739 + {
3740 + struct device_node *np;
3741 + void __iomem *base;
3742 +- int i;
3743 +
3744 + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
3745 + IMX6SLL_CLK_END), GFP_KERNEL);
3746 +@@ -356,13 +340,7 @@ static void __init imx6sll_clocks_init(struct device_node *ccm_node)
3747 +
3748 + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
3749 +
3750 +- for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
3751 +- int index = uart_clk_ids[i];
3752 +-
3753 +- uart_clks[i] = &hws[index]->clk;
3754 +- }
3755 +-
3756 +- imx_register_uart_clocks(uart_clks);
3757 ++ imx_register_uart_clocks(5);
3758 +
3759 + /* Lower the AHB clock rate before changing the clock source. */
3760 + clk_set_rate(hws[IMX6SLL_CLK_AHB]->clk, 99000000);
3761 +diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
3762 +index 20dcce526d072..fc1bd23d45834 100644
3763 +--- a/drivers/clk/imx/clk-imx6sx.c
3764 ++++ b/drivers/clk/imx/clk-imx6sx.c
3765 +@@ -117,18 +117,10 @@ static u32 share_count_ssi3;
3766 + static u32 share_count_sai1;
3767 + static u32 share_count_sai2;
3768 +
3769 +-static const int uart_clk_ids[] __initconst = {
3770 +- IMX6SX_CLK_UART_IPG,
3771 +- IMX6SX_CLK_UART_SERIAL,
3772 +-};
3773 +-
3774 +-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
3775 +-
3776 + static void __init imx6sx_clocks_init(struct device_node *ccm_node)
3777 + {
3778 + struct device_node *np;
3779 + void __iomem *base;
3780 +- int i;
3781 +
3782 + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
3783 + IMX6SX_CLK_CLK_END), GFP_KERNEL);
3784 +@@ -556,12 +548,6 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
3785 + clk_set_parent(hws[IMX6SX_CLK_QSPI1_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
3786 + clk_set_parent(hws[IMX6SX_CLK_QSPI2_SEL]->clk, hws[IMX6SX_CLK_PLL2_BUS]->clk);
3787 +
3788 +- for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
3789 +- int index = uart_clk_ids[i];
3790 +-
3791 +- uart_clks[i] = &hws[index]->clk;
3792 +- }
3793 +-
3794 +- imx_register_uart_clocks(uart_clks);
3795 ++ imx_register_uart_clocks(2);
3796 + }
3797 + CLK_OF_DECLARE(imx6sx, "fsl,imx6sx-ccm", imx6sx_clocks_init);
3798 +diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
3799 +index 22d24a6a05e70..c4e0f1c07192f 100644
3800 +--- a/drivers/clk/imx/clk-imx7d.c
3801 ++++ b/drivers/clk/imx/clk-imx7d.c
3802 +@@ -377,23 +377,10 @@ static const char *pll_video_bypass_sel[] = { "pll_video_main", "pll_video_main_
3803 + static struct clk_hw **hws;
3804 + static struct clk_hw_onecell_data *clk_hw_data;
3805 +
3806 +-static const int uart_clk_ids[] __initconst = {
3807 +- IMX7D_UART1_ROOT_CLK,
3808 +- IMX7D_UART2_ROOT_CLK,
3809 +- IMX7D_UART3_ROOT_CLK,
3810 +- IMX7D_UART4_ROOT_CLK,
3811 +- IMX7D_UART5_ROOT_CLK,
3812 +- IMX7D_UART6_ROOT_CLK,
3813 +- IMX7D_UART7_ROOT_CLK,
3814 +-};
3815 +-
3816 +-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1] __initdata;
3817 +-
3818 + static void __init imx7d_clocks_init(struct device_node *ccm_node)
3819 + {
3820 + struct device_node *np;
3821 + void __iomem *base;
3822 +- int i;
3823 +
3824 + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
3825 + IMX7D_CLK_END), GFP_KERNEL);
3826 +@@ -897,14 +884,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
3827 + hws[IMX7D_USB1_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb1_main_clk", "osc", 20, 1);
3828 + hws[IMX7D_USB_MAIN_480M_CLK] = imx_clk_hw_fixed_factor("pll_usb_main_clk", "osc", 20, 1);
3829 +
3830 +- for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
3831 +- int index = uart_clk_ids[i];
3832 +-
3833 +- uart_clks[i] = &hws[index]->clk;
3834 +- }
3835 +-
3836 +-
3837 +- imx_register_uart_clocks(uart_clks);
3838 ++ imx_register_uart_clocks(7);
3839 +
3840 + }
3841 + CLK_OF_DECLARE(imx7d, "fsl,imx7d-ccm", imx7d_clocks_init);
3842 +diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
3843 +index 634c0b6636b0e..779e09105da7d 100644
3844 +--- a/drivers/clk/imx/clk-imx7ulp.c
3845 ++++ b/drivers/clk/imx/clk-imx7ulp.c
3846 +@@ -43,19 +43,6 @@ static const struct clk_div_table ulp_div_table[] = {
3847 + { /* sentinel */ },
3848 + };
3849 +
3850 +-static const int pcc2_uart_clk_ids[] __initconst = {
3851 +- IMX7ULP_CLK_LPUART4,
3852 +- IMX7ULP_CLK_LPUART5,
3853 +-};
3854 +-
3855 +-static const int pcc3_uart_clk_ids[] __initconst = {
3856 +- IMX7ULP_CLK_LPUART6,
3857 +- IMX7ULP_CLK_LPUART7,
3858 +-};
3859 +-
3860 +-static struct clk **pcc2_uart_clks[ARRAY_SIZE(pcc2_uart_clk_ids) + 1] __initdata;
3861 +-static struct clk **pcc3_uart_clks[ARRAY_SIZE(pcc3_uart_clk_ids) + 1] __initdata;
3862 +-
3863 + static void __init imx7ulp_clk_scg1_init(struct device_node *np)
3864 + {
3865 + struct clk_hw_onecell_data *clk_data;
3866 +@@ -150,7 +137,6 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
3867 + struct clk_hw_onecell_data *clk_data;
3868 + struct clk_hw **hws;
3869 + void __iomem *base;
3870 +- int i;
3871 +
3872 + clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC2_END),
3873 + GFP_KERNEL);
3874 +@@ -190,13 +176,7 @@ static void __init imx7ulp_clk_pcc2_init(struct device_node *np)
3875 +
3876 + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
3877 +
3878 +- for (i = 0; i < ARRAY_SIZE(pcc2_uart_clk_ids); i++) {
3879 +- int index = pcc2_uart_clk_ids[i];
3880 +-
3881 +- pcc2_uart_clks[i] = &hws[index]->clk;
3882 +- }
3883 +-
3884 +- imx_register_uart_clocks(pcc2_uart_clks);
3885 ++ imx_register_uart_clocks(2);
3886 + }
3887 + CLK_OF_DECLARE(imx7ulp_clk_pcc2, "fsl,imx7ulp-pcc2", imx7ulp_clk_pcc2_init);
3888 +
3889 +@@ -205,7 +185,6 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
3890 + struct clk_hw_onecell_data *clk_data;
3891 + struct clk_hw **hws;
3892 + void __iomem *base;
3893 +- int i;
3894 +
3895 + clk_data = kzalloc(struct_size(clk_data, hws, IMX7ULP_CLK_PCC3_END),
3896 + GFP_KERNEL);
3897 +@@ -244,13 +223,7 @@ static void __init imx7ulp_clk_pcc3_init(struct device_node *np)
3898 +
3899 + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
3900 +
3901 +- for (i = 0; i < ARRAY_SIZE(pcc3_uart_clk_ids); i++) {
3902 +- int index = pcc3_uart_clk_ids[i];
3903 +-
3904 +- pcc3_uart_clks[i] = &hws[index]->clk;
3905 +- }
3906 +-
3907 +- imx_register_uart_clocks(pcc3_uart_clks);
3908 ++ imx_register_uart_clocks(7);
3909 + }
3910 + CLK_OF_DECLARE(imx7ulp_clk_pcc3, "fsl,imx7ulp-pcc3", imx7ulp_clk_pcc3_init);
3911 +
3912 +diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
3913 +index f358ad9072990..4cbf86ab2eacf 100644
3914 +--- a/drivers/clk/imx/clk-imx8mm.c
3915 ++++ b/drivers/clk/imx/clk-imx8mm.c
3916 +@@ -291,20 +291,12 @@ static const char *imx8mm_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_
3917 + static struct clk_hw_onecell_data *clk_hw_data;
3918 + static struct clk_hw **hws;
3919 +
3920 +-static const int uart_clk_ids[] = {
3921 +- IMX8MM_CLK_UART1_ROOT,
3922 +- IMX8MM_CLK_UART2_ROOT,
3923 +- IMX8MM_CLK_UART3_ROOT,
3924 +- IMX8MM_CLK_UART4_ROOT,
3925 +-};
3926 +-static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
3927 +-
3928 + static int imx8mm_clocks_probe(struct platform_device *pdev)
3929 + {
3930 + struct device *dev = &pdev->dev;
3931 + struct device_node *np = dev->of_node;
3932 + void __iomem *base;
3933 +- int ret, i;
3934 ++ int ret;
3935 +
3936 + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
3937 + IMX8MM_CLK_END), GFP_KERNEL);
3938 +@@ -622,13 +614,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev)
3939 + goto unregister_hws;
3940 + }
3941 +
3942 +- for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
3943 +- int index = uart_clk_ids[i];
3944 +-
3945 +- uart_hws[i] = &hws[index]->clk;
3946 +- }
3947 +-
3948 +- imx_register_uart_clocks(uart_hws);
3949 ++ imx_register_uart_clocks(4);
3950 +
3951 + return 0;
3952 +
3953 +diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
3954 +index f3c5e6cf55dd4..f98f252795396 100644
3955 +--- a/drivers/clk/imx/clk-imx8mn.c
3956 ++++ b/drivers/clk/imx/clk-imx8mn.c
3957 +@@ -284,20 +284,12 @@ static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sy
3958 + static struct clk_hw_onecell_data *clk_hw_data;
3959 + static struct clk_hw **hws;
3960 +
3961 +-static const int uart_clk_ids[] = {
3962 +- IMX8MN_CLK_UART1_ROOT,
3963 +- IMX8MN_CLK_UART2_ROOT,
3964 +- IMX8MN_CLK_UART3_ROOT,
3965 +- IMX8MN_CLK_UART4_ROOT,
3966 +-};
3967 +-static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
3968 +-
3969 + static int imx8mn_clocks_probe(struct platform_device *pdev)
3970 + {
3971 + struct device *dev = &pdev->dev;
3972 + struct device_node *np = dev->of_node;
3973 + void __iomem *base;
3974 +- int ret, i;
3975 ++ int ret;
3976 +
3977 + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
3978 + IMX8MN_CLK_END), GFP_KERNEL);
3979 +@@ -573,13 +565,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev)
3980 + goto unregister_hws;
3981 + }
3982 +
3983 +- for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
3984 +- int index = uart_clk_ids[i];
3985 +-
3986 +- uart_hws[i] = &hws[index]->clk;
3987 +- }
3988 +-
3989 +- imx_register_uart_clocks(uart_hws);
3990 ++ imx_register_uart_clocks(4);
3991 +
3992 + return 0;
3993 +
3994 +diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
3995 +index 48e212477f52a..0391f5bda5e46 100644
3996 +--- a/drivers/clk/imx/clk-imx8mp.c
3997 ++++ b/drivers/clk/imx/clk-imx8mp.c
3998 +@@ -414,20 +414,11 @@ static const char * const imx8mp_dram_core_sels[] = {"dram_pll_out", "dram_alt_r
3999 + static struct clk_hw **hws;
4000 + static struct clk_hw_onecell_data *clk_hw_data;
4001 +
4002 +-static const int uart_clk_ids[] = {
4003 +- IMX8MP_CLK_UART1_ROOT,
4004 +- IMX8MP_CLK_UART2_ROOT,
4005 +- IMX8MP_CLK_UART3_ROOT,
4006 +- IMX8MP_CLK_UART4_ROOT,
4007 +-};
4008 +-static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
4009 +-
4010 + static int imx8mp_clocks_probe(struct platform_device *pdev)
4011 + {
4012 + struct device *dev = &pdev->dev;
4013 + struct device_node *np = dev->of_node;
4014 + void __iomem *anatop_base, *ccm_base;
4015 +- int i;
4016 +
4017 + np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop");
4018 + anatop_base = of_iomap(np, 0);
4019 +@@ -737,13 +728,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
4020 +
4021 + of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_hw_data);
4022 +
4023 +- for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
4024 +- int index = uart_clk_ids[i];
4025 +-
4026 +- uart_clks[i] = &hws[index]->clk;
4027 +- }
4028 +-
4029 +- imx_register_uart_clocks(uart_clks);
4030 ++ imx_register_uart_clocks(4);
4031 +
4032 + return 0;
4033 + }
4034 +diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
4035 +index 06292d4a98ff7..4e6c81a702214 100644
4036 +--- a/drivers/clk/imx/clk-imx8mq.c
4037 ++++ b/drivers/clk/imx/clk-imx8mq.c
4038 +@@ -273,20 +273,12 @@ static const char * const imx8mq_clko2_sels[] = {"osc_25m", "sys2_pll_200m", "sy
4039 + static struct clk_hw_onecell_data *clk_hw_data;
4040 + static struct clk_hw **hws;
4041 +
4042 +-static const int uart_clk_ids[] = {
4043 +- IMX8MQ_CLK_UART1_ROOT,
4044 +- IMX8MQ_CLK_UART2_ROOT,
4045 +- IMX8MQ_CLK_UART3_ROOT,
4046 +- IMX8MQ_CLK_UART4_ROOT,
4047 +-};
4048 +-static struct clk **uart_hws[ARRAY_SIZE(uart_clk_ids) + 1];
4049 +-
4050 + static int imx8mq_clocks_probe(struct platform_device *pdev)
4051 + {
4052 + struct device *dev = &pdev->dev;
4053 + struct device_node *np = dev->of_node;
4054 + void __iomem *base;
4055 +- int err, i;
4056 ++ int err;
4057 +
4058 + clk_hw_data = kzalloc(struct_size(clk_hw_data, hws,
4059 + IMX8MQ_CLK_END), GFP_KERNEL);
4060 +@@ -607,13 +599,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev)
4061 + goto unregister_hws;
4062 + }
4063 +
4064 +- for (i = 0; i < ARRAY_SIZE(uart_clk_ids); i++) {
4065 +- int index = uart_clk_ids[i];
4066 +-
4067 +- uart_hws[i] = &hws[index]->clk;
4068 +- }
4069 +-
4070 +- imx_register_uart_clocks(uart_hws);
4071 ++ imx_register_uart_clocks(4);
4072 +
4073 + return 0;
4074 +
4075 +diff --git a/drivers/clk/imx/clk.c b/drivers/clk/imx/clk.c
4076 +index 47882c51cb853..7cc669934253a 100644
4077 +--- a/drivers/clk/imx/clk.c
4078 ++++ b/drivers/clk/imx/clk.c
4079 +@@ -147,8 +147,10 @@ void imx_cscmr1_fixup(u32 *val)
4080 + }
4081 +
4082 + #ifndef MODULE
4083 +-static int imx_keep_uart_clocks;
4084 +-static struct clk ** const *imx_uart_clocks;
4085 ++
4086 ++static bool imx_keep_uart_clocks;
4087 ++static int imx_enabled_uart_clocks;
4088 ++static struct clk **imx_uart_clocks;
4089 +
4090 + static int __init imx_keep_uart_clocks_param(char *str)
4091 + {
4092 +@@ -161,24 +163,45 @@ __setup_param("earlycon", imx_keep_uart_earlycon,
4093 + __setup_param("earlyprintk", imx_keep_uart_earlyprintk,
4094 + imx_keep_uart_clocks_param, 0);
4095 +
4096 +-void imx_register_uart_clocks(struct clk ** const clks[])
4097 ++void imx_register_uart_clocks(unsigned int clk_count)
4098 + {
4099 ++ imx_enabled_uart_clocks = 0;
4100 ++
4101 ++/* i.MX boards use device trees now. For build tests without CONFIG_OF, do nothing */
4102 ++#ifdef CONFIG_OF
4103 + if (imx_keep_uart_clocks) {
4104 + int i;
4105 +
4106 +- imx_uart_clocks = clks;
4107 +- for (i = 0; imx_uart_clocks[i]; i++)
4108 +- clk_prepare_enable(*imx_uart_clocks[i]);
4109 ++ imx_uart_clocks = kcalloc(clk_count, sizeof(struct clk *), GFP_KERNEL);
4110 ++
4111 ++ if (!of_stdout)
4112 ++ return;
4113 ++
4114 ++ for (i = 0; i < clk_count; i++) {
4115 ++ imx_uart_clocks[imx_enabled_uart_clocks] = of_clk_get(of_stdout, i);
4116 ++
4117 ++ /* Stop if there are no more of_stdout references */
4118 ++ if (IS_ERR(imx_uart_clocks[imx_enabled_uart_clocks]))
4119 ++ return;
4120 ++
4121 ++ /* Only enable the clock if it's not NULL */
4122 ++ if (imx_uart_clocks[imx_enabled_uart_clocks])
4123 ++ clk_prepare_enable(imx_uart_clocks[imx_enabled_uart_clocks++]);
4124 ++ }
4125 + }
4126 ++#endif
4127 + }
4128 +
4129 + static int __init imx_clk_disable_uart(void)
4130 + {
4131 +- if (imx_keep_uart_clocks && imx_uart_clocks) {
4132 ++ if (imx_keep_uart_clocks && imx_enabled_uart_clocks) {
4133 + int i;
4134 +
4135 +- for (i = 0; imx_uart_clocks[i]; i++)
4136 +- clk_disable_unprepare(*imx_uart_clocks[i]);
4137 ++ for (i = 0; i < imx_enabled_uart_clocks; i++) {
4138 ++ clk_disable_unprepare(imx_uart_clocks[i]);
4139 ++ clk_put(imx_uart_clocks[i]);
4140 ++ }
4141 ++ kfree(imx_uart_clocks);
4142 + }
4143 +
4144 + return 0;
4145 +diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
4146 +index 1d7be0c86538a..f04cbbab9fccd 100644
4147 +--- a/drivers/clk/imx/clk.h
4148 ++++ b/drivers/clk/imx/clk.h
4149 +@@ -13,9 +13,9 @@ extern spinlock_t imx_ccm_lock;
4150 + void imx_check_clocks(struct clk *clks[], unsigned int count);
4151 + void imx_check_clk_hws(struct clk_hw *clks[], unsigned int count);
4152 + #ifndef MODULE
4153 +-void imx_register_uart_clocks(struct clk ** const clks[]);
4154 ++void imx_register_uart_clocks(unsigned int clk_count);
4155 + #else
4156 +-static inline void imx_register_uart_clocks(struct clk ** const clks[])
4157 ++static inline void imx_register_uart_clocks(unsigned int clk_count)
4158 + {
4159 + }
4160 + #endif
4161 +diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
4162 +index f5746f9ea929f..32ac6b6b75306 100644
4163 +--- a/drivers/clk/mvebu/armada-37xx-periph.c
4164 ++++ b/drivers/clk/mvebu/armada-37xx-periph.c
4165 +@@ -84,6 +84,7 @@ struct clk_pm_cpu {
4166 + void __iomem *reg_div;
4167 + u8 shift_div;
4168 + struct regmap *nb_pm_base;
4169 ++ unsigned long l1_expiration;
4170 + };
4171 +
4172 + #define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
4173 +@@ -440,33 +441,6 @@ static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
4174 + return val;
4175 + }
4176 +
4177 +-static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
4178 +-{
4179 +- struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
4180 +- struct regmap *base = pm_cpu->nb_pm_base;
4181 +- int load_level;
4182 +-
4183 +- /*
4184 +- * We set the clock parent only if the DVFS is available but
4185 +- * not enabled.
4186 +- */
4187 +- if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
4188 +- return -EINVAL;
4189 +-
4190 +- /* Set the parent clock for all the load level */
4191 +- for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
4192 +- unsigned int reg, mask, val,
4193 +- offset = ARMADA_37XX_NB_TBG_SEL_OFF;
4194 +-
4195 +- armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);
4196 +-
4197 +- val = index << offset;
4198 +- mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
4199 +- regmap_update_bits(base, reg, mask, val);
4200 +- }
4201 +- return 0;
4202 +-}
4203 +-
4204 + static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
4205 + unsigned long parent_rate)
4206 + {
4207 +@@ -514,8 +488,10 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
4208 + }
4209 +
4210 + /*
4211 +- * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
4212 +- * respectively) to L0 frequency (1.2 Ghz) requires a significant
4213 ++ * Workaround when base CPU frequnecy is 1000 or 1200 MHz
4214 ++ *
4215 ++ * Switching the CPU from the L2 or L3 frequencies (250/300 or 200 MHz
4216 ++ * respectively) to L0 frequency (1/1.2 GHz) requires a significant
4217 + * amount of time to let VDD stabilize to the appropriate
4218 + * voltage. This amount of time is large enough that it cannot be
4219 + * covered by the hardware countdown register. Due to this, the CPU
4220 +@@ -525,26 +501,56 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
4221 + * To work around this problem, we prevent switching directly from the
4222 + * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
4223 + * frequency in-between. The sequence therefore becomes:
4224 +- * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
4225 ++ * 1. First switch from L2/L3 (200/250/300 MHz) to L1 (500/600 MHz)
4226 + * 2. Sleep 20ms for stabling VDD voltage
4227 +- * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
4228 ++ * 3. Then switch from L1 (500/600 MHz) to L0 (1000/1200 MHz).
4229 + */
4230 +-static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
4231 ++static void clk_pm_cpu_set_rate_wa(struct clk_pm_cpu *pm_cpu,
4232 ++ unsigned int new_level, unsigned long rate,
4233 ++ struct regmap *base)
4234 + {
4235 + unsigned int cur_level;
4236 +
4237 +- if (rate != 1200 * 1000 * 1000)
4238 +- return;
4239 +-
4240 + regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
4241 + cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
4242 +- if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
4243 ++
4244 ++ if (cur_level == new_level)
4245 ++ return;
4246 ++
4247 ++ /*
4248 ++ * System wants to go to L1 on its own. If we are going from L2/L3,
4249 ++ * remember when 20ms will expire. If from L0, set the value so that
4250 ++ * next switch to L0 won't have to wait.
4251 ++ */
4252 ++ if (new_level == ARMADA_37XX_DVFS_LOAD_1) {
4253 ++ if (cur_level == ARMADA_37XX_DVFS_LOAD_0)
4254 ++ pm_cpu->l1_expiration = jiffies;
4255 ++ else
4256 ++ pm_cpu->l1_expiration = jiffies + msecs_to_jiffies(20);
4257 + return;
4258 ++ }
4259 ++
4260 ++ /*
4261 ++ * If we are setting to L2/L3, just invalidate L1 expiration time,
4262 ++ * sleeping is not needed.
4263 ++ */
4264 ++ if (rate < 1000*1000*1000)
4265 ++ goto invalidate_l1_exp;
4266 ++
4267 ++ /*
4268 ++ * We are going to L0 with rate >= 1GHz. Check whether we have been at
4269 ++ * L1 for long enough time. If not, go to L1 for 20ms.
4270 ++ */
4271 ++ if (pm_cpu->l1_expiration && jiffies >= pm_cpu->l1_expiration)
4272 ++ goto invalidate_l1_exp;
4273 +
4274 + regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
4275 + ARMADA_37XX_NB_CPU_LOAD_MASK,
4276 + ARMADA_37XX_DVFS_LOAD_1);
4277 + msleep(20);
4278 ++
4279 ++invalidate_l1_exp:
4280 ++ pm_cpu->l1_expiration = 0;
4281 + }
4282 +
4283 + static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
4284 +@@ -578,7 +584,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
4285 + reg = ARMADA_37XX_NB_CPU_LOAD;
4286 + mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
4287 +
4288 +- clk_pm_cpu_set_rate_wa(rate, base);
4289 ++ /* Apply workaround when base CPU frequency is 1000 or 1200 MHz */
4290 ++ if (parent_rate >= 1000*1000*1000)
4291 ++ clk_pm_cpu_set_rate_wa(pm_cpu, load_level, rate, base);
4292 +
4293 + regmap_update_bits(base, reg, mask, load_level);
4294 +
4295 +@@ -592,7 +600,6 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
4296 +
4297 + static const struct clk_ops clk_pm_cpu_ops = {
4298 + .get_parent = clk_pm_cpu_get_parent,
4299 +- .set_parent = clk_pm_cpu_set_parent,
4300 + .round_rate = clk_pm_cpu_round_rate,
4301 + .set_rate = clk_pm_cpu_set_rate,
4302 + .recalc_rate = clk_pm_cpu_recalc_rate,
4303 +diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
4304 +index 45cfc57bff924..af6ac17c7daeb 100644
4305 +--- a/drivers/clk/qcom/a53-pll.c
4306 ++++ b/drivers/clk/qcom/a53-pll.c
4307 +@@ -93,6 +93,7 @@ static const struct of_device_id qcom_a53pll_match_table[] = {
4308 + { .compatible = "qcom,msm8916-a53pll" },
4309 + { }
4310 + };
4311 ++MODULE_DEVICE_TABLE(of, qcom_a53pll_match_table);
4312 +
4313 + static struct platform_driver qcom_a53pll_driver = {
4314 + .probe = qcom_a53pll_probe,
4315 +diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c
4316 +index 30be87fb222aa..bef7899ad0d66 100644
4317 +--- a/drivers/clk/qcom/apss-ipq-pll.c
4318 ++++ b/drivers/clk/qcom/apss-ipq-pll.c
4319 +@@ -81,6 +81,7 @@ static const struct of_device_id apss_ipq_pll_match_table[] = {
4320 + { .compatible = "qcom,ipq6018-a53pll" },
4321 + { }
4322 + };
4323 ++MODULE_DEVICE_TABLE(of, apss_ipq_pll_match_table);
4324 +
4325 + static struct platform_driver apss_ipq_pll_driver = {
4326 + .probe = apss_ipq_pll_probe,
4327 +diff --git a/drivers/clk/uniphier/clk-uniphier-mux.c b/drivers/clk/uniphier/clk-uniphier-mux.c
4328 +index 462c84321b2d2..1998e9d4cfc02 100644
4329 +--- a/drivers/clk/uniphier/clk-uniphier-mux.c
4330 ++++ b/drivers/clk/uniphier/clk-uniphier-mux.c
4331 +@@ -31,10 +31,10 @@ static int uniphier_clk_mux_set_parent(struct clk_hw *hw, u8 index)
4332 + static u8 uniphier_clk_mux_get_parent(struct clk_hw *hw)
4333 + {
4334 + struct uniphier_clk_mux *mux = to_uniphier_clk_mux(hw);
4335 +- int num_parents = clk_hw_get_num_parents(hw);
4336 ++ unsigned int num_parents = clk_hw_get_num_parents(hw);
4337 + int ret;
4338 + unsigned int val;
4339 +- u8 i;
4340 ++ unsigned int i;
4341 +
4342 + ret = regmap_read(mux->regmap, mux->reg, &val);
4343 + if (ret)
4344 +diff --git a/drivers/clk/zynqmp/pll.c b/drivers/clk/zynqmp/pll.c
4345 +index 92f449ed38e51..abe6afbf3407b 100644
4346 +--- a/drivers/clk/zynqmp/pll.c
4347 ++++ b/drivers/clk/zynqmp/pll.c
4348 +@@ -14,10 +14,12 @@
4349 + * struct zynqmp_pll - PLL clock
4350 + * @hw: Handle between common and hardware-specific interfaces
4351 + * @clk_id: PLL clock ID
4352 ++ * @set_pll_mode: Whether an IOCTL_SET_PLL_FRAC_MODE request be sent to ATF
4353 + */
4354 + struct zynqmp_pll {
4355 + struct clk_hw hw;
4356 + u32 clk_id;
4357 ++ bool set_pll_mode;
4358 + };
4359 +
4360 + #define to_zynqmp_pll(_hw) container_of(_hw, struct zynqmp_pll, hw)
4361 +@@ -81,6 +83,8 @@ static inline void zynqmp_pll_set_mode(struct clk_hw *hw, bool on)
4362 + if (ret)
4363 + pr_warn_once("%s() PLL set frac mode failed for %s, ret = %d\n",
4364 + __func__, clk_name, ret);
4365 ++ else
4366 ++ clk->set_pll_mode = true;
4367 + }
4368 +
4369 + /**
4370 +@@ -100,9 +104,7 @@ static long zynqmp_pll_round_rate(struct clk_hw *hw, unsigned long rate,
4371 + /* Enable the fractional mode if needed */
4372 + rate_div = (rate * FRAC_DIV) / *prate;
4373 + f = rate_div % FRAC_DIV;
4374 +- zynqmp_pll_set_mode(hw, !!f);
4375 +-
4376 +- if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
4377 ++ if (f) {
4378 + if (rate > PS_PLL_VCO_MAX) {
4379 + fbdiv = rate / PS_PLL_VCO_MAX;
4380 + rate = rate / (fbdiv + 1);
4381 +@@ -173,10 +175,12 @@ static int zynqmp_pll_set_rate(struct clk_hw *hw, unsigned long rate,
4382 + long rate_div, frac, m, f;
4383 + int ret;
4384 +
4385 +- if (zynqmp_pll_get_mode(hw) == PLL_MODE_FRAC) {
4386 +- rate_div = (rate * FRAC_DIV) / parent_rate;
4387 ++ rate_div = (rate * FRAC_DIV) / parent_rate;
4388 ++ f = rate_div % FRAC_DIV;
4389 ++ zynqmp_pll_set_mode(hw, !!f);
4390 ++
4391 ++ if (f) {
4392 + m = rate_div / FRAC_DIV;
4393 +- f = rate_div % FRAC_DIV;
4394 + m = clamp_t(u32, m, (PLL_FBDIV_MIN), (PLL_FBDIV_MAX));
4395 + rate = parent_rate * m;
4396 + frac = (parent_rate * f) / FRAC_DIV;
4397 +@@ -240,9 +244,15 @@ static int zynqmp_pll_enable(struct clk_hw *hw)
4398 + u32 clk_id = clk->clk_id;
4399 + int ret;
4400 +
4401 +- if (zynqmp_pll_is_enabled(hw))
4402 ++ /*
4403 ++ * Don't skip enabling clock if there is an IOCTL_SET_PLL_FRAC_MODE request
4404 ++ * that has been sent to ATF.
4405 ++ */
4406 ++ if (zynqmp_pll_is_enabled(hw) && (!clk->set_pll_mode))
4407 + return 0;
4408 +
4409 ++ clk->set_pll_mode = false;
4410 ++
4411 + ret = zynqmp_pm_clock_enable(clk_id);
4412 + if (ret)
4413 + pr_warn_once("%s() clock enable failed for %s, ret = %d\n",
4414 +diff --git a/drivers/clocksource/ingenic-ost.c b/drivers/clocksource/ingenic-ost.c
4415 +index 029efc2731b49..6af2470136bd2 100644
4416 +--- a/drivers/clocksource/ingenic-ost.c
4417 ++++ b/drivers/clocksource/ingenic-ost.c
4418 +@@ -88,9 +88,9 @@ static int __init ingenic_ost_probe(struct platform_device *pdev)
4419 + return PTR_ERR(ost->regs);
4420 +
4421 + map = device_node_to_regmap(dev->parent->of_node);
4422 +- if (!map) {
4423 ++ if (IS_ERR(map)) {
4424 + dev_err(dev, "regmap not found");
4425 +- return -EINVAL;
4426 ++ return PTR_ERR(map);
4427 + }
4428 +
4429 + ost->clk = devm_clk_get(dev, "ost");
4430 +diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
4431 +index 33b3e8aa2cc50..3fae9ebb58b83 100644
4432 +--- a/drivers/clocksource/timer-ti-dm-systimer.c
4433 ++++ b/drivers/clocksource/timer-ti-dm-systimer.c
4434 +@@ -449,13 +449,13 @@ static int dmtimer_set_next_event(unsigned long cycles,
4435 + struct dmtimer_systimer *t = &clkevt->t;
4436 + void __iomem *pend = t->base + t->pend;
4437 +
4438 +- writel_relaxed(0xffffffff - cycles, t->base + t->counter);
4439 + while (readl_relaxed(pend) & WP_TCRR)
4440 + cpu_relax();
4441 ++ writel_relaxed(0xffffffff - cycles, t->base + t->counter);
4442 +
4443 +- writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
4444 + while (readl_relaxed(pend) & WP_TCLR)
4445 + cpu_relax();
4446 ++ writel_relaxed(OMAP_TIMER_CTRL_ST, t->base + t->ctrl);
4447 +
4448 + return 0;
4449 + }
4450 +@@ -490,18 +490,18 @@ static int dmtimer_set_periodic(struct clock_event_device *evt)
4451 + dmtimer_clockevent_shutdown(evt);
4452 +
4453 + /* Looks like we need to first set the load value separately */
4454 +- writel_relaxed(clkevt->period, t->base + t->load);
4455 + while (readl_relaxed(pend) & WP_TLDR)
4456 + cpu_relax();
4457 ++ writel_relaxed(clkevt->period, t->base + t->load);
4458 +
4459 +- writel_relaxed(clkevt->period, t->base + t->counter);
4460 + while (readl_relaxed(pend) & WP_TCRR)
4461 + cpu_relax();
4462 ++ writel_relaxed(clkevt->period, t->base + t->counter);
4463 +
4464 +- writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
4465 +- t->base + t->ctrl);
4466 + while (readl_relaxed(pend) & WP_TCLR)
4467 + cpu_relax();
4468 ++ writel_relaxed(OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST,
4469 ++ t->base + t->ctrl);
4470 +
4471 + return 0;
4472 + }
4473 +@@ -554,6 +554,7 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
4474 + dev->set_state_shutdown = dmtimer_clockevent_shutdown;
4475 + dev->set_state_periodic = dmtimer_set_periodic;
4476 + dev->set_state_oneshot = dmtimer_clockevent_shutdown;
4477 ++ dev->set_state_oneshot_stopped = dmtimer_clockevent_shutdown;
4478 + dev->tick_resume = dmtimer_clockevent_shutdown;
4479 + dev->cpumask = cpu_possible_mask;
4480 +
4481 +diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
4482 +index b4af4094309b0..e4782f562e7a9 100644
4483 +--- a/drivers/cpufreq/armada-37xx-cpufreq.c
4484 ++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
4485 +@@ -25,6 +25,10 @@
4486 +
4487 + #include "cpufreq-dt.h"
4488 +
4489 ++/* Clk register set */
4490 ++#define ARMADA_37XX_CLK_TBG_SEL 0
4491 ++#define ARMADA_37XX_CLK_TBG_SEL_CPU_OFF 22
4492 ++
4493 + /* Power management in North Bridge register set */
4494 + #define ARMADA_37XX_NB_L0L1 0x18
4495 + #define ARMADA_37XX_NB_L2L3 0x1C
4496 +@@ -69,6 +73,8 @@
4497 + #define LOAD_LEVEL_NR 4
4498 +
4499 + #define MIN_VOLT_MV 1000
4500 ++#define MIN_VOLT_MV_FOR_L1_1000MHZ 1108
4501 ++#define MIN_VOLT_MV_FOR_L1_1200MHZ 1155
4502 +
4503 + /* AVS value for the corresponding voltage (in mV) */
4504 + static int avs_map[] = {
4505 +@@ -120,10 +126,15 @@ static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq)
4506 + * will be configured then the DVFS will be enabled.
4507 + */
4508 + static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
4509 +- struct clk *clk, u8 *divider)
4510 ++ struct regmap *clk_base, u8 *divider)
4511 + {
4512 ++ u32 cpu_tbg_sel;
4513 + int load_lvl;
4514 +- struct clk *parent;
4515 ++
4516 ++ /* Determine to which TBG clock is CPU connected */
4517 ++ regmap_read(clk_base, ARMADA_37XX_CLK_TBG_SEL, &cpu_tbg_sel);
4518 ++ cpu_tbg_sel >>= ARMADA_37XX_CLK_TBG_SEL_CPU_OFF;
4519 ++ cpu_tbg_sel &= ARMADA_37XX_NB_TBG_SEL_MASK;
4520 +
4521 + for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) {
4522 + unsigned int reg, mask, val, offset = 0;
4523 +@@ -142,6 +153,11 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
4524 + mask = (ARMADA_37XX_NB_CLK_SEL_MASK
4525 + << ARMADA_37XX_NB_CLK_SEL_OFF);
4526 +
4527 ++ /* Set TBG index, for all levels we use the same TBG */
4528 ++ val = cpu_tbg_sel << ARMADA_37XX_NB_TBG_SEL_OFF;
4529 ++ mask = (ARMADA_37XX_NB_TBG_SEL_MASK
4530 ++ << ARMADA_37XX_NB_TBG_SEL_OFF);
4531 ++
4532 + /*
4533 + * Set cpu divider based on the pre-computed array in
4534 + * order to have balanced step.
4535 +@@ -160,14 +176,6 @@ static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base,
4536 +
4537 + regmap_update_bits(base, reg, mask, val);
4538 + }
4539 +-
4540 +- /*
4541 +- * Set cpu clock source, for all the level we keep the same
4542 +- * clock source that the one already configured. For this one
4543 +- * we need to use the clock framework
4544 +- */
4545 +- parent = clk_get_parent(clk);
4546 +- clk_set_parent(clk, parent);
4547 + }
4548 +
4549 + /*
4550 +@@ -202,6 +210,8 @@ static u32 armada_37xx_avs_val_match(int target_vm)
4551 + * - L2 & L3 voltage should be about 150mv smaller than L0 voltage.
4552 + * This function calculates L1 & L2 & L3 AVS values dynamically based
4553 + * on L0 voltage and fill all AVS values to the AVS value table.
4554 ++ * When base CPU frequency is 1000 or 1200 MHz then there is additional
4555 ++ * minimal avs value for load L1.
4556 + */
4557 + static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
4558 + struct armada_37xx_dvfs *dvfs)
4559 +@@ -233,6 +243,19 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
4560 + for (load_level = 1; load_level < LOAD_LEVEL_NR; load_level++)
4561 + dvfs->avs[load_level] = avs_min;
4562 +
4563 ++ /*
4564 ++ * Set the avs values for load L0 and L1 when base CPU frequency
4565 ++ * is 1000/1200 MHz to its typical initial values according to
4566 ++ * the Armada 3700 Hardware Specifications.
4567 ++ */
4568 ++ if (dvfs->cpu_freq_max >= 1000*1000*1000) {
4569 ++ if (dvfs->cpu_freq_max >= 1200*1000*1000)
4570 ++ avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
4571 ++ else
4572 ++ avs_min = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
4573 ++ dvfs->avs[0] = dvfs->avs[1] = avs_min;
4574 ++ }
4575 ++
4576 + return;
4577 + }
4578 +
4579 +@@ -252,6 +275,26 @@ static void __init armada37xx_cpufreq_avs_configure(struct regmap *base,
4580 + target_vm = avs_map[l0_vdd_min] - 150;
4581 + target_vm = target_vm > MIN_VOLT_MV ? target_vm : MIN_VOLT_MV;
4582 + dvfs->avs[2] = dvfs->avs[3] = armada_37xx_avs_val_match(target_vm);
4583 ++
4584 ++ /*
4585 ++ * Fix the avs value for load L1 when base CPU frequency is 1000/1200 MHz,
4586 ++ * otherwise the CPU gets stuck when switching from load L1 to load L0.
4587 ++ * Also ensure that avs value for load L1 is not higher than for L0.
4588 ++ */
4589 ++ if (dvfs->cpu_freq_max >= 1000*1000*1000) {
4590 ++ u32 avs_min_l1;
4591 ++
4592 ++ if (dvfs->cpu_freq_max >= 1200*1000*1000)
4593 ++ avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1200MHZ);
4594 ++ else
4595 ++ avs_min_l1 = armada_37xx_avs_val_match(MIN_VOLT_MV_FOR_L1_1000MHZ);
4596 ++
4597 ++ if (avs_min_l1 > dvfs->avs[0])
4598 ++ avs_min_l1 = dvfs->avs[0];
4599 ++
4600 ++ if (dvfs->avs[1] < avs_min_l1)
4601 ++ dvfs->avs[1] = avs_min_l1;
4602 ++ }
4603 + }
4604 +
4605 + static void __init armada37xx_cpufreq_avs_setup(struct regmap *base,
4606 +@@ -358,11 +401,16 @@ static int __init armada37xx_cpufreq_driver_init(void)
4607 + struct platform_device *pdev;
4608 + unsigned long freq;
4609 + unsigned int cur_frequency, base_frequency;
4610 +- struct regmap *nb_pm_base, *avs_base;
4611 ++ struct regmap *nb_clk_base, *nb_pm_base, *avs_base;
4612 + struct device *cpu_dev;
4613 + int load_lvl, ret;
4614 + struct clk *clk, *parent;
4615 +
4616 ++ nb_clk_base =
4617 ++ syscon_regmap_lookup_by_compatible("marvell,armada-3700-periph-clock-nb");
4618 ++ if (IS_ERR(nb_clk_base))
4619 ++ return -ENODEV;
4620 ++
4621 + nb_pm_base =
4622 + syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm");
4623 +
4624 +@@ -421,7 +469,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
4625 + return -EINVAL;
4626 + }
4627 +
4628 +- dvfs = armada_37xx_cpu_freq_info_get(cur_frequency);
4629 ++ dvfs = armada_37xx_cpu_freq_info_get(base_frequency);
4630 + if (!dvfs) {
4631 + clk_put(clk);
4632 + return -EINVAL;
4633 +@@ -439,7 +487,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
4634 + armada37xx_cpufreq_avs_configure(avs_base, dvfs);
4635 + armada37xx_cpufreq_avs_setup(avs_base, dvfs);
4636 +
4637 +- armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
4638 ++ armada37xx_cpufreq_dvfs_setup(nb_pm_base, nb_clk_base, dvfs->divider);
4639 + clk_put(clk);
4640 +
4641 + for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
4642 +@@ -473,7 +521,7 @@ disable_dvfs:
4643 + remove_opp:
4644 + /* clean-up the already added opp before leaving */
4645 + while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) {
4646 +- freq = cur_frequency / dvfs->divider[load_lvl];
4647 ++ freq = base_frequency / dvfs->divider[load_lvl];
4648 + dev_pm_opp_remove(cpu_dev, freq);
4649 + }
4650 +
4651 +diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
4652 +index 0844fadc4be85..334f83e56120c 100644
4653 +--- a/drivers/cpuidle/Kconfig.arm
4654 ++++ b/drivers/cpuidle/Kconfig.arm
4655 +@@ -107,7 +107,7 @@ config ARM_TEGRA_CPUIDLE
4656 +
4657 + config ARM_QCOM_SPM_CPUIDLE
4658 + bool "CPU Idle Driver for Qualcomm Subsystem Power Manager (SPM)"
4659 +- depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64
4660 ++ depends on (ARCH_QCOM || COMPILE_TEST) && !ARM64 && MMU
4661 + select ARM_CPU_SUSPEND
4662 + select CPU_IDLE_MULTIPLE_DRIVERS
4663 + select DT_IDLE_STATES
4664 +diff --git a/drivers/crypto/allwinner/Kconfig b/drivers/crypto/allwinner/Kconfig
4665 +index 0cdfe0e8cc66b..ce34048d0d68a 100644
4666 +--- a/drivers/crypto/allwinner/Kconfig
4667 ++++ b/drivers/crypto/allwinner/Kconfig
4668 +@@ -62,10 +62,10 @@ config CRYPTO_DEV_SUN8I_CE_DEBUG
4669 + config CRYPTO_DEV_SUN8I_CE_HASH
4670 + bool "Enable support for hash on sun8i-ce"
4671 + depends on CRYPTO_DEV_SUN8I_CE
4672 +- select MD5
4673 +- select SHA1
4674 +- select SHA256
4675 +- select SHA512
4676 ++ select CRYPTO_MD5
4677 ++ select CRYPTO_SHA1
4678 ++ select CRYPTO_SHA256
4679 ++ select CRYPTO_SHA512
4680 + help
4681 + Say y to enable support for hash algorithms.
4682 +
4683 +@@ -123,8 +123,8 @@ config CRYPTO_DEV_SUN8I_SS_PRNG
4684 + config CRYPTO_DEV_SUN8I_SS_HASH
4685 + bool "Enable support for hash on sun8i-ss"
4686 + depends on CRYPTO_DEV_SUN8I_SS
4687 +- select MD5
4688 +- select SHA1
4689 +- select SHA256
4690 ++ select CRYPTO_MD5
4691 ++ select CRYPTO_SHA1
4692 ++ select CRYPTO_SHA256
4693 + help
4694 + Say y to enable support for hash algorithms.
4695 +diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
4696 +index b6ab2054f217b..756d5a7835482 100644
4697 +--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
4698 ++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
4699 +@@ -347,8 +347,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
4700 + bf = (__le32 *)pad;
4701 +
4702 + result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA);
4703 +- if (!result)
4704 ++ if (!result) {
4705 ++ kfree(pad);
4706 + return -ENOMEM;
4707 ++ }
4708 +
4709 + for (i = 0; i < MAX_SG; i++) {
4710 + rctx->t_dst[i].addr = 0;
4711 +@@ -434,11 +436,10 @@ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq)
4712 + dma_unmap_sg(ss->dev, areq->src, nr_sgs, DMA_TO_DEVICE);
4713 + dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE);
4714 +
4715 +- kfree(pad);
4716 +-
4717 + memcpy(areq->result, result, algt->alg.hash.halg.digestsize);
4718 +- kfree(result);
4719 + theend:
4720 ++ kfree(pad);
4721 ++ kfree(result);
4722 + crypto_finalize_hash_request(engine, breq, err);
4723 + return 0;
4724 + }
4725 +diff --git a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
4726 +index 08a1473b21457..3191527928e41 100644
4727 +--- a/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
4728 ++++ b/drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
4729 +@@ -103,7 +103,8 @@ int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
4730 + dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE);
4731 + if (dma_mapping_error(ss->dev, dma_iv)) {
4732 + dev_err(ss->dev, "Cannot DMA MAP IV\n");
4733 +- return -EFAULT;
4734 ++ err = -EFAULT;
4735 ++ goto err_free;
4736 + }
4737 +
4738 + dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE);
4739 +@@ -167,6 +168,7 @@ err_iv:
4740 + memcpy(ctx->seed, d + dlen, ctx->slen);
4741 + }
4742 + memzero_explicit(d, todo);
4743 ++err_free:
4744 + kfree(d);
4745 +
4746 + return err;
4747 +diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
4748 +index 476113e12489f..5b82ba7acc7cb 100644
4749 +--- a/drivers/crypto/ccp/sev-dev.c
4750 ++++ b/drivers/crypto/ccp/sev-dev.c
4751 +@@ -149,6 +149,9 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
4752 +
4753 + sev = psp->sev_data;
4754 +
4755 ++ if (data && WARN_ON_ONCE(!virt_addr_valid(data)))
4756 ++ return -EINVAL;
4757 ++
4758 + /* Get the physical address of the command buffer */
4759 + phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
4760 + phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
4761 +diff --git a/drivers/crypto/ccp/tee-dev.c b/drivers/crypto/ccp/tee-dev.c
4762 +index 5e697a90ea7f4..bcb81fef42118 100644
4763 +--- a/drivers/crypto/ccp/tee-dev.c
4764 ++++ b/drivers/crypto/ccp/tee-dev.c
4765 +@@ -36,6 +36,7 @@ static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size)
4766 + if (!start_addr)
4767 + return -ENOMEM;
4768 +
4769 ++ memset(start_addr, 0x0, ring_size);
4770 + rb_mgr->ring_start = start_addr;
4771 + rb_mgr->ring_size = ring_size;
4772 + rb_mgr->ring_pa = __psp_pa(start_addr);
4773 +@@ -244,41 +245,54 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
4774 + void *buf, size_t len, struct tee_ring_cmd **resp)
4775 + {
4776 + struct tee_ring_cmd *cmd;
4777 +- u32 rptr, wptr;
4778 + int nloop = 1000, ret = 0;
4779 ++ u32 rptr;
4780 +
4781 + *resp = NULL;
4782 +
4783 + mutex_lock(&tee->rb_mgr.mutex);
4784 +
4785 +- wptr = tee->rb_mgr.wptr;
4786 +-
4787 +- /* Check if ring buffer is full */
4788 ++ /* Loop until empty entry found in ring buffer */
4789 + do {
4790 ++ /* Get pointer to ring buffer command entry */
4791 ++ cmd = (struct tee_ring_cmd *)
4792 ++ (tee->rb_mgr.ring_start + tee->rb_mgr.wptr);
4793 ++
4794 + rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg);
4795 +
4796 +- if (!(wptr + sizeof(struct tee_ring_cmd) == rptr))
4797 ++ /* Check if ring buffer is full or command entry is waiting
4798 ++ * for response from TEE
4799 ++ */
4800 ++ if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
4801 ++ cmd->flag == CMD_WAITING_FOR_RESPONSE))
4802 + break;
4803 +
4804 +- dev_info(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
4805 +- rptr, wptr);
4806 ++ dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
4807 ++ rptr, tee->rb_mgr.wptr);
4808 +
4809 +- /* Wait if ring buffer is full */
4810 ++ /* Wait if ring buffer is full or TEE is processing data */
4811 + mutex_unlock(&tee->rb_mgr.mutex);
4812 + schedule_timeout_interruptible(msecs_to_jiffies(10));
4813 + mutex_lock(&tee->rb_mgr.mutex);
4814 +
4815 + } while (--nloop);
4816 +
4817 +- if (!nloop && (wptr + sizeof(struct tee_ring_cmd) == rptr)) {
4818 +- dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n",
4819 +- rptr, wptr);
4820 ++ if (!nloop &&
4821 ++ (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr ||
4822 ++ cmd->flag == CMD_WAITING_FOR_RESPONSE)) {
4823 ++ dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n",
4824 ++ rptr, tee->rb_mgr.wptr, cmd->flag);
4825 + ret = -EBUSY;
4826 + goto unlock;
4827 + }
4828 +
4829 +- /* Pointer to empty data entry in ring buffer */
4830 +- cmd = (struct tee_ring_cmd *)(tee->rb_mgr.ring_start + wptr);
4831 ++ /* Do not submit command if PSP got disabled while processing any
4832 ++ * command in another thread
4833 ++ */
4834 ++ if (psp_dead) {
4835 ++ ret = -EBUSY;
4836 ++ goto unlock;
4837 ++ }
4838 +
4839 + /* Write command data into ring buffer */
4840 + cmd->cmd_id = cmd_id;
4841 +@@ -286,6 +300,9 @@ static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id,
4842 + memset(&cmd->buf[0], 0, sizeof(cmd->buf));
4843 + memcpy(&cmd->buf[0], buf, len);
4844 +
4845 ++ /* Indicate driver is waiting for response */
4846 ++ cmd->flag = CMD_WAITING_FOR_RESPONSE;
4847 ++
4848 + /* Update local copy of write pointer */
4849 + tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd);
4850 + if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size)
4851 +@@ -353,12 +370,16 @@ int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
4852 + return ret;
4853 +
4854 + ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT);
4855 +- if (ret)
4856 ++ if (ret) {
4857 ++ resp->flag = CMD_RESPONSE_TIMEDOUT;
4858 + return ret;
4859 ++ }
4860 +
4861 + memcpy(buf, &resp->buf[0], len);
4862 + *status = resp->status;
4863 +
4864 ++ resp->flag = CMD_RESPONSE_COPIED;
4865 ++
4866 + return 0;
4867 + }
4868 + EXPORT_SYMBOL(psp_tee_process_cmd);
4869 +diff --git a/drivers/crypto/ccp/tee-dev.h b/drivers/crypto/ccp/tee-dev.h
4870 +index f099601121150..49d26158b71e3 100644
4871 +--- a/drivers/crypto/ccp/tee-dev.h
4872 ++++ b/drivers/crypto/ccp/tee-dev.h
4873 +@@ -1,6 +1,6 @@
4874 + /* SPDX-License-Identifier: MIT */
4875 + /*
4876 +- * Copyright 2019 Advanced Micro Devices, Inc.
4877 ++ * Copyright (C) 2019,2021 Advanced Micro Devices, Inc.
4878 + *
4879 + * Author: Rijo Thomas <Rijo-john.Thomas@×××.com>
4880 + * Author: Devaraj Rangasamy <Devaraj.Rangasamy@×××.com>
4881 +@@ -18,7 +18,7 @@
4882 + #include <linux/mutex.h>
4883 +
4884 + #define TEE_DEFAULT_TIMEOUT 10
4885 +-#define MAX_BUFFER_SIZE 992
4886 ++#define MAX_BUFFER_SIZE 988
4887 +
4888 + /**
4889 + * enum tee_ring_cmd_id - TEE interface commands for ring buffer configuration
4890 +@@ -81,6 +81,20 @@ enum tee_cmd_state {
4891 + TEE_CMD_STATE_COMPLETED,
4892 + };
4893 +
4894 ++/**
4895 ++ * enum cmd_resp_state - TEE command's response status maintained by driver
4896 ++ * @CMD_RESPONSE_INVALID: initial state when no command is written to ring
4897 ++ * @CMD_WAITING_FOR_RESPONSE: driver waiting for response from TEE
4898 ++ * @CMD_RESPONSE_TIMEDOUT: failed to get response from TEE
4899 ++ * @CMD_RESPONSE_COPIED: driver has copied response from TEE
4900 ++ */
4901 ++enum cmd_resp_state {
4902 ++ CMD_RESPONSE_INVALID,
4903 ++ CMD_WAITING_FOR_RESPONSE,
4904 ++ CMD_RESPONSE_TIMEDOUT,
4905 ++ CMD_RESPONSE_COPIED,
4906 ++};
4907 ++
4908 + /**
4909 + * struct tee_ring_cmd - Structure of the command buffer in TEE ring
4910 + * @cmd_id: refers to &enum tee_cmd_id. Command id for the ring buffer
4911 +@@ -91,6 +105,7 @@ enum tee_cmd_state {
4912 + * @pdata: private data (currently unused)
4913 + * @res1: reserved region
4914 + * @buf: TEE command specific buffer
4915 ++ * @flag: refers to &enum cmd_resp_state
4916 + */
4917 + struct tee_ring_cmd {
4918 + u32 cmd_id;
4919 +@@ -100,6 +115,7 @@ struct tee_ring_cmd {
4920 + u64 pdata;
4921 + u32 res1[2];
4922 + u8 buf[MAX_BUFFER_SIZE];
4923 ++ u32 flag;
4924 +
4925 + /* Total size: 1024 bytes */
4926 + } __packed;
4927 +diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
4928 +index 13b908ea48738..884adeb63ba3c 100644
4929 +--- a/drivers/crypto/chelsio/chcr_algo.c
4930 ++++ b/drivers/crypto/chelsio/chcr_algo.c
4931 +@@ -768,13 +768,14 @@ static inline void create_wreq(struct chcr_context *ctx,
4932 + struct uld_ctx *u_ctx = ULD_CTX(ctx);
4933 + unsigned int tx_channel_id, rx_channel_id;
4934 + unsigned int txqidx = 0, rxqidx = 0;
4935 +- unsigned int qid, fid;
4936 ++ unsigned int qid, fid, portno;
4937 +
4938 + get_qidxs(req, &txqidx, &rxqidx);
4939 + qid = u_ctx->lldi.rxq_ids[rxqidx];
4940 + fid = u_ctx->lldi.rxq_ids[0];
4941 ++ portno = rxqidx / ctx->rxq_perchan;
4942 + tx_channel_id = txqidx / ctx->txq_perchan;
4943 +- rx_channel_id = rxqidx / ctx->rxq_perchan;
4944 ++ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]);
4945 +
4946 +
4947 + chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
4948 +@@ -805,6 +806,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
4949 + {
4950 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
4951 + struct chcr_context *ctx = c_ctx(tfm);
4952 ++ struct uld_ctx *u_ctx = ULD_CTX(ctx);
4953 + struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
4954 + struct sk_buff *skb = NULL;
4955 + struct chcr_wr *chcr_req;
4956 +@@ -821,6 +823,7 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
4957 + struct adapter *adap = padap(ctx->dev);
4958 + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
4959 +
4960 ++ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
4961 + nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
4962 + reqctx->dst_ofst);
4963 + dst_size = get_space_for_phys_dsgl(nents);
4964 +@@ -1579,6 +1582,7 @@ static struct sk_buff *create_hash_wr(struct ahash_request *req,
4965 + int error = 0;
4966 + unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan;
4967 +
4968 ++ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
4969 + transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
4970 + req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
4971 + param->sg_len) <= SGE_MAX_WR_LEN;
4972 +@@ -2437,6 +2441,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
4973 + {
4974 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
4975 + struct chcr_context *ctx = a_ctx(tfm);
4976 ++ struct uld_ctx *u_ctx = ULD_CTX(ctx);
4977 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
4978 + struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
4979 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
4980 +@@ -2456,6 +2461,7 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
4981 + struct adapter *adap = padap(ctx->dev);
4982 + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
4983 +
4984 ++ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
4985 + if (req->cryptlen == 0)
4986 + return NULL;
4987 +
4988 +@@ -2709,9 +2715,11 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
4989 + struct dsgl_walk dsgl_walk;
4990 + unsigned int authsize = crypto_aead_authsize(tfm);
4991 + struct chcr_context *ctx = a_ctx(tfm);
4992 ++ struct uld_ctx *u_ctx = ULD_CTX(ctx);
4993 + u32 temp;
4994 + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
4995 +
4996 ++ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
4997 + dsgl_walk_init(&dsgl_walk, phys_cpl);
4998 + dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
4999 + temp = req->assoclen + req->cryptlen +
5000 +@@ -2751,9 +2759,11 @@ void chcr_add_cipher_dst_ent(struct skcipher_request *req,
5001 + struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req);
5002 + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req);
5003 + struct chcr_context *ctx = c_ctx(tfm);
5004 ++ struct uld_ctx *u_ctx = ULD_CTX(ctx);
5005 + struct dsgl_walk dsgl_walk;
5006 + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
5007 +
5008 ++ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
5009 + dsgl_walk_init(&dsgl_walk, phys_cpl);
5010 + dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
5011 + reqctx->dst_ofst);
5012 +@@ -2957,6 +2967,7 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
5013 + {
5014 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
5015 + struct chcr_context *ctx = a_ctx(tfm);
5016 ++ struct uld_ctx *u_ctx = ULD_CTX(ctx);
5017 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
5018 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
5019 + unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
5020 +@@ -2966,6 +2977,8 @@ static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
5021 + unsigned int tag_offset = 0, auth_offset = 0;
5022 + unsigned int assoclen;
5023 +
5024 ++ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
5025 ++
5026 + if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
5027 + assoclen = req->assoclen - 8;
5028 + else
5029 +@@ -3126,6 +3139,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
5030 + {
5031 + struct crypto_aead *tfm = crypto_aead_reqtfm(req);
5032 + struct chcr_context *ctx = a_ctx(tfm);
5033 ++ struct uld_ctx *u_ctx = ULD_CTX(ctx);
5034 + struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx);
5035 + struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
5036 + struct sk_buff *skb = NULL;
5037 +@@ -3142,6 +3156,7 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
5038 + struct adapter *adap = padap(ctx->dev);
5039 + unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan;
5040 +
5041 ++ rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]);
5042 + if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
5043 + assoclen = req->assoclen - 8;
5044 +
5045 +diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
5046 +index 456979b136a27..ea932b6c4534f 100644
5047 +--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
5048 ++++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
5049 +@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5050 + if (ret)
5051 + goto out_err_free_reg;
5052 +
5053 +- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
5054 +-
5055 + ret = adf_dev_init(accel_dev);
5056 + if (ret)
5057 + goto out_err_dev_shutdown;
5058 +
5059 ++ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
5060 ++
5061 + ret = adf_dev_start(accel_dev);
5062 + if (ret)
5063 + goto out_err_dev_stop;
5064 +diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
5065 +index b9810f79eb848..6200ad448b119 100644
5066 +--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
5067 ++++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
5068 +@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5069 + if (ret)
5070 + goto out_err_free_reg;
5071 +
5072 +- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
5073 +-
5074 + ret = adf_dev_init(accel_dev);
5075 + if (ret)
5076 + goto out_err_dev_shutdown;
5077 +
5078 ++ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
5079 ++
5080 + ret = adf_dev_start(accel_dev);
5081 + if (ret)
5082 + goto out_err_dev_stop;
5083 +diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
5084 +index 36136f7db509d..da6ef007a6aef 100644
5085 +--- a/drivers/crypto/qat/qat_common/adf_isr.c
5086 ++++ b/drivers/crypto/qat/qat_common/adf_isr.c
5087 +@@ -286,19 +286,32 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
5088 +
5089 + ret = adf_isr_alloc_msix_entry_table(accel_dev);
5090 + if (ret)
5091 +- return ret;
5092 +- if (adf_enable_msix(accel_dev))
5093 + goto err_out;
5094 +
5095 +- if (adf_setup_bh(accel_dev))
5096 +- goto err_out;
5097 ++ ret = adf_enable_msix(accel_dev);
5098 ++ if (ret)
5099 ++ goto err_free_msix_table;
5100 +
5101 +- if (adf_request_irqs(accel_dev))
5102 +- goto err_out;
5103 ++ ret = adf_setup_bh(accel_dev);
5104 ++ if (ret)
5105 ++ goto err_disable_msix;
5106 ++
5107 ++ ret = adf_request_irqs(accel_dev);
5108 ++ if (ret)
5109 ++ goto err_cleanup_bh;
5110 +
5111 + return 0;
5112 ++
5113 ++err_cleanup_bh:
5114 ++ adf_cleanup_bh(accel_dev);
5115 ++
5116 ++err_disable_msix:
5117 ++ adf_disable_msix(&accel_dev->accel_pci_dev);
5118 ++
5119 ++err_free_msix_table:
5120 ++ adf_isr_free_msix_entry_table(accel_dev);
5121 ++
5122 + err_out:
5123 +- adf_isr_resource_free(accel_dev);
5124 +- return -EFAULT;
5125 ++ return ret;
5126 + }
5127 + EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
5128 +diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
5129 +index 2ad774017200f..cdfd56c9e3458 100644
5130 +--- a/drivers/crypto/qat/qat_common/adf_transport.c
5131 ++++ b/drivers/crypto/qat/qat_common/adf_transport.c
5132 +@@ -153,6 +153,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
5133 + dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
5134 + dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
5135 + ring->base_addr, ring->dma_addr);
5136 ++ ring->base_addr = NULL;
5137 + return -EFAULT;
5138 + }
5139 +
5140 +diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
5141 +index c4a44dc6af3ee..31a36288623a2 100644
5142 +--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
5143 ++++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
5144 +@@ -260,17 +260,26 @@ int adf_vf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
5145 + goto err_out;
5146 +
5147 + if (adf_setup_pf2vf_bh(accel_dev))
5148 +- goto err_out;
5149 ++ goto err_disable_msi;
5150 +
5151 + if (adf_setup_bh(accel_dev))
5152 +- goto err_out;
5153 ++ goto err_cleanup_pf2vf_bh;
5154 +
5155 + if (adf_request_msi_irq(accel_dev))
5156 +- goto err_out;
5157 ++ goto err_cleanup_bh;
5158 +
5159 + return 0;
5160 ++
5161 ++err_cleanup_bh:
5162 ++ adf_cleanup_bh(accel_dev);
5163 ++
5164 ++err_cleanup_pf2vf_bh:
5165 ++ adf_cleanup_pf2vf_bh(accel_dev);
5166 ++
5167 ++err_disable_msi:
5168 ++ adf_disable_msi(accel_dev);
5169 ++
5170 + err_out:
5171 +- adf_vf_isr_resource_free(accel_dev);
5172 + return -EFAULT;
5173 + }
5174 + EXPORT_SYMBOL_GPL(adf_vf_isr_resource_alloc);
5175 +diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
5176 +index 404cf9df69220..737508ded37b4 100644
5177 +--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
5178 ++++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
5179 +@@ -184,12 +184,12 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5180 + if (ret)
5181 + goto out_err_free_reg;
5182 +
5183 +- set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
5184 +-
5185 + ret = adf_dev_init(accel_dev);
5186 + if (ret)
5187 + goto out_err_dev_shutdown;
5188 +
5189 ++ set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
5190 ++
5191 + ret = adf_dev_start(accel_dev);
5192 + if (ret)
5193 + goto out_err_dev_stop;
5194 +diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
5195 +index 39d56ab12f275..4640fe0c1f221 100644
5196 +--- a/drivers/crypto/sa2ul.c
5197 ++++ b/drivers/crypto/sa2ul.c
5198 +@@ -1138,8 +1138,10 @@ static int sa_run(struct sa_req *req)
5199 + mapped_sg->sgt.sgl = src;
5200 + mapped_sg->sgt.orig_nents = src_nents;
5201 + ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
5202 +- if (ret)
5203 ++ if (ret) {
5204 ++ kfree(rxd);
5205 + return ret;
5206 ++ }
5207 +
5208 + mapped_sg->dir = dir_src;
5209 + mapped_sg->mapped = true;
5210 +@@ -1147,8 +1149,10 @@ static int sa_run(struct sa_req *req)
5211 + mapped_sg->sgt.sgl = req->src;
5212 + mapped_sg->sgt.orig_nents = sg_nents;
5213 + ret = dma_map_sgtable(ddev, &mapped_sg->sgt, dir_src, 0);
5214 +- if (ret)
5215 ++ if (ret) {
5216 ++ kfree(rxd);
5217 + return ret;
5218 ++ }
5219 +
5220 + mapped_sg->dir = dir_src;
5221 + mapped_sg->mapped = true;
5222 +diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
5223 +index 861c100f9fac3..98f03a02d1122 100644
5224 +--- a/drivers/devfreq/devfreq.c
5225 ++++ b/drivers/devfreq/devfreq.c
5226 +@@ -377,7 +377,7 @@ static int devfreq_set_target(struct devfreq *devfreq, unsigned long new_freq,
5227 + devfreq->previous_freq = new_freq;
5228 +
5229 + if (devfreq->suspend_freq)
5230 +- devfreq->resume_freq = cur_freq;
5231 ++ devfreq->resume_freq = new_freq;
5232 +
5233 + return err;
5234 + }
5235 +@@ -788,7 +788,8 @@ struct devfreq *devfreq_add_device(struct device *dev,
5236 +
5237 + if (devfreq->profile->timer < 0
5238 + || devfreq->profile->timer >= DEVFREQ_TIMER_NUM) {
5239 +- goto err_out;
5240 ++ mutex_unlock(&devfreq->lock);
5241 ++ goto err_dev;
5242 + }
5243 +
5244 + if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
5245 +diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
5246 +index 3315e3c215864..5fa6b3ca0a385 100644
5247 +--- a/drivers/firmware/Kconfig
5248 ++++ b/drivers/firmware/Kconfig
5249 +@@ -237,6 +237,7 @@ config INTEL_STRATIX10_RSU
5250 + config QCOM_SCM
5251 + bool
5252 + depends on ARM || ARM64
5253 ++ depends on HAVE_ARM_SMCCC
5254 + select RESET_CONTROLLER
5255 +
5256 + config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
5257 +diff --git a/drivers/firmware/qcom_scm-smc.c b/drivers/firmware/qcom_scm-smc.c
5258 +index 497c13ba98d67..d111833364ba4 100644
5259 +--- a/drivers/firmware/qcom_scm-smc.c
5260 ++++ b/drivers/firmware/qcom_scm-smc.c
5261 +@@ -77,8 +77,10 @@ static void __scm_smc_do(const struct arm_smccc_args *smc,
5262 + } while (res->a0 == QCOM_SCM_V2_EBUSY);
5263 + }
5264 +
5265 +-int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
5266 +- struct qcom_scm_res *res, bool atomic)
5267 ++
5268 ++int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
5269 ++ enum qcom_scm_convention qcom_convention,
5270 ++ struct qcom_scm_res *res, bool atomic)
5271 + {
5272 + int arglen = desc->arginfo & 0xf;
5273 + int i;
5274 +@@ -87,9 +89,8 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
5275 + size_t alloc_len;
5276 + gfp_t flag = atomic ? GFP_ATOMIC : GFP_KERNEL;
5277 + u32 smccc_call_type = atomic ? ARM_SMCCC_FAST_CALL : ARM_SMCCC_STD_CALL;
5278 +- u32 qcom_smccc_convention =
5279 +- (qcom_scm_convention == SMC_CONVENTION_ARM_32) ?
5280 +- ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
5281 ++ u32 qcom_smccc_convention = (qcom_convention == SMC_CONVENTION_ARM_32) ?
5282 ++ ARM_SMCCC_SMC_32 : ARM_SMCCC_SMC_64;
5283 + struct arm_smccc_res smc_res;
5284 + struct arm_smccc_args smc = {0};
5285 +
5286 +@@ -148,4 +149,5 @@ int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
5287 + }
5288 +
5289 + return (long)smc_res.a0 ? qcom_scm_remap_error(smc_res.a0) : 0;
5290 ++
5291 + }
5292 +diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
5293 +index 7be48c1bec96d..c5b20bdc08e9d 100644
5294 +--- a/drivers/firmware/qcom_scm.c
5295 ++++ b/drivers/firmware/qcom_scm.c
5296 +@@ -113,14 +113,10 @@ static void qcom_scm_clk_disable(void)
5297 + clk_disable_unprepare(__scm->bus_clk);
5298 + }
5299 +
5300 +-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
5301 +- u32 cmd_id);
5302 ++enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
5303 ++static DEFINE_SPINLOCK(scm_query_lock);
5304 +
5305 +-enum qcom_scm_convention qcom_scm_convention;
5306 +-static bool has_queried __read_mostly;
5307 +-static DEFINE_SPINLOCK(query_lock);
5308 +-
5309 +-static void __query_convention(void)
5310 ++static enum qcom_scm_convention __get_convention(void)
5311 + {
5312 + unsigned long flags;
5313 + struct qcom_scm_desc desc = {
5314 +@@ -133,36 +129,50 @@ static void __query_convention(void)
5315 + .owner = ARM_SMCCC_OWNER_SIP,
5316 + };
5317 + struct qcom_scm_res res;
5318 ++ enum qcom_scm_convention probed_convention;
5319 + int ret;
5320 ++ bool forced = false;
5321 +
5322 +- spin_lock_irqsave(&query_lock, flags);
5323 +- if (has_queried)
5324 +- goto out;
5325 ++ if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
5326 ++ return qcom_scm_convention;
5327 +
5328 +- qcom_scm_convention = SMC_CONVENTION_ARM_64;
5329 +- // Device isn't required as there is only one argument - no device
5330 +- // needed to dma_map_single to secure world
5331 +- ret = scm_smc_call(NULL, &desc, &res, true);
5332 ++ /*
5333 ++ * Device isn't required as there is only one argument - no device
5334 ++ * needed to dma_map_single to secure world
5335 ++ */
5336 ++ probed_convention = SMC_CONVENTION_ARM_64;
5337 ++ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
5338 + if (!ret && res.result[0] == 1)
5339 +- goto out;
5340 ++ goto found;
5341 ++
5342 ++ /*
5343 ++ * Some SC7180 firmwares didn't implement the
5344 ++ * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fallback to forcing ARM_64
5345 ++ * calling conventions on these firmwares. Luckily we don't make any
5346 ++ * early calls into the firmware on these SoCs so the device pointer
5347 ++ * will be valid here to check if the compatible matches.
5348 ++ */
5349 ++ if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
5350 ++ forced = true;
5351 ++ goto found;
5352 ++ }
5353 +
5354 +- qcom_scm_convention = SMC_CONVENTION_ARM_32;
5355 +- ret = scm_smc_call(NULL, &desc, &res, true);
5356 ++ probed_convention = SMC_CONVENTION_ARM_32;
5357 ++ ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
5358 + if (!ret && res.result[0] == 1)
5359 +- goto out;
5360 +-
5361 +- qcom_scm_convention = SMC_CONVENTION_LEGACY;
5362 +-out:
5363 +- has_queried = true;
5364 +- spin_unlock_irqrestore(&query_lock, flags);
5365 +- pr_info("qcom_scm: convention: %s\n",
5366 +- qcom_scm_convention_names[qcom_scm_convention]);
5367 +-}
5368 ++ goto found;
5369 ++
5370 ++ probed_convention = SMC_CONVENTION_LEGACY;
5371 ++found:
5372 ++ spin_lock_irqsave(&scm_query_lock, flags);
5373 ++ if (probed_convention != qcom_scm_convention) {
5374 ++ qcom_scm_convention = probed_convention;
5375 ++ pr_info("qcom_scm: convention: %s%s\n",
5376 ++ qcom_scm_convention_names[qcom_scm_convention],
5377 ++ forced ? " (forced)" : "");
5378 ++ }
5379 ++ spin_unlock_irqrestore(&scm_query_lock, flags);
5380 +
5381 +-static inline enum qcom_scm_convention __get_convention(void)
5382 +-{
5383 +- if (unlikely(!has_queried))
5384 +- __query_convention();
5385 + return qcom_scm_convention;
5386 + }
5387 +
5388 +@@ -219,8 +229,8 @@ static int qcom_scm_call_atomic(struct device *dev,
5389 + }
5390 + }
5391 +
5392 +-static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
5393 +- u32 cmd_id)
5394 ++static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
5395 ++ u32 cmd_id)
5396 + {
5397 + int ret;
5398 + struct qcom_scm_desc desc = {
5399 +@@ -247,7 +257,7 @@ static int __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
5400 +
5401 + ret = qcom_scm_call(dev, &desc, &res);
5402 +
5403 +- return ret ? : res.result[0];
5404 ++ return ret ? false : !!res.result[0];
5405 + }
5406 +
5407 + /**
5408 +@@ -585,9 +595,8 @@ bool qcom_scm_pas_supported(u32 peripheral)
5409 + };
5410 + struct qcom_scm_res res;
5411 +
5412 +- ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
5413 +- QCOM_SCM_PIL_PAS_IS_SUPPORTED);
5414 +- if (ret <= 0)
5415 ++ if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
5416 ++ QCOM_SCM_PIL_PAS_IS_SUPPORTED))
5417 + return false;
5418 +
5419 + ret = qcom_scm_call(__scm->dev, &desc, &res);
5420 +@@ -1054,17 +1063,18 @@ EXPORT_SYMBOL(qcom_scm_ice_set_key);
5421 + */
5422 + bool qcom_scm_hdcp_available(void)
5423 + {
5424 ++ bool avail;
5425 + int ret = qcom_scm_clk_enable();
5426 +
5427 + if (ret)
5428 + return ret;
5429 +
5430 +- ret = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
5431 ++ avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
5432 + QCOM_SCM_HDCP_INVOKE);
5433 +
5434 + qcom_scm_clk_disable();
5435 +
5436 +- return ret > 0;
5437 ++ return avail;
5438 + }
5439 + EXPORT_SYMBOL(qcom_scm_hdcp_available);
5440 +
5441 +@@ -1236,7 +1246,7 @@ static int qcom_scm_probe(struct platform_device *pdev)
5442 + __scm = scm;
5443 + __scm->dev = &pdev->dev;
5444 +
5445 +- __query_convention();
5446 ++ __get_convention();
5447 +
5448 + /*
5449 + * If requested enable "download mode", from this point on warmboot
5450 +diff --git a/drivers/firmware/qcom_scm.h b/drivers/firmware/qcom_scm.h
5451 +index 95cd1ac30ab0b..632fe31424621 100644
5452 +--- a/drivers/firmware/qcom_scm.h
5453 ++++ b/drivers/firmware/qcom_scm.h
5454 +@@ -61,8 +61,11 @@ struct qcom_scm_res {
5455 + };
5456 +
5457 + #define SCM_SMC_FNID(s, c) ((((s) & 0xFF) << 8) | ((c) & 0xFF))
5458 +-extern int scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
5459 +- struct qcom_scm_res *res, bool atomic);
5460 ++extern int __scm_smc_call(struct device *dev, const struct qcom_scm_desc *desc,
5461 ++ enum qcom_scm_convention qcom_convention,
5462 ++ struct qcom_scm_res *res, bool atomic);
5463 ++#define scm_smc_call(dev, desc, res, atomic) \
5464 ++ __scm_smc_call((dev), (desc), qcom_scm_convention, (res), (atomic))
5465 +
5466 + #define SCM_LEGACY_FNID(s, c) (((s) << 10) | ((c) & 0x3ff))
5467 + extern int scm_legacy_call_atomic(struct device *dev,
5468 +diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
5469 +index fd95edeb702b1..9e6504592646e 100644
5470 +--- a/drivers/firmware/xilinx/zynqmp.c
5471 ++++ b/drivers/firmware/xilinx/zynqmp.c
5472 +@@ -2,7 +2,7 @@
5473 + /*
5474 + * Xilinx Zynq MPSoC Firmware layer
5475 + *
5476 +- * Copyright (C) 2014-2020 Xilinx, Inc.
5477 ++ * Copyright (C) 2014-2021 Xilinx, Inc.
5478 + *
5479 + * Michal Simek <michal.simek@××××××.com>
5480 + * Davorin Mista <davorin.mista@××××××.com>
5481 +@@ -1280,12 +1280,13 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
5482 + static int zynqmp_firmware_remove(struct platform_device *pdev)
5483 + {
5484 + struct pm_api_feature_data *feature_data;
5485 ++ struct hlist_node *tmp;
5486 + int i;
5487 +
5488 + mfd_remove_devices(&pdev->dev);
5489 + zynqmp_pm_api_debugfs_exit();
5490 +
5491 +- hash_for_each(pm_api_features_map, i, feature_data, hentry) {
5492 ++ hash_for_each_safe(pm_api_features_map, i, tmp, feature_data, hentry) {
5493 + hash_del(&feature_data->hentry);
5494 + kfree(feature_data);
5495 + }
5496 +diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
5497 +index 824abbbd631e4..d3e6f41e78bf7 100644
5498 +--- a/drivers/fpga/xilinx-spi.c
5499 ++++ b/drivers/fpga/xilinx-spi.c
5500 +@@ -233,25 +233,19 @@ static int xilinx_spi_probe(struct spi_device *spi)
5501 +
5502 + /* PROGRAM_B is active low */
5503 + conf->prog_b = devm_gpiod_get(&spi->dev, "prog_b", GPIOD_OUT_LOW);
5504 +- if (IS_ERR(conf->prog_b)) {
5505 +- dev_err(&spi->dev, "Failed to get PROGRAM_B gpio: %ld\n",
5506 +- PTR_ERR(conf->prog_b));
5507 +- return PTR_ERR(conf->prog_b);
5508 +- }
5509 ++ if (IS_ERR(conf->prog_b))
5510 ++ return dev_err_probe(&spi->dev, PTR_ERR(conf->prog_b),
5511 ++ "Failed to get PROGRAM_B gpio\n");
5512 +
5513 + conf->init_b = devm_gpiod_get_optional(&spi->dev, "init-b", GPIOD_IN);
5514 +- if (IS_ERR(conf->init_b)) {
5515 +- dev_err(&spi->dev, "Failed to get INIT_B gpio: %ld\n",
5516 +- PTR_ERR(conf->init_b));
5517 +- return PTR_ERR(conf->init_b);
5518 +- }
5519 ++ if (IS_ERR(conf->init_b))
5520 ++ return dev_err_probe(&spi->dev, PTR_ERR(conf->init_b),
5521 ++ "Failed to get INIT_B gpio\n");
5522 +
5523 + conf->done = devm_gpiod_get(&spi->dev, "done", GPIOD_IN);
5524 +- if (IS_ERR(conf->done)) {
5525 +- dev_err(&spi->dev, "Failed to get DONE gpio: %ld\n",
5526 +- PTR_ERR(conf->done));
5527 +- return PTR_ERR(conf->done);
5528 +- }
5529 ++ if (IS_ERR(conf->done))
5530 ++ return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
5531 ++ "Failed to get DONE gpio\n");
5532 +
5533 + mgr = devm_fpga_mgr_create(&spi->dev,
5534 + "Xilinx Slave Serial FPGA Manager",
5535 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
5536 +index 6e9a9e5dbea07..90e16d14e6c38 100644
5537 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
5538 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
5539 +@@ -215,7 +215,11 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
5540 + /* Check if we have an idle VMID */
5541 + i = 0;
5542 + list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
5543 +- fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, ring);
5544 ++ /* Don't use per engine and per process VMID at the same time */
5545 ++ struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
5546 ++ NULL : ring;
5547 ++
5548 ++ fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
5549 + if (!fences[i])
5550 + break;
5551 + ++i;
5552 +@@ -280,7 +284,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
5553 + if (updates && (*id)->flushed_updates &&
5554 + updates->context == (*id)->flushed_updates->context &&
5555 + !dma_fence_is_later(updates, (*id)->flushed_updates))
5556 +- updates = NULL;
5557 ++ updates = NULL;
5558 +
5559 + if ((*id)->owner != vm->immediate.fence_context ||
5560 + job->vm_pd_addr != (*id)->pd_gpu_addr ||
5561 +@@ -289,6 +293,10 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
5562 + !dma_fence_is_signaled((*id)->last_flush))) {
5563 + struct dma_fence *tmp;
5564 +
5565 ++ /* Don't use per engine and per process VMID at the same time */
5566 ++ if (adev->vm_manager.concurrent_flush)
5567 ++ ring = NULL;
5568 ++
5569 + /* to prevent one context starved by another context */
5570 + (*id)->pd_gpu_addr = 0;
5571 + tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
5572 +@@ -364,12 +372,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
5573 + if (updates && (!flushed || dma_fence_is_later(updates, flushed)))
5574 + needs_flush = true;
5575 +
5576 +- /* Concurrent flushes are only possible starting with Vega10 and
5577 +- * are broken on Navi10 and Navi14.
5578 +- */
5579 +- if (needs_flush && (adev->asic_type < CHIP_VEGA10 ||
5580 +- adev->asic_type == CHIP_NAVI10 ||
5581 +- adev->asic_type == CHIP_NAVI14))
5582 ++ if (needs_flush && !adev->vm_manager.concurrent_flush)
5583 + continue;
5584 +
5585 + /* Good, we can use this VMID. Remember this submission as
5586 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
5587 +index 605d1545274c2..b47829ff30af7 100644
5588 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
5589 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
5590 +@@ -3173,6 +3173,12 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
5591 + {
5592 + unsigned i;
5593 +
5594 ++ /* Concurrent flushes are only possible starting with Vega10 and
5595 ++ * are broken on Navi10 and Navi14.
5596 ++ */
5597 ++ adev->vm_manager.concurrent_flush = !(adev->asic_type < CHIP_VEGA10 ||
5598 ++ adev->asic_type == CHIP_NAVI10 ||
5599 ++ adev->asic_type == CHIP_NAVI14);
5600 + amdgpu_vmid_mgr_init(adev);
5601 +
5602 + adev->vm_manager.fence_context =
5603 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
5604 +index 58c83a7ad0fd9..c4218800e043f 100644
5605 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
5606 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
5607 +@@ -325,6 +325,7 @@ struct amdgpu_vm_manager {
5608 + /* Handling of VMIDs */
5609 + struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS];
5610 + unsigned int first_kfd_vmid;
5611 ++ bool concurrent_flush;
5612 +
5613 + /* Handling of VM fences */
5614 + u64 fence_context;
5615 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
5616 +index 66bbca61e3ef5..9318936aa8054 100644
5617 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
5618 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
5619 +@@ -20,6 +20,10 @@
5620 + * OTHER DEALINGS IN THE SOFTWARE.
5621 + */
5622 +
5623 ++#include <linux/kconfig.h>
5624 ++
5625 ++#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
5626 ++
5627 + #include <linux/printk.h>
5628 + #include <linux/device.h>
5629 + #include <linux/slab.h>
5630 +@@ -355,3 +359,5 @@ int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
5631 +
5632 + return 0;
5633 + }
5634 ++
5635 ++#endif
5636 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
5637 +index dd23d9fdf6a82..afd420b01a0c2 100644
5638 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
5639 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.h
5640 +@@ -23,7 +23,9 @@
5641 + #ifndef __KFD_IOMMU_H__
5642 + #define __KFD_IOMMU_H__
5643 +
5644 +-#if defined(CONFIG_AMD_IOMMU_V2_MODULE) || defined(CONFIG_AMD_IOMMU_V2)
5645 ++#include <linux/kconfig.h>
5646 ++
5647 ++#if IS_REACHABLE(CONFIG_AMD_IOMMU_V2)
5648 +
5649 + #define KFD_SUPPORT_IOMMU_V2
5650 +
5651 +@@ -46,6 +48,9 @@ static inline int kfd_iommu_check_device(struct kfd_dev *kfd)
5652 + }
5653 + static inline int kfd_iommu_device_init(struct kfd_dev *kfd)
5654 + {
5655 ++#if IS_MODULE(CONFIG_AMD_IOMMU_V2)
5656 ++ WARN_ONCE(1, "iommu_v2 module is not usable by built-in KFD");
5657 ++#endif
5658 + return 0;
5659 + }
5660 +
5661 +@@ -73,6 +78,6 @@ static inline int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
5662 + return 0;
5663 + }
5664 +
5665 +-#endif /* defined(CONFIG_AMD_IOMMU_V2) */
5666 ++#endif /* IS_REACHABLE(CONFIG_AMD_IOMMU_V2) */
5667 +
5668 + #endif /* __KFD_IOMMU_H__ */
5669 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
5670 +index d18341b7daacd..8180894bbd1e3 100644
5671 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
5672 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
5673 +@@ -3685,6 +3685,23 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
5674 + scaling_info->src_rect.x = state->src_x >> 16;
5675 + scaling_info->src_rect.y = state->src_y >> 16;
5676 +
5677 ++ /*
5678 ++ * For reasons we don't (yet) fully understand a non-zero
5679 ++ * src_y coordinate into an NV12 buffer can cause a
5680 ++ * system hang. To avoid hangs (and maybe be overly cautious)
5681 ++ * let's reject both non-zero src_x and src_y.
5682 ++ *
5683 ++ * We currently know of only one use-case to reproduce a
5684 ++ * scenario with non-zero src_x and src_y for NV12, which
5685 ++ * is to gesture the YouTube Android app into full screen
5686 ++ * on ChromeOS.
5687 ++ */
5688 ++ if (state->fb &&
5689 ++ state->fb->format->format == DRM_FORMAT_NV12 &&
5690 ++ (scaling_info->src_rect.x != 0 ||
5691 ++ scaling_info->src_rect.y != 0))
5692 ++ return -EINVAL;
5693 ++
5694 + scaling_info->src_rect.width = state->src_w >> 16;
5695 + if (scaling_info->src_rect.width == 0)
5696 + return -EINVAL;
5697 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
5698 +index 4e87e70237e3d..874b132fe1d78 100644
5699 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
5700 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
5701 +@@ -283,7 +283,7 @@ struct abm *dce_abm_create(
5702 + const struct dce_abm_shift *abm_shift,
5703 + const struct dce_abm_mask *abm_mask)
5704 + {
5705 +- struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
5706 ++ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_ATOMIC);
5707 +
5708 + if (abm_dce == NULL) {
5709 + BREAK_TO_DEBUGGER();
5710 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
5711 +index f0cebe721bcc1..4216419503af4 100644
5712 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
5713 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
5714 +@@ -925,7 +925,7 @@ struct dmcu *dcn10_dmcu_create(
5715 + const struct dce_dmcu_shift *dmcu_shift,
5716 + const struct dce_dmcu_mask *dmcu_mask)
5717 + {
5718 +- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
5719 ++ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
5720 +
5721 + if (dmcu_dce == NULL) {
5722 + BREAK_TO_DEBUGGER();
5723 +@@ -946,7 +946,7 @@ struct dmcu *dcn20_dmcu_create(
5724 + const struct dce_dmcu_shift *dmcu_shift,
5725 + const struct dce_dmcu_mask *dmcu_mask)
5726 + {
5727 +- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
5728 ++ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
5729 +
5730 + if (dmcu_dce == NULL) {
5731 + BREAK_TO_DEBUGGER();
5732 +@@ -967,7 +967,7 @@ struct dmcu *dcn21_dmcu_create(
5733 + const struct dce_dmcu_shift *dmcu_shift,
5734 + const struct dce_dmcu_mask *dmcu_mask)
5735 + {
5736 +- struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_KERNEL);
5737 ++ struct dce_dmcu *dmcu_dce = kzalloc(sizeof(*dmcu_dce), GFP_ATOMIC);
5738 +
5739 + if (dmcu_dce == NULL) {
5740 + BREAK_TO_DEBUGGER();
5741 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
5742 +index 62cc2651e00c1..8774406120fc1 100644
5743 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
5744 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
5745 +@@ -112,7 +112,7 @@ struct dccg *dccg2_create(
5746 + const struct dccg_shift *dccg_shift,
5747 + const struct dccg_mask *dccg_mask)
5748 + {
5749 +- struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
5750 ++ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_ATOMIC);
5751 + struct dccg *base;
5752 +
5753 + if (dccg_dcn == NULL) {
5754 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
5755 +index 4ea53c543e082..33488b3c5c3ce 100644
5756 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
5757 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
5758 +@@ -1104,7 +1104,7 @@ struct dpp *dcn20_dpp_create(
5759 + uint32_t inst)
5760 + {
5761 + struct dcn20_dpp *dpp =
5762 +- kzalloc(sizeof(struct dcn20_dpp), GFP_KERNEL);
5763 ++ kzalloc(sizeof(struct dcn20_dpp), GFP_ATOMIC);
5764 +
5765 + if (!dpp)
5766 + return NULL;
5767 +@@ -1122,7 +1122,7 @@ struct input_pixel_processor *dcn20_ipp_create(
5768 + struct dc_context *ctx, uint32_t inst)
5769 + {
5770 + struct dcn10_ipp *ipp =
5771 +- kzalloc(sizeof(struct dcn10_ipp), GFP_KERNEL);
5772 ++ kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
5773 +
5774 + if (!ipp) {
5775 + BREAK_TO_DEBUGGER();
5776 +@@ -1139,7 +1139,7 @@ struct output_pixel_processor *dcn20_opp_create(
5777 + struct dc_context *ctx, uint32_t inst)
5778 + {
5779 + struct dcn20_opp *opp =
5780 +- kzalloc(sizeof(struct dcn20_opp), GFP_KERNEL);
5781 ++ kzalloc(sizeof(struct dcn20_opp), GFP_ATOMIC);
5782 +
5783 + if (!opp) {
5784 + BREAK_TO_DEBUGGER();
5785 +@@ -1156,7 +1156,7 @@ struct dce_aux *dcn20_aux_engine_create(
5786 + uint32_t inst)
5787 + {
5788 + struct aux_engine_dce110 *aux_engine =
5789 +- kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
5790 ++ kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
5791 +
5792 + if (!aux_engine)
5793 + return NULL;
5794 +@@ -1194,7 +1194,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
5795 + uint32_t inst)
5796 + {
5797 + struct dce_i2c_hw *dce_i2c_hw =
5798 +- kzalloc(sizeof(struct dce_i2c_hw), GFP_KERNEL);
5799 ++ kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
5800 +
5801 + if (!dce_i2c_hw)
5802 + return NULL;
5803 +@@ -1207,7 +1207,7 @@ struct dce_i2c_hw *dcn20_i2c_hw_create(
5804 + struct mpc *dcn20_mpc_create(struct dc_context *ctx)
5805 + {
5806 + struct dcn20_mpc *mpc20 = kzalloc(sizeof(struct dcn20_mpc),
5807 +- GFP_KERNEL);
5808 ++ GFP_ATOMIC);
5809 +
5810 + if (!mpc20)
5811 + return NULL;
5812 +@@ -1225,7 +1225,7 @@ struct hubbub *dcn20_hubbub_create(struct dc_context *ctx)
5813 + {
5814 + int i;
5815 + struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
5816 +- GFP_KERNEL);
5817 ++ GFP_ATOMIC);
5818 +
5819 + if (!hubbub)
5820 + return NULL;
5821 +@@ -1253,7 +1253,7 @@ struct timing_generator *dcn20_timing_generator_create(
5822 + uint32_t instance)
5823 + {
5824 + struct optc *tgn10 =
5825 +- kzalloc(sizeof(struct optc), GFP_KERNEL);
5826 ++ kzalloc(sizeof(struct optc), GFP_ATOMIC);
5827 +
5828 + if (!tgn10)
5829 + return NULL;
5830 +@@ -1332,7 +1332,7 @@ static struct clock_source *dcn20_clock_source_create(
5831 + bool dp_clk_src)
5832 + {
5833 + struct dce110_clk_src *clk_src =
5834 +- kzalloc(sizeof(struct dce110_clk_src), GFP_KERNEL);
5835 ++ kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
5836 +
5837 + if (!clk_src)
5838 + return NULL;
5839 +@@ -1438,7 +1438,7 @@ struct display_stream_compressor *dcn20_dsc_create(
5840 + struct dc_context *ctx, uint32_t inst)
5841 + {
5842 + struct dcn20_dsc *dsc =
5843 +- kzalloc(sizeof(struct dcn20_dsc), GFP_KERNEL);
5844 ++ kzalloc(sizeof(struct dcn20_dsc), GFP_ATOMIC);
5845 +
5846 + if (!dsc) {
5847 + BREAK_TO_DEBUGGER();
5848 +@@ -1572,7 +1572,7 @@ struct hubp *dcn20_hubp_create(
5849 + uint32_t inst)
5850 + {
5851 + struct dcn20_hubp *hubp2 =
5852 +- kzalloc(sizeof(struct dcn20_hubp), GFP_KERNEL);
5853 ++ kzalloc(sizeof(struct dcn20_hubp), GFP_ATOMIC);
5854 +
5855 + if (!hubp2)
5856 + return NULL;
5857 +@@ -3391,7 +3391,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool)
5858 +
5859 + static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx)
5860 + {
5861 +- struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
5862 ++ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_ATOMIC);
5863 +
5864 + if (!pp_smu)
5865 + return pp_smu;
5866 +@@ -4142,7 +4142,7 @@ struct resource_pool *dcn20_create_resource_pool(
5867 + struct dc *dc)
5868 + {
5869 + struct dcn20_resource_pool *pool =
5870 +- kzalloc(sizeof(struct dcn20_resource_pool), GFP_KERNEL);
5871 ++ kzalloc(sizeof(struct dcn20_resource_pool), GFP_ATOMIC);
5872 +
5873 + if (!pool)
5874 + return NULL;
5875 +diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
5876 +index 5e384a8a83dc2..51855a2624cf4 100644
5877 +--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
5878 ++++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
5879 +@@ -39,7 +39,7 @@
5880 + #define HDCP14_KSV_SIZE 5
5881 + #define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE
5882 +
5883 +-static const bool hdcp_cmd_is_read[] = {
5884 ++static const bool hdcp_cmd_is_read[HDCP_MESSAGE_ID_MAX] = {
5885 + [HDCP_MESSAGE_ID_READ_BKSV] = true,
5886 + [HDCP_MESSAGE_ID_READ_RI_R0] = true,
5887 + [HDCP_MESSAGE_ID_READ_PJ] = true,
5888 +@@ -75,7 +75,7 @@ static const bool hdcp_cmd_is_read[] = {
5889 + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false
5890 + };
5891 +
5892 +-static const uint8_t hdcp_i2c_offsets[] = {
5893 ++static const uint8_t hdcp_i2c_offsets[HDCP_MESSAGE_ID_MAX] = {
5894 + [HDCP_MESSAGE_ID_READ_BKSV] = 0x0,
5895 + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8,
5896 + [HDCP_MESSAGE_ID_READ_PJ] = 0xA,
5897 +@@ -106,7 +106,8 @@ static const uint8_t hdcp_i2c_offsets[] = {
5898 + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
5899 + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
5900 + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
5901 +- [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70
5902 ++ [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70,
5903 ++ [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x0,
5904 + };
5905 +
5906 + struct protection_properties {
5907 +@@ -184,7 +185,7 @@ static const struct protection_properties hdmi_14_protection = {
5908 + .process_transaction = hdmi_14_process_transaction
5909 + };
5910 +
5911 +-static const uint32_t hdcp_dpcd_addrs[] = {
5912 ++static const uint32_t hdcp_dpcd_addrs[HDCP_MESSAGE_ID_MAX] = {
5913 + [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000,
5914 + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005,
5915 + [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF,
5916 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
5917 +index 5cc45b1cff7e7..e5893218fa4bb 100644
5918 +--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
5919 ++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
5920 +@@ -2001,6 +2001,7 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
5921 + dev_err(smu->adev->dev,
5922 + "New power limit (%d) is over the max allowed %d\n",
5923 + limit, smu->max_power_limit);
5924 ++ ret = -EINVAL;
5925 + goto out;
5926 + }
5927 +
5928 +diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
5929 +index ef91646441b16..e145cbb35baca 100644
5930 +--- a/drivers/gpu/drm/bridge/Kconfig
5931 ++++ b/drivers/gpu/drm/bridge/Kconfig
5932 +@@ -54,6 +54,7 @@ config DRM_LONTIUM_LT9611
5933 + depends on OF
5934 + select DRM_PANEL_BRIDGE
5935 + select DRM_KMS_HELPER
5936 ++ select DRM_MIPI_DSI
5937 + select REGMAP_I2C
5938 + help
5939 + Driver for Lontium LT9611 DSI to HDMI bridge
5940 +@@ -138,6 +139,7 @@ config DRM_SII902X
5941 + tristate "Silicon Image sii902x RGB/HDMI bridge"
5942 + depends on OF
5943 + select DRM_KMS_HELPER
5944 ++ select DRM_MIPI_DSI
5945 + select REGMAP_I2C
5946 + select I2C_MUX
5947 + select SND_SOC_HDMI_CODEC if SND_SOC
5948 +@@ -187,6 +189,7 @@ config DRM_TOSHIBA_TC358767
5949 + tristate "Toshiba TC358767 eDP bridge"
5950 + depends on OF
5951 + select DRM_KMS_HELPER
5952 ++ select DRM_MIPI_DSI
5953 + select REGMAP_I2C
5954 + select DRM_PANEL
5955 + help
5956 +diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
5957 +index 0ddc37551194e..c916f4b8907ef 100644
5958 +--- a/drivers/gpu/drm/bridge/panel.c
5959 ++++ b/drivers/gpu/drm/bridge/panel.c
5960 +@@ -87,6 +87,18 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
5961 +
5962 + static void panel_bridge_detach(struct drm_bridge *bridge)
5963 + {
5964 ++ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
5965 ++ struct drm_connector *connector = &panel_bridge->connector;
5966 ++
5967 ++ /*
5968 ++ * Cleanup the connector if we know it was initialized.
5969 ++ *
5970 ++ * FIXME: This wouldn't be needed if the panel_bridge structure was
5971 ++ * allocated with drmm_kzalloc(). This might be tricky since the
5972 ++ * drm_device pointer can only be retrieved when the bridge is attached.
5973 ++ */
5974 ++ if (connector->dev)
5975 ++ drm_connector_cleanup(connector);
5976 + }
5977 +
5978 + static void panel_bridge_pre_enable(struct drm_bridge *bridge)
5979 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
5980 +index 9cf35dab25273..a08cc6b53bc2f 100644
5981 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
5982 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
5983 +@@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
5984 +
5985 + req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
5986 + drm_dp_encode_sideband_req(&req, msg);
5987 ++ msg->path_msg = true;
5988 + }
5989 +
5990 + static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
5991 +@@ -2824,15 +2825,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
5992 +
5993 + req_type = txmsg->msg[0] & 0x7f;
5994 + if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
5995 +- req_type == DP_RESOURCE_STATUS_NOTIFY)
5996 ++ req_type == DP_RESOURCE_STATUS_NOTIFY ||
5997 ++ req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
5998 + hdr->broadcast = 1;
5999 + else
6000 + hdr->broadcast = 0;
6001 + hdr->path_msg = txmsg->path_msg;
6002 +- hdr->lct = mstb->lct;
6003 +- hdr->lcr = mstb->lct - 1;
6004 +- if (mstb->lct > 1)
6005 +- memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
6006 ++ if (hdr->broadcast) {
6007 ++ hdr->lct = 1;
6008 ++ hdr->lcr = 6;
6009 ++ } else {
6010 ++ hdr->lct = mstb->lct;
6011 ++ hdr->lcr = mstb->lct - 1;
6012 ++ }
6013 ++
6014 ++ memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
6015 +
6016 + return 0;
6017 + }
6018 +diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
6019 +index d6017726cc2a0..e5432dcf69996 100644
6020 +--- a/drivers/gpu/drm/drm_probe_helper.c
6021 ++++ b/drivers/gpu/drm/drm_probe_helper.c
6022 +@@ -623,6 +623,7 @@ static void output_poll_execute(struct work_struct *work)
6023 + struct drm_connector_list_iter conn_iter;
6024 + enum drm_connector_status old_status;
6025 + bool repoll = false, changed;
6026 ++ u64 old_epoch_counter;
6027 +
6028 + if (!dev->mode_config.poll_enabled)
6029 + return;
6030 +@@ -659,8 +660,9 @@ static void output_poll_execute(struct work_struct *work)
6031 +
6032 + repoll = true;
6033 +
6034 ++ old_epoch_counter = connector->epoch_counter;
6035 + connector->status = drm_helper_probe_detect(connector, NULL, false);
6036 +- if (old_status != connector->status) {
6037 ++ if (old_epoch_counter != connector->epoch_counter) {
6038 + const char *old, *new;
6039 +
6040 + /*
6041 +@@ -689,6 +691,9 @@ static void output_poll_execute(struct work_struct *work)
6042 + connector->base.id,
6043 + connector->name,
6044 + old, new);
6045 ++ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] epoch counter %llu -> %llu\n",
6046 ++ connector->base.id, connector->name,
6047 ++ old_epoch_counter, connector->epoch_counter);
6048 +
6049 + changed = true;
6050 + }
6051 +diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
6052 +index d7898e87791fe..234650230701d 100644
6053 +--- a/drivers/gpu/drm/i915/gvt/display.c
6054 ++++ b/drivers/gpu/drm/i915/gvt/display.c
6055 +@@ -173,21 +173,176 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
6056 + int pipe;
6057 +
6058 + if (IS_BROXTON(dev_priv)) {
6059 ++ enum transcoder trans;
6060 ++ enum port port;
6061 ++
6062 ++ /* Clear PIPE, DDI, PHY, HPD before setting new */
6063 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~(BXT_DE_PORT_HP_DDIA |
6064 + BXT_DE_PORT_HP_DDIB |
6065 + BXT_DE_PORT_HP_DDIC);
6066 +
6067 ++ for_each_pipe(dev_priv, pipe) {
6068 ++ vgpu_vreg_t(vgpu, PIPECONF(pipe)) &=
6069 ++ ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE);
6070 ++ vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
6071 ++ vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
6072 ++ vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
6073 ++ vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
6074 ++ }
6075 ++
6076 ++ for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) {
6077 ++ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(trans)) &=
6078 ++ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
6079 ++ TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE);
6080 ++ }
6081 ++ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
6082 ++ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
6083 ++ TRANS_DDI_PORT_MASK);
6084 ++
6085 ++ for (port = PORT_A; port <= PORT_C; port++) {
6086 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) &=
6087 ++ ~BXT_PHY_LANE_ENABLED;
6088 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) |=
6089 ++ (BXT_PHY_CMNLANE_POWERDOWN_ACK |
6090 ++ BXT_PHY_LANE_POWERDOWN_ACK);
6091 ++
6092 ++ vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)) &=
6093 ++ ~(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
6094 ++ PORT_PLL_REF_SEL | PORT_PLL_LOCK |
6095 ++ PORT_PLL_ENABLE);
6096 ++
6097 ++ vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) &=
6098 ++ ~(DDI_INIT_DISPLAY_DETECTED |
6099 ++ DDI_BUF_CTL_ENABLE);
6100 ++ vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
6101 ++ }
6102 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
6103 ++ ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK);
6104 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
6105 ++ ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK);
6106 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
6107 ++ ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK);
6108 ++ /* No hpd_invert set in vgpu vbt, need to clear invert mask */
6109 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK;
6110 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK;
6111 ++
6112 ++ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
6113 ++ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
6114 ++ ~PHY_POWER_GOOD;
6115 ++ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
6116 ++ ~PHY_POWER_GOOD;
6117 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30);
6118 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30);
6119 ++
6120 ++ vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED;
6121 ++ vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED;
6122 ++
6123 ++ /*
6124 ++ * Only 1 PIPE enabled in current vGPU display and PIPE_A is
6125 ++ * tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A,
6126 ++ * TRANSCODER_A can be enabled. PORT_x depends on the input of
6127 ++ * setup_virtual_dp_monitor.
6128 ++ */
6129 ++ vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
6130 ++ vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE;
6131 ++
6132 ++ /*
6133 ++ * Golden M/N are calculated based on:
6134 ++ * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
6135 ++ * DP link clk 1620 MHz and non-constant_n.
6136 ++ * TODO: calculate DP link symbol clk and stream clk m/n.
6137 ++ */
6138 ++ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
6139 ++ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
6140 ++ vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
6141 ++ vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
6142 ++ vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
6143 ++
6144 ++ /* Enable per-DDI/PORT vreg */
6145 + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
6146 ++ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(1);
6147 ++ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
6148 ++ PHY_POWER_GOOD;
6149 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) |=
6150 ++ BIT(30);
6151 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
6152 ++ BXT_PHY_LANE_ENABLED;
6153 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
6154 ++ ~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
6155 ++ BXT_PHY_LANE_POWERDOWN_ACK);
6156 ++ vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_A)) |=
6157 ++ (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
6158 ++ PORT_PLL_REF_SEL | PORT_PLL_LOCK |
6159 ++ PORT_PLL_ENABLE);
6160 ++ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |=
6161 ++ (DDI_BUF_CTL_ENABLE | DDI_INIT_DISPLAY_DETECTED);
6162 ++ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &=
6163 ++ ~DDI_BUF_IS_IDLE;
6164 ++ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
6165 ++ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
6166 ++ TRANS_DDI_FUNC_ENABLE);
6167 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
6168 ++ PORTA_HOTPLUG_ENABLE;
6169 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
6170 + BXT_DE_PORT_HP_DDIA;
6171 + }
6172 +
6173 + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
6174 ++ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
6175 ++ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0);
6176 ++ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
6177 ++ PHY_POWER_GOOD;
6178 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |=
6179 ++ BIT(30);
6180 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
6181 ++ BXT_PHY_LANE_ENABLED;
6182 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
6183 ++ ~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
6184 ++ BXT_PHY_LANE_POWERDOWN_ACK);
6185 ++ vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_B)) |=
6186 ++ (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
6187 ++ PORT_PLL_REF_SEL | PORT_PLL_LOCK |
6188 ++ PORT_PLL_ENABLE);
6189 ++ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |=
6190 ++ DDI_BUF_CTL_ENABLE;
6191 ++ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &=
6192 ++ ~DDI_BUF_IS_IDLE;
6193 ++ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
6194 ++ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
6195 ++ (PORT_B << TRANS_DDI_PORT_SHIFT) |
6196 ++ TRANS_DDI_FUNC_ENABLE);
6197 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
6198 ++ PORTB_HOTPLUG_ENABLE;
6199 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
6200 + BXT_DE_PORT_HP_DDIB;
6201 + }
6202 +
6203 + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
6204 ++ vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
6205 ++ vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0);
6206 ++ vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
6207 ++ PHY_POWER_GOOD;
6208 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |=
6209 ++ BIT(30);
6210 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
6211 ++ BXT_PHY_LANE_ENABLED;
6212 ++ vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
6213 ++ ~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
6214 ++ BXT_PHY_LANE_POWERDOWN_ACK);
6215 ++ vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_C)) |=
6216 ++ (PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
6217 ++ PORT_PLL_REF_SEL | PORT_PLL_LOCK |
6218 ++ PORT_PLL_ENABLE);
6219 ++ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |=
6220 ++ DDI_BUF_CTL_ENABLE;
6221 ++ vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &=
6222 ++ ~DDI_BUF_IS_IDLE;
6223 ++ vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
6224 ++ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
6225 ++ (PORT_B << TRANS_DDI_PORT_SHIFT) |
6226 ++ TRANS_DDI_FUNC_ENABLE);
6227 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
6228 ++ PORTC_HOTPLUG_ENABLE;
6229 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
6230 + BXT_DE_PORT_HP_DDIC;
6231 + }
6232 +@@ -519,6 +674,63 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
6233 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
6234 + PORTD_HOTPLUG_STATUS_MASK;
6235 + intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
6236 ++ } else if (IS_BROXTON(i915)) {
6237 ++ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
6238 ++ if (connected) {
6239 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
6240 ++ BXT_DE_PORT_HP_DDIA;
6241 ++ } else {
6242 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
6243 ++ ~BXT_DE_PORT_HP_DDIA;
6244 ++ }
6245 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
6246 ++ BXT_DE_PORT_HP_DDIA;
6247 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
6248 ++ ~PORTA_HOTPLUG_STATUS_MASK;
6249 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
6250 ++ PORTA_HOTPLUG_LONG_DETECT;
6251 ++ intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG);
6252 ++ }
6253 ++ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
6254 ++ if (connected) {
6255 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
6256 ++ BXT_DE_PORT_HP_DDIB;
6257 ++ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
6258 ++ SFUSE_STRAP_DDIB_DETECTED;
6259 ++ } else {
6260 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
6261 ++ ~BXT_DE_PORT_HP_DDIB;
6262 ++ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
6263 ++ ~SFUSE_STRAP_DDIB_DETECTED;
6264 ++ }
6265 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
6266 ++ BXT_DE_PORT_HP_DDIB;
6267 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
6268 ++ ~PORTB_HOTPLUG_STATUS_MASK;
6269 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
6270 ++ PORTB_HOTPLUG_LONG_DETECT;
6271 ++ intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
6272 ++ }
6273 ++ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
6274 ++ if (connected) {
6275 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
6276 ++ BXT_DE_PORT_HP_DDIC;
6277 ++ vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
6278 ++ SFUSE_STRAP_DDIC_DETECTED;
6279 ++ } else {
6280 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
6281 ++ ~BXT_DE_PORT_HP_DDIC;
6282 ++ vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
6283 ++ ~SFUSE_STRAP_DDIC_DETECTED;
6284 ++ }
6285 ++ vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |=
6286 ++ BXT_DE_PORT_HP_DDIC;
6287 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &=
6288 ++ ~PORTC_HOTPLUG_STATUS_MASK;
6289 ++ vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
6290 ++ PORTC_HOTPLUG_LONG_DETECT;
6291 ++ intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG);
6292 ++ }
6293 + }
6294 + }
6295 +
6296 +diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
6297 +index c7c5612378832..5c9ef8e58a087 100644
6298 +--- a/drivers/gpu/drm/i915/gvt/gvt.c
6299 ++++ b/drivers/gpu/drm/i915/gvt/gvt.c
6300 +@@ -126,7 +126,7 @@ static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups
6301 + return true;
6302 + }
6303 +
6304 +-static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
6305 ++static int intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
6306 + {
6307 + int i, j;
6308 + struct intel_vgpu_type *type;
6309 +@@ -144,7 +144,7 @@ static bool intel_gvt_init_vgpu_type_groups(struct intel_gvt *gvt)
6310 + gvt_vgpu_type_groups[i] = group;
6311 + }
6312 +
6313 +- return true;
6314 ++ return 0;
6315 +
6316 + unwind:
6317 + for (j = 0; j < i; j++) {
6318 +@@ -152,7 +152,7 @@ unwind:
6319 + kfree(group);
6320 + }
6321 +
6322 +- return false;
6323 ++ return -ENOMEM;
6324 + }
6325 +
6326 + static void intel_gvt_cleanup_vgpu_type_groups(struct intel_gvt *gvt)
6327 +@@ -360,7 +360,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
6328 + goto out_clean_thread;
6329 +
6330 + ret = intel_gvt_init_vgpu_type_groups(gvt);
6331 +- if (ret == false) {
6332 ++ if (ret) {
6333 + gvt_err("failed to init vgpu type groups: %d\n", ret);
6334 + goto out_clean_types;
6335 + }
6336 +diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
6337 +index b6811f6a230df..24210b1eaec58 100644
6338 +--- a/drivers/gpu/drm/i915/gvt/mmio.c
6339 ++++ b/drivers/gpu/drm/i915/gvt/mmio.c
6340 +@@ -280,6 +280,11 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
6341 + vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
6342 + BXT_PHY_CMNLANE_POWERDOWN_ACK |
6343 + BXT_PHY_LANE_POWERDOWN_ACK;
6344 ++ vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
6345 ++ SKL_FUSE_DOWNLOAD_STATUS |
6346 ++ SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
6347 ++ SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
6348 ++ SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
6349 + }
6350 + } else {
6351 + #define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
6352 +diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
6353 +index 399582aeeefb9..821b6c3ff88b2 100644
6354 +--- a/drivers/gpu/drm/i915/gvt/vgpu.c
6355 ++++ b/drivers/gpu/drm/i915/gvt/vgpu.c
6356 +@@ -437,10 +437,9 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
6357 + if (ret)
6358 + goto out_clean_sched_policy;
6359 +
6360 +- if (IS_BROADWELL(dev_priv))
6361 ++ if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv))
6362 + ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B);
6363 +- /* FixMe: Re-enable APL/BXT once vfio_edid enabled */
6364 +- else if (!IS_BROXTON(dev_priv))
6365 ++ else
6366 + ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
6367 + if (ret)
6368 + goto out_clean_sched_policy;
6369 +diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
6370 +index 2314c81229920..b3fd3501c4127 100644
6371 +--- a/drivers/gpu/drm/mcde/mcde_dsi.c
6372 ++++ b/drivers/gpu/drm/mcde/mcde_dsi.c
6373 +@@ -760,7 +760,7 @@ static void mcde_dsi_start(struct mcde_dsi *d)
6374 + DSI_MCTL_MAIN_DATA_CTL_BTA_EN |
6375 + DSI_MCTL_MAIN_DATA_CTL_READ_EN |
6376 + DSI_MCTL_MAIN_DATA_CTL_REG_TE_EN;
6377 +- if (d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
6378 ++ if (!(d->mdsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET))
6379 + val |= DSI_MCTL_MAIN_DATA_CTL_HOST_EOT_GEN;
6380 + writel(val, d->regs + DSI_MCTL_MAIN_DATA_CTL);
6381 +
6382 +diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
6383 +index b9a0e56f33e24..ef70140c5b09d 100644
6384 +--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
6385 ++++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
6386 +@@ -898,8 +898,7 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
6387 + */
6388 + dsi->hs_rate = 349440000;
6389 + dsi->lp_rate = 9600000;
6390 +- dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
6391 +- MIPI_DSI_MODE_EOT_PACKET;
6392 ++ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
6393 +
6394 + /*
6395 + * Every new incarnation of this display must have a unique
6396 +diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
6397 +index 4aac0d1573dd0..70560cac53a99 100644
6398 +--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
6399 ++++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
6400 +@@ -184,9 +184,7 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
6401 + * As we only send commands we do not need to be continuously
6402 + * clocked.
6403 + */
6404 +- dsi->mode_flags =
6405 +- MIPI_DSI_CLOCK_NON_CONTINUOUS |
6406 +- MIPI_DSI_MODE_EOT_PACKET;
6407 ++ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
6408 +
6409 + s6->supply = devm_regulator_get(dev, "vdd1");
6410 + if (IS_ERR(s6->supply))
6411 +diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
6412 +index eec74c10dddaf..9c3563c61e8cc 100644
6413 +--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
6414 ++++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0-dsi.c
6415 +@@ -97,7 +97,6 @@ static int s6e63m0_dsi_probe(struct mipi_dsi_device *dsi)
6416 + dsi->hs_rate = 349440000;
6417 + dsi->lp_rate = 9600000;
6418 + dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
6419 +- MIPI_DSI_MODE_EOT_PACKET |
6420 + MIPI_DSI_MODE_VIDEO_BURST;
6421 +
6422 + ret = s6e63m0_probe(dev, s6e63m0_dsi_dcs_read, s6e63m0_dsi_dcs_write,
6423 +diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
6424 +index 065efae213f5b..95659a4d15e97 100644
6425 +--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
6426 ++++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
6427 +@@ -449,8 +449,7 @@ static int acx424akp_probe(struct mipi_dsi_device *dsi)
6428 + MIPI_DSI_MODE_VIDEO_BURST;
6429 + else
6430 + dsi->mode_flags =
6431 +- MIPI_DSI_CLOCK_NON_CONTINUOUS |
6432 +- MIPI_DSI_MODE_EOT_PACKET;
6433 ++ MIPI_DSI_CLOCK_NON_CONTINUOUS;
6434 +
6435 + acx->supply = devm_regulator_get(dev, "vddi");
6436 + if (IS_ERR(acx->supply))
6437 +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
6438 +index be8d68fb0e11e..1986862163178 100644
6439 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
6440 ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
6441 +@@ -495,8 +495,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
6442 + }
6443 + bo->base.pages = pages;
6444 + bo->base.pages_use_count = 1;
6445 +- } else
6446 ++ } else {
6447 + pages = bo->base.pages;
6448 ++ if (pages[page_offset]) {
6449 ++ /* Pages are already mapped, bail out. */
6450 ++ mutex_unlock(&bo->base.pages_lock);
6451 ++ goto out;
6452 ++ }
6453 ++ }
6454 +
6455 + mapping = bo->base.base.filp->f_mapping;
6456 + mapping_set_unevictable(mapping);
6457 +@@ -529,6 +535,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
6458 +
6459 + dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
6460 +
6461 ++out:
6462 + panfrost_gem_mapping_put(bomapping);
6463 +
6464 + return 0;
6465 +@@ -600,6 +607,8 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
6466 + access_type = (fault_status >> 8) & 0x3;
6467 + source_id = (fault_status >> 16);
6468 +
6469 ++ mmu_write(pfdev, MMU_INT_CLEAR, mask);
6470 ++
6471 + /* Page fault only */
6472 + ret = -1;
6473 + if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
6474 +@@ -623,8 +632,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
6475 + access_type, access_type_name(pfdev, fault_status),
6476 + source_id);
6477 +
6478 +- mmu_write(pfdev, MMU_INT_CLEAR, mask);
6479 +-
6480 + status &= ~mask;
6481 + }
6482 +
6483 +diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
6484 +index 54e3c3a974407..741cc983daf1c 100644
6485 +--- a/drivers/gpu/drm/qxl/qxl_cmd.c
6486 ++++ b/drivers/gpu/drm/qxl/qxl_cmd.c
6487 +@@ -268,7 +268,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
6488 + int ret;
6489 +
6490 + ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
6491 +- false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
6492 ++ false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
6493 + if (ret) {
6494 + DRM_ERROR("failed to allocate VRAM BO\n");
6495 + return ret;
6496 +diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
6497 +index 1f0802f5d84ef..f22a1b776f4ba 100644
6498 +--- a/drivers/gpu/drm/qxl/qxl_display.c
6499 ++++ b/drivers/gpu/drm/qxl/qxl_display.c
6500 +@@ -791,8 +791,8 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
6501 + qdev->dumb_shadow_bo = NULL;
6502 + }
6503 + qxl_bo_create(qdev, surf.height * surf.stride,
6504 +- true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
6505 +- &qdev->dumb_shadow_bo);
6506 ++ true, true, QXL_GEM_DOMAIN_SURFACE, 0,
6507 ++ &surf, &qdev->dumb_shadow_bo);
6508 + }
6509 + if (user_bo->shadow != qdev->dumb_shadow_bo) {
6510 + if (user_bo->shadow) {
6511 +diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
6512 +index 41cdf9d1e59dc..6e7f16f4cec79 100644
6513 +--- a/drivers/gpu/drm/qxl/qxl_drv.c
6514 ++++ b/drivers/gpu/drm/qxl/qxl_drv.c
6515 +@@ -144,8 +144,6 @@ static void qxl_drm_release(struct drm_device *dev)
6516 + * reodering qxl_modeset_fini() + qxl_device_fini() calls is
6517 + * non-trivial though.
6518 + */
6519 +- if (!dev->registered)
6520 +- return;
6521 + qxl_modeset_fini(qdev);
6522 + qxl_device_fini(qdev);
6523 + }
6524 +diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
6525 +index 48e096285b4c6..a08da0bd9098b 100644
6526 +--- a/drivers/gpu/drm/qxl/qxl_gem.c
6527 ++++ b/drivers/gpu/drm/qxl/qxl_gem.c
6528 +@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
6529 + /* At least align on page size */
6530 + if (alignment < PAGE_SIZE)
6531 + alignment = PAGE_SIZE;
6532 +- r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
6533 ++ r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
6534 + if (r) {
6535 + if (r != -ERESTARTSYS)
6536 + DRM_ERROR(
6537 +diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
6538 +index 2bc364412e8b8..544a9e4df2a86 100644
6539 +--- a/drivers/gpu/drm/qxl/qxl_object.c
6540 ++++ b/drivers/gpu/drm/qxl/qxl_object.c
6541 +@@ -103,8 +103,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
6542 + .print_info = drm_gem_ttm_print_info,
6543 + };
6544 +
6545 +-int qxl_bo_create(struct qxl_device *qdev,
6546 +- unsigned long size, bool kernel, bool pinned, u32 domain,
6547 ++int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
6548 ++ bool kernel, bool pinned, u32 domain, u32 priority,
6549 + struct qxl_surface *surf,
6550 + struct qxl_bo **bo_ptr)
6551 + {
6552 +@@ -137,6 +137,7 @@ int qxl_bo_create(struct qxl_device *qdev,
6553 +
6554 + qxl_ttm_placement_from_domain(bo, domain, pinned);
6555 +
6556 ++ bo->tbo.priority = priority;
6557 + r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
6558 + &bo->placement, 0, !kernel, size,
6559 + NULL, NULL, &qxl_ttm_bo_destroy);
6560 +diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
6561 +index 6b434e5ef795a..5762ea40d047c 100644
6562 +--- a/drivers/gpu/drm/qxl/qxl_object.h
6563 ++++ b/drivers/gpu/drm/qxl/qxl_object.h
6564 +@@ -84,6 +84,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
6565 + extern int qxl_bo_create(struct qxl_device *qdev,
6566 + unsigned long size,
6567 + bool kernel, bool pinned, u32 domain,
6568 ++ u32 priority,
6569 + struct qxl_surface *surf,
6570 + struct qxl_bo **bo_ptr);
6571 + extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
6572 +diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
6573 +index 4fae3e393da14..b2a475a0ca4aa 100644
6574 +--- a/drivers/gpu/drm/qxl/qxl_release.c
6575 ++++ b/drivers/gpu/drm/qxl/qxl_release.c
6576 +@@ -199,11 +199,12 @@ qxl_release_free(struct qxl_device *qdev,
6577 + }
6578 +
6579 + static int qxl_release_bo_alloc(struct qxl_device *qdev,
6580 +- struct qxl_bo **bo)
6581 ++ struct qxl_bo **bo,
6582 ++ u32 priority)
6583 + {
6584 + /* pin releases bo's they are too messy to evict */
6585 + return qxl_bo_create(qdev, PAGE_SIZE, false, true,
6586 +- QXL_GEM_DOMAIN_VRAM, NULL, bo);
6587 ++ QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
6588 + }
6589 +
6590 + int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
6591 +@@ -326,13 +327,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
6592 + int ret = 0;
6593 + union qxl_release_info *info;
6594 + int cur_idx;
6595 ++ u32 priority;
6596 +
6597 +- if (type == QXL_RELEASE_DRAWABLE)
6598 ++ if (type == QXL_RELEASE_DRAWABLE) {
6599 + cur_idx = 0;
6600 +- else if (type == QXL_RELEASE_SURFACE_CMD)
6601 ++ priority = 0;
6602 ++ } else if (type == QXL_RELEASE_SURFACE_CMD) {
6603 + cur_idx = 1;
6604 +- else if (type == QXL_RELEASE_CURSOR_CMD)
6605 ++ priority = 1;
6606 ++ } else if (type == QXL_RELEASE_CURSOR_CMD) {
6607 + cur_idx = 2;
6608 ++ priority = 1;
6609 ++ }
6610 + else {
6611 + DRM_ERROR("got illegal type: %d\n", type);
6612 + return -EINVAL;
6613 +@@ -352,7 +358,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
6614 + qdev->current_release_bo[cur_idx] = NULL;
6615 + }
6616 + if (!qdev->current_release_bo[cur_idx]) {
6617 +- ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
6618 ++ ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
6619 + if (ret) {
6620 + mutex_unlock(&qdev->release_mutex);
6621 + qxl_release_free(qdev, *release);
6622 +diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
6623 +index 008308780443c..9bd6c06975385 100644
6624 +--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
6625 ++++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
6626 +@@ -242,6 +242,9 @@ radeon_dp_mst_detect(struct drm_connector *connector,
6627 + to_radeon_connector(connector);
6628 + struct radeon_connector *master = radeon_connector->mst_port;
6629 +
6630 ++ if (drm_connector_is_unregistered(connector))
6631 ++ return connector_status_disconnected;
6632 ++
6633 + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
6634 + radeon_connector->port);
6635 + }
6636 +diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
6637 +index 99ee60f8b604d..8c0a572940e82 100644
6638 +--- a/drivers/gpu/drm/radeon/radeon_kms.c
6639 ++++ b/drivers/gpu/drm/radeon/radeon_kms.c
6640 +@@ -512,6 +512,7 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
6641 + *value = rdev->config.si.backend_enable_mask;
6642 + } else {
6643 + DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
6644 ++ return -EINVAL;
6645 + }
6646 + break;
6647 + case RADEON_INFO_MAX_SCLK:
6648 +diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
6649 +index 6e28f707092f0..62488ac149238 100644
6650 +--- a/drivers/gpu/drm/stm/ltdc.c
6651 ++++ b/drivers/gpu/drm/stm/ltdc.c
6652 +@@ -525,13 +525,42 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
6653 + {
6654 + struct ltdc_device *ldev = crtc_to_ltdc(crtc);
6655 + struct drm_device *ddev = crtc->dev;
6656 ++ struct drm_connector_list_iter iter;
6657 ++ struct drm_connector *connector = NULL;
6658 ++ struct drm_encoder *encoder = NULL;
6659 ++ struct drm_bridge *bridge = NULL;
6660 + struct drm_display_mode *mode = &crtc->state->adjusted_mode;
6661 + struct videomode vm;
6662 + u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
6663 + u32 total_width, total_height;
6664 ++ u32 bus_flags = 0;
6665 + u32 val;
6666 + int ret;
6667 +
6668 ++ /* get encoder from crtc */
6669 ++ drm_for_each_encoder(encoder, ddev)
6670 ++ if (encoder->crtc == crtc)
6671 ++ break;
6672 ++
6673 ++ if (encoder) {
6674 ++ /* get bridge from encoder */
6675 ++ list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
6676 ++ if (bridge->encoder == encoder)
6677 ++ break;
6678 ++
6679 ++ /* Get the connector from encoder */
6680 ++ drm_connector_list_iter_begin(ddev, &iter);
6681 ++ drm_for_each_connector_iter(connector, &iter)
6682 ++ if (connector->encoder == encoder)
6683 ++ break;
6684 ++ drm_connector_list_iter_end(&iter);
6685 ++ }
6686 ++
6687 ++ if (bridge && bridge->timings)
6688 ++ bus_flags = bridge->timings->input_bus_flags;
6689 ++ else if (connector)
6690 ++ bus_flags = connector->display_info.bus_flags;
6691 ++
6692 + if (!pm_runtime_active(ddev->dev)) {
6693 + ret = pm_runtime_get_sync(ddev->dev);
6694 + if (ret) {
6695 +@@ -567,10 +596,10 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
6696 + if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
6697 + val |= GCR_VSPOL;
6698 +
6699 +- if (vm.flags & DISPLAY_FLAGS_DE_LOW)
6700 ++ if (bus_flags & DRM_BUS_FLAG_DE_LOW)
6701 + val |= GCR_DEPOL;
6702 +
6703 +- if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
6704 ++ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
6705 + val |= GCR_PCPOL;
6706 +
6707 + reg_update_bits(ldev->regs, LTDC_GCR,
6708 +diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
6709 +index 518220bd092a6..0aaa4a26b5db5 100644
6710 +--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
6711 ++++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
6712 +@@ -518,6 +518,15 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
6713 +
6714 + drm_crtc_vblank_off(crtc);
6715 +
6716 ++ spin_lock_irq(&crtc->dev->event_lock);
6717 ++
6718 ++ if (crtc->state->event) {
6719 ++ drm_crtc_send_vblank_event(crtc, crtc->state->event);
6720 ++ crtc->state->event = NULL;
6721 ++ }
6722 ++
6723 ++ spin_unlock_irq(&crtc->dev->event_lock);
6724 ++
6725 + tilcdc_crtc_disable_irqs(dev);
6726 +
6727 + pm_runtime_put_sync(dev->dev);
6728 +diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
6729 +index 99158ee67d02b..59d1fb017da01 100644
6730 +--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
6731 ++++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
6732 +@@ -866,7 +866,7 @@ static int zynqmp_dp_train(struct zynqmp_dp *dp)
6733 + return ret;
6734 +
6735 + zynqmp_dp_write(dp, ZYNQMP_DP_SCRAMBLING_DISABLE, 1);
6736 +- memset(dp->train_set, 0, 4);
6737 ++ memset(dp->train_set, 0, sizeof(dp->train_set));
6738 + ret = zynqmp_dp_link_train_cr(dp);
6739 + if (ret)
6740 + return ret;
6741 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
6742 +index b93ce0d475e09..e220a05a05b48 100644
6743 +--- a/drivers/hid/hid-ids.h
6744 ++++ b/drivers/hid/hid-ids.h
6745 +@@ -938,6 +938,7 @@
6746 + #define USB_DEVICE_ID_ORTEK_IHOME_IMAC_A210S 0x8003
6747 +
6748 + #define USB_VENDOR_ID_PLANTRONICS 0x047f
6749 ++#define USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES 0xc056
6750 +
6751 + #define USB_VENDOR_ID_PANASONIC 0x04da
6752 + #define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
6753 +diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
6754 +index c6c8e20f3e8d5..0ff03fed97709 100644
6755 +--- a/drivers/hid/hid-lenovo.c
6756 ++++ b/drivers/hid/hid-lenovo.c
6757 +@@ -33,6 +33,9 @@
6758 +
6759 + #include "hid-ids.h"
6760 +
6761 ++/* Userspace expects F20 for mic-mute KEY_MICMUTE does not work */
6762 ++#define LENOVO_KEY_MICMUTE KEY_F20
6763 ++
6764 + struct lenovo_drvdata {
6765 + u8 led_report[3]; /* Must be first for proper alignment */
6766 + int led_state;
6767 +@@ -62,8 +65,8 @@ struct lenovo_drvdata {
6768 + #define TP10UBKBD_LED_OFF 1
6769 + #define TP10UBKBD_LED_ON 2
6770 +
6771 +-static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
6772 +- enum led_brightness value)
6773 ++static int lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
6774 ++ enum led_brightness value)
6775 + {
6776 + struct lenovo_drvdata *data = hid_get_drvdata(hdev);
6777 + int ret;
6778 +@@ -75,10 +78,18 @@ static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code,
6779 + data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF;
6780 + ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3,
6781 + HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
6782 +- if (ret)
6783 +- hid_err(hdev, "Set LED output report error: %d\n", ret);
6784 ++ if (ret != 3) {
6785 ++ if (ret != -ENODEV)
6786 ++ hid_err(hdev, "Set LED output report error: %d\n", ret);
6787 ++
6788 ++ ret = ret < 0 ? ret : -EIO;
6789 ++ } else {
6790 ++ ret = 0;
6791 ++ }
6792 +
6793 + mutex_unlock(&data->led_report_mutex);
6794 ++
6795 ++ return ret;
6796 + }
6797 +
6798 + static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work)
6799 +@@ -126,7 +137,7 @@ static int lenovo_input_mapping_tpkbd(struct hid_device *hdev,
6800 + if (usage->hid == (HID_UP_BUTTON | 0x0010)) {
6801 + /* This sub-device contains trackpoint, mark it */
6802 + hid_set_drvdata(hdev, (void *)1);
6803 +- map_key_clear(KEY_MICMUTE);
6804 ++ map_key_clear(LENOVO_KEY_MICMUTE);
6805 + return 1;
6806 + }
6807 + return 0;
6808 +@@ -141,7 +152,7 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
6809 + (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
6810 + switch (usage->hid & HID_USAGE) {
6811 + case 0x00f1: /* Fn-F4: Mic mute */
6812 +- map_key_clear(KEY_MICMUTE);
6813 ++ map_key_clear(LENOVO_KEY_MICMUTE);
6814 + return 1;
6815 + case 0x00f2: /* Fn-F5: Brightness down */
6816 + map_key_clear(KEY_BRIGHTNESSDOWN);
6817 +@@ -231,7 +242,7 @@ static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev,
6818 + map_key_clear(KEY_FN_ESC);
6819 + return 1;
6820 + case 9: /* Fn-F4: Mic mute */
6821 +- map_key_clear(KEY_MICMUTE);
6822 ++ map_key_clear(LENOVO_KEY_MICMUTE);
6823 + return 1;
6824 + case 10: /* Fn-F7: Control panel */
6825 + map_key_clear(KEY_CONFIG);
6826 +@@ -349,7 +360,7 @@ static ssize_t attr_fn_lock_store(struct device *dev,
6827 + {
6828 + struct hid_device *hdev = to_hid_device(dev);
6829 + struct lenovo_drvdata *data = hid_get_drvdata(hdev);
6830 +- int value;
6831 ++ int value, ret;
6832 +
6833 + if (kstrtoint(buf, 10, &value))
6834 + return -EINVAL;
6835 +@@ -364,7 +375,9 @@ static ssize_t attr_fn_lock_store(struct device *dev,
6836 + lenovo_features_set_cptkbd(hdev);
6837 + break;
6838 + case USB_DEVICE_ID_LENOVO_TP10UBKBD:
6839 +- lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
6840 ++ ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value);
6841 ++ if (ret)
6842 ++ return ret;
6843 + break;
6844 + }
6845 +
6846 +@@ -498,6 +511,9 @@ static int lenovo_event_cptkbd(struct hid_device *hdev,
6847 + static int lenovo_event(struct hid_device *hdev, struct hid_field *field,
6848 + struct hid_usage *usage, __s32 value)
6849 + {
6850 ++ if (!hid_get_drvdata(hdev))
6851 ++ return 0;
6852 ++
6853 + switch (hdev->product) {
6854 + case USB_DEVICE_ID_LENOVO_CUSBKBD:
6855 + case USB_DEVICE_ID_LENOVO_CBTKBD:
6856 +@@ -777,7 +793,7 @@ static enum led_brightness lenovo_led_brightness_get(
6857 + : LED_OFF;
6858 + }
6859 +
6860 +-static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
6861 ++static int lenovo_led_brightness_set(struct led_classdev *led_cdev,
6862 + enum led_brightness value)
6863 + {
6864 + struct device *dev = led_cdev->dev->parent;
6865 +@@ -785,6 +801,7 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
6866 + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev);
6867 + u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED };
6868 + int led_nr = 0;
6869 ++ int ret = 0;
6870 +
6871 + if (led_cdev == &data_pointer->led_micmute)
6872 + led_nr = 1;
6873 +@@ -799,9 +816,11 @@ static void lenovo_led_brightness_set(struct led_classdev *led_cdev,
6874 + lenovo_led_set_tpkbd(hdev);
6875 + break;
6876 + case USB_DEVICE_ID_LENOVO_TP10UBKBD:
6877 +- lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
6878 ++ ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value);
6879 + break;
6880 + }
6881 ++
6882 ++ return ret;
6883 + }
6884 +
6885 + static int lenovo_register_leds(struct hid_device *hdev)
6886 +@@ -822,7 +841,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
6887 +
6888 + data->led_mute.name = name_mute;
6889 + data->led_mute.brightness_get = lenovo_led_brightness_get;
6890 +- data->led_mute.brightness_set = lenovo_led_brightness_set;
6891 ++ data->led_mute.brightness_set_blocking = lenovo_led_brightness_set;
6892 ++ data->led_mute.flags = LED_HW_PLUGGABLE;
6893 + data->led_mute.dev = &hdev->dev;
6894 + ret = led_classdev_register(&hdev->dev, &data->led_mute);
6895 + if (ret < 0)
6896 +@@ -830,7 +850,8 @@ static int lenovo_register_leds(struct hid_device *hdev)
6897 +
6898 + data->led_micmute.name = name_micm;
6899 + data->led_micmute.brightness_get = lenovo_led_brightness_get;
6900 +- data->led_micmute.brightness_set = lenovo_led_brightness_set;
6901 ++ data->led_micmute.brightness_set_blocking = lenovo_led_brightness_set;
6902 ++ data->led_micmute.flags = LED_HW_PLUGGABLE;
6903 + data->led_micmute.dev = &hdev->dev;
6904 + ret = led_classdev_register(&hdev->dev, &data->led_micmute);
6905 + if (ret < 0) {
6906 +diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
6907 +index 85b685efc12f3..e81b7cec2d124 100644
6908 +--- a/drivers/hid/hid-plantronics.c
6909 ++++ b/drivers/hid/hid-plantronics.c
6910 +@@ -13,6 +13,7 @@
6911 +
6912 + #include <linux/hid.h>
6913 + #include <linux/module.h>
6914 ++#include <linux/jiffies.h>
6915 +
6916 + #define PLT_HID_1_0_PAGE 0xffa00000
6917 + #define PLT_HID_2_0_PAGE 0xffa20000
6918 +@@ -36,6 +37,16 @@
6919 + #define PLT_ALLOW_CONSUMER (field->application == HID_CP_CONSUMERCONTROL && \
6920 + (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER)
6921 +
6922 ++#define PLT_QUIRK_DOUBLE_VOLUME_KEYS BIT(0)
6923 ++
6924 ++#define PLT_DOUBLE_KEY_TIMEOUT 5 /* ms */
6925 ++
6926 ++struct plt_drv_data {
6927 ++ unsigned long device_type;
6928 ++ unsigned long last_volume_key_ts;
6929 ++ u32 quirks;
6930 ++};
6931 ++
6932 + static int plantronics_input_mapping(struct hid_device *hdev,
6933 + struct hid_input *hi,
6934 + struct hid_field *field,
6935 +@@ -43,7 +54,8 @@ static int plantronics_input_mapping(struct hid_device *hdev,
6936 + unsigned long **bit, int *max)
6937 + {
6938 + unsigned short mapped_key;
6939 +- unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev);
6940 ++ struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
6941 ++ unsigned long plt_type = drv_data->device_type;
6942 +
6943 + /* special case for PTT products */
6944 + if (field->application == HID_GD_JOYSTICK)
6945 +@@ -105,6 +117,30 @@ mapped:
6946 + return 1;
6947 + }
6948 +
6949 ++static int plantronics_event(struct hid_device *hdev, struct hid_field *field,
6950 ++ struct hid_usage *usage, __s32 value)
6951 ++{
6952 ++ struct plt_drv_data *drv_data = hid_get_drvdata(hdev);
6953 ++
6954 ++ if (drv_data->quirks & PLT_QUIRK_DOUBLE_VOLUME_KEYS) {
6955 ++ unsigned long prev_ts, cur_ts;
6956 ++
6957 ++ /* Usages are filtered in plantronics_usages. */
6958 ++
6959 ++ if (!value) /* Handle key presses only. */
6960 ++ return 0;
6961 ++
6962 ++ prev_ts = drv_data->last_volume_key_ts;
6963 ++ cur_ts = jiffies;
6964 ++ if (jiffies_to_msecs(cur_ts - prev_ts) <= PLT_DOUBLE_KEY_TIMEOUT)
6965 ++ return 1; /* Ignore the repeated key. */
6966 ++
6967 ++ drv_data->last_volume_key_ts = cur_ts;
6968 ++ }
6969 ++
6970 ++ return 0;
6971 ++}
6972 ++
6973 + static unsigned long plantronics_device_type(struct hid_device *hdev)
6974 + {
6975 + unsigned i, col_page;
6976 +@@ -133,15 +169,24 @@ exit:
6977 + static int plantronics_probe(struct hid_device *hdev,
6978 + const struct hid_device_id *id)
6979 + {
6980 ++ struct plt_drv_data *drv_data;
6981 + int ret;
6982 +
6983 ++ drv_data = devm_kzalloc(&hdev->dev, sizeof(*drv_data), GFP_KERNEL);
6984 ++ if (!drv_data)
6985 ++ return -ENOMEM;
6986 ++
6987 + ret = hid_parse(hdev);
6988 + if (ret) {
6989 + hid_err(hdev, "parse failed\n");
6990 + goto err;
6991 + }
6992 +
6993 +- hid_set_drvdata(hdev, (void *)plantronics_device_type(hdev));
6994 ++ drv_data->device_type = plantronics_device_type(hdev);
6995 ++ drv_data->quirks = id->driver_data;
6996 ++ drv_data->last_volume_key_ts = jiffies - msecs_to_jiffies(PLT_DOUBLE_KEY_TIMEOUT);
6997 ++
6998 ++ hid_set_drvdata(hdev, drv_data);
6999 +
7000 + ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT |
7001 + HID_CONNECT_HIDINPUT_FORCE | HID_CONNECT_HIDDEV_FORCE);
7002 +@@ -153,15 +198,26 @@ err:
7003 + }
7004 +
7005 + static const struct hid_device_id plantronics_devices[] = {
7006 ++ { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS,
7007 ++ USB_DEVICE_ID_PLANTRONICS_BLACKWIRE_3220_SERIES),
7008 ++ .driver_data = PLT_QUIRK_DOUBLE_VOLUME_KEYS },
7009 + { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
7010 + { }
7011 + };
7012 + MODULE_DEVICE_TABLE(hid, plantronics_devices);
7013 +
7014 ++static const struct hid_usage_id plantronics_usages[] = {
7015 ++ { HID_CP_VOLUMEUP, EV_KEY, HID_ANY_ID },
7016 ++ { HID_CP_VOLUMEDOWN, EV_KEY, HID_ANY_ID },
7017 ++ { HID_TERMINATOR, HID_TERMINATOR, HID_TERMINATOR }
7018 ++};
7019 ++
7020 + static struct hid_driver plantronics_driver = {
7021 + .name = "plantronics",
7022 + .id_table = plantronics_devices,
7023 ++ .usage_table = plantronics_usages,
7024 + .input_mapping = plantronics_input_mapping,
7025 ++ .event = plantronics_event,
7026 + .probe = plantronics_probe,
7027 + };
7028 + module_hid_driver(plantronics_driver);
7029 +diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
7030 +index 47f0208aa7c37..a5f92e2889cb8 100644
7031 +--- a/drivers/hsi/hsi_core.c
7032 ++++ b/drivers/hsi/hsi_core.c
7033 +@@ -210,8 +210,6 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
7034 + if (err)
7035 + goto err;
7036 +
7037 +- dev_set_name(&cl->device, "%s", name);
7038 +-
7039 + err = hsi_of_property_parse_mode(client, "hsi-mode", &mode);
7040 + if (err) {
7041 + err = hsi_of_property_parse_mode(client, "hsi-rx-mode",
7042 +@@ -293,6 +291,7 @@ static void hsi_add_client_from_dt(struct hsi_port *port,
7043 + cl->device.release = hsi_client_release;
7044 + cl->device.of_node = client;
7045 +
7046 ++ dev_set_name(&cl->device, "%s", name);
7047 + if (device_register(&cl->device) < 0) {
7048 + pr_err("hsi: failed to register client: %s\n", name);
7049 + put_device(&cl->device);
7050 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
7051 +index fbdda9938039a..f064fa6ef181a 100644
7052 +--- a/drivers/hv/channel.c
7053 ++++ b/drivers/hv/channel.c
7054 +@@ -583,7 +583,7 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
7055 +
7056 + if (newchannel->rescind) {
7057 + err = -ENODEV;
7058 +- goto error_free_info;
7059 ++ goto error_clean_msglist;
7060 + }
7061 +
7062 + err = vmbus_post_msg(open_msg,
7063 +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
7064 +index 6be9f56cb6270..6476bfe193afd 100644
7065 +--- a/drivers/hv/channel_mgmt.c
7066 ++++ b/drivers/hv/channel_mgmt.c
7067 +@@ -725,6 +725,12 @@ static void init_vp_index(struct vmbus_channel *channel)
7068 + free_cpumask_var(available_mask);
7069 + }
7070 +
7071 ++#define UNLOAD_DELAY_UNIT_MS 10 /* 10 milliseconds */
7072 ++#define UNLOAD_WAIT_MS (100*1000) /* 100 seconds */
7073 ++#define UNLOAD_WAIT_LOOPS (UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
7074 ++#define UNLOAD_MSG_MS (5*1000) /* Every 5 seconds */
7075 ++#define UNLOAD_MSG_LOOPS (UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
7076 ++
7077 + static void vmbus_wait_for_unload(void)
7078 + {
7079 + int cpu;
7080 +@@ -742,12 +748,17 @@ static void vmbus_wait_for_unload(void)
7081 + * vmbus_connection.unload_event. If not, the last thing we can do is
7082 + * read message pages for all CPUs directly.
7083 + *
7084 +- * Wait no more than 10 seconds so that the panic path can't get
7085 +- * hung forever in case the response message isn't seen.
7086 ++ * Wait up to 100 seconds since an Azure host must writeback any dirty
7087 ++ * data in its disk cache before the VMbus UNLOAD request will
7088 ++ * complete. This flushing has been empirically observed to take up
7089 ++ * to 50 seconds in cases with a lot of dirty data, so allow additional
7090 ++ * leeway and for inaccuracies in mdelay(). But eventually time out so
7091 ++ * that the panic path can't get hung forever in case the response
7092 ++ * message isn't seen.
7093 + */
7094 +- for (i = 0; i < 1000; i++) {
7095 ++ for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
7096 + if (completion_done(&vmbus_connection.unload_event))
7097 +- break;
7098 ++ goto completed;
7099 +
7100 + for_each_online_cpu(cpu) {
7101 + struct hv_per_cpu_context *hv_cpu
7102 +@@ -770,9 +781,18 @@ static void vmbus_wait_for_unload(void)
7103 + vmbus_signal_eom(msg, message_type);
7104 + }
7105 +
7106 +- mdelay(10);
7107 ++ /*
7108 ++ * Give a notice periodically so someone watching the
7109 ++ * serial output won't think it is completely hung.
7110 ++ */
7111 ++ if (!(i % UNLOAD_MSG_LOOPS))
7112 ++ pr_notice("Waiting for VMBus UNLOAD to complete\n");
7113 ++
7114 ++ mdelay(UNLOAD_DELAY_UNIT_MS);
7115 + }
7116 ++ pr_err("Continuing even though VMBus UNLOAD did not complete\n");
7117 +
7118 ++completed:
7119 + /*
7120 + * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
7121 + * maybe-pending messages on all CPUs to be able to receive new
7122 +diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c
7123 +index fa5c5dd29b7ae..212433eb6cc31 100644
7124 +--- a/drivers/hwmon/pmbus/pxe1610.c
7125 ++++ b/drivers/hwmon/pmbus/pxe1610.c
7126 +@@ -41,6 +41,15 @@ static int pxe1610_identify(struct i2c_client *client,
7127 + info->vrm_version[i] = vr13;
7128 + break;
7129 + default:
7130 ++ /*
7131 ++ * If prior pages are available limit operation
7132 ++ * to them
7133 ++ */
7134 ++ if (i != 0) {
7135 ++ info->pages = i;
7136 ++ return 0;
7137 ++ }
7138 ++
7139 + return -ENODEV;
7140 + }
7141 + }
7142 +diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
7143 +index e4b7f2a951ad5..c1bbc4caeb5c9 100644
7144 +--- a/drivers/i2c/busses/i2c-cadence.c
7145 ++++ b/drivers/i2c/busses/i2c-cadence.c
7146 +@@ -789,7 +789,7 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
7147 + bool change_role = false;
7148 + #endif
7149 +
7150 +- ret = pm_runtime_get_sync(id->dev);
7151 ++ ret = pm_runtime_resume_and_get(id->dev);
7152 + if (ret < 0)
7153 + return ret;
7154 +
7155 +@@ -911,7 +911,7 @@ static int cdns_reg_slave(struct i2c_client *slave)
7156 + if (slave->flags & I2C_CLIENT_TEN)
7157 + return -EAFNOSUPPORT;
7158 +
7159 +- ret = pm_runtime_get_sync(id->dev);
7160 ++ ret = pm_runtime_resume_and_get(id->dev);
7161 + if (ret < 0)
7162 + return ret;
7163 +
7164 +@@ -1200,7 +1200,10 @@ static int cdns_i2c_probe(struct platform_device *pdev)
7165 + if (IS_ERR(id->membase))
7166 + return PTR_ERR(id->membase);
7167 +
7168 +- id->irq = platform_get_irq(pdev, 0);
7169 ++ ret = platform_get_irq(pdev, 0);
7170 ++ if (ret < 0)
7171 ++ return ret;
7172 ++ id->irq = ret;
7173 +
7174 + id->adap.owner = THIS_MODULE;
7175 + id->adap.dev.of_node = pdev->dev.of_node;
7176 +diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
7177 +index a08554c1a5704..bdff0e6345d9a 100644
7178 +--- a/drivers/i2c/busses/i2c-emev2.c
7179 ++++ b/drivers/i2c/busses/i2c-emev2.c
7180 +@@ -395,7 +395,10 @@ static int em_i2c_probe(struct platform_device *pdev)
7181 +
7182 + em_i2c_reset(&priv->adap);
7183 +
7184 +- priv->irq = platform_get_irq(pdev, 0);
7185 ++ ret = platform_get_irq(pdev, 0);
7186 ++ if (ret < 0)
7187 ++ goto err_clk;
7188 ++ priv->irq = ret;
7189 + ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
7190 + "em_i2c", priv);
7191 + if (ret)
7192 +diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
7193 +index 98a89301ed2a6..8e987945ed450 100644
7194 +--- a/drivers/i2c/busses/i2c-img-scb.c
7195 ++++ b/drivers/i2c/busses/i2c-img-scb.c
7196 +@@ -1057,7 +1057,7 @@ static int img_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
7197 + atomic = true;
7198 + }
7199 +
7200 +- ret = pm_runtime_get_sync(adap->dev.parent);
7201 ++ ret = pm_runtime_resume_and_get(adap->dev.parent);
7202 + if (ret < 0)
7203 + return ret;
7204 +
7205 +@@ -1158,7 +1158,7 @@ static int img_i2c_init(struct img_i2c *i2c)
7206 + u32 rev;
7207 + int ret;
7208 +
7209 +- ret = pm_runtime_get_sync(i2c->adap.dev.parent);
7210 ++ ret = pm_runtime_resume_and_get(i2c->adap.dev.parent);
7211 + if (ret < 0)
7212 + return ret;
7213 +
7214 +diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
7215 +index 9db6ccded5e9e..8b9ba055c4186 100644
7216 +--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
7217 ++++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
7218 +@@ -259,7 +259,7 @@ static int lpi2c_imx_master_enable(struct lpi2c_imx_struct *lpi2c_imx)
7219 + unsigned int temp;
7220 + int ret;
7221 +
7222 +- ret = pm_runtime_get_sync(lpi2c_imx->adapter.dev.parent);
7223 ++ ret = pm_runtime_resume_and_get(lpi2c_imx->adapter.dev.parent);
7224 + if (ret < 0)
7225 + return ret;
7226 +
7227 +diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
7228 +index e6f8d6e45a15a..72af4b4d13180 100644
7229 +--- a/drivers/i2c/busses/i2c-imx.c
7230 ++++ b/drivers/i2c/busses/i2c-imx.c
7231 +@@ -1036,7 +1036,7 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
7232 + struct imx_i2c_struct *i2c_imx = i2c_get_adapdata(adapter);
7233 + int result;
7234 +
7235 +- result = pm_runtime_get_sync(i2c_imx->adapter.dev.parent);
7236 ++ result = pm_runtime_resume_and_get(i2c_imx->adapter.dev.parent);
7237 + if (result < 0)
7238 + return result;
7239 +
7240 +@@ -1280,7 +1280,7 @@ static int i2c_imx_remove(struct platform_device *pdev)
7241 + struct imx_i2c_struct *i2c_imx = platform_get_drvdata(pdev);
7242 + int irq, ret;
7243 +
7244 +- ret = pm_runtime_get_sync(&pdev->dev);
7245 ++ ret = pm_runtime_resume_and_get(&pdev->dev);
7246 + if (ret < 0)
7247 + return ret;
7248 +
7249 +diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
7250 +index 2a946c2079284..e181db3fd2cce 100644
7251 +--- a/drivers/i2c/busses/i2c-jz4780.c
7252 ++++ b/drivers/i2c/busses/i2c-jz4780.c
7253 +@@ -826,7 +826,10 @@ static int jz4780_i2c_probe(struct platform_device *pdev)
7254 +
7255 + jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
7256 +
7257 +- i2c->irq = platform_get_irq(pdev, 0);
7258 ++ ret = platform_get_irq(pdev, 0);
7259 ++ if (ret < 0)
7260 ++ goto err;
7261 ++ i2c->irq = ret;
7262 + ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
7263 + dev_name(&pdev->dev), i2c);
7264 + if (ret)
7265 +diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c
7266 +index 2fb0532d8a161..ab261d762dea3 100644
7267 +--- a/drivers/i2c/busses/i2c-mlxbf.c
7268 ++++ b/drivers/i2c/busses/i2c-mlxbf.c
7269 +@@ -2376,6 +2376,8 @@ static int mlxbf_i2c_probe(struct platform_device *pdev)
7270 + mlxbf_i2c_init_slave(pdev, priv);
7271 +
7272 + irq = platform_get_irq(pdev, 0);
7273 ++ if (irq < 0)
7274 ++ return irq;
7275 + ret = devm_request_irq(dev, irq, mlxbf_smbus_irq,
7276 + IRQF_ONESHOT | IRQF_SHARED | IRQF_PROBE_SHARED,
7277 + dev_name(dev), priv);
7278 +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
7279 +index 2ffd2f354d0ae..86f70c7513192 100644
7280 +--- a/drivers/i2c/busses/i2c-mt65xx.c
7281 ++++ b/drivers/i2c/busses/i2c-mt65xx.c
7282 +@@ -479,7 +479,7 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
7283 + {
7284 + u16 control_reg;
7285 +
7286 +- if (i2c->dev_comp->dma_sync) {
7287 ++ if (i2c->dev_comp->apdma_sync) {
7288 + writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
7289 + udelay(10);
7290 + writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
7291 +diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
7292 +index 12ac4212aded8..d4f6c6d60683a 100644
7293 +--- a/drivers/i2c/busses/i2c-omap.c
7294 ++++ b/drivers/i2c/busses/i2c-omap.c
7295 +@@ -1404,9 +1404,9 @@ omap_i2c_probe(struct platform_device *pdev)
7296 + pm_runtime_set_autosuspend_delay(omap->dev, OMAP_I2C_PM_TIMEOUT);
7297 + pm_runtime_use_autosuspend(omap->dev);
7298 +
7299 +- r = pm_runtime_get_sync(omap->dev);
7300 ++ r = pm_runtime_resume_and_get(omap->dev);
7301 + if (r < 0)
7302 +- goto err_free_mem;
7303 ++ goto err_disable_pm;
7304 +
7305 + /*
7306 + * Read the Rev hi bit-[15:14] ie scheme this is 1 indicates ver2.
7307 +@@ -1513,8 +1513,8 @@ err_unuse_clocks:
7308 + omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
7309 + pm_runtime_dont_use_autosuspend(omap->dev);
7310 + pm_runtime_put_sync(omap->dev);
7311 ++err_disable_pm:
7312 + pm_runtime_disable(&pdev->dev);
7313 +-err_free_mem:
7314 +
7315 + return r;
7316 + }
7317 +@@ -1525,7 +1525,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
7318 + int ret;
7319 +
7320 + i2c_del_adapter(&omap->adapter);
7321 +- ret = pm_runtime_get_sync(&pdev->dev);
7322 ++ ret = pm_runtime_resume_and_get(&pdev->dev);
7323 + if (ret < 0)
7324 + return ret;
7325 +
7326 +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
7327 +index ad6630e3cc779..8722ca23f889b 100644
7328 +--- a/drivers/i2c/busses/i2c-rcar.c
7329 ++++ b/drivers/i2c/busses/i2c-rcar.c
7330 +@@ -625,20 +625,11 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
7331 + * generated. It turned out that taking a spinlock at the beginning of the ISR
7332 + * was already causing repeated messages. Thus, this driver was converted to
7333 + * the now lockless behaviour. Please keep this in mind when hacking the driver.
7334 ++ * R-Car Gen3 seems to have this fixed but earlier versions than R-Car Gen2 are
7335 ++ * likely affected. Therefore, we have different interrupt handler entries.
7336 + */
7337 +-static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
7338 ++static irqreturn_t rcar_i2c_irq(int irq, struct rcar_i2c_priv *priv, u32 msr)
7339 + {
7340 +- struct rcar_i2c_priv *priv = ptr;
7341 +- u32 msr;
7342 +-
7343 +- /* Clear START or STOP immediately, except for REPSTART after read */
7344 +- if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
7345 +- rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
7346 +-
7347 +- msr = rcar_i2c_read(priv, ICMSR);
7348 +-
7349 +- /* Only handle interrupts that are currently enabled */
7350 +- msr &= rcar_i2c_read(priv, ICMIER);
7351 + if (!msr) {
7352 + if (rcar_i2c_slave_irq(priv))
7353 + return IRQ_HANDLED;
7354 +@@ -682,6 +673,41 @@ out:
7355 + return IRQ_HANDLED;
7356 + }
7357 +
7358 ++static irqreturn_t rcar_i2c_gen2_irq(int irq, void *ptr)
7359 ++{
7360 ++ struct rcar_i2c_priv *priv = ptr;
7361 ++ u32 msr;
7362 ++
7363 ++ /* Clear START or STOP immediately, except for REPSTART after read */
7364 ++ if (likely(!(priv->flags & ID_P_REP_AFTER_RD)))
7365 ++ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
7366 ++
7367 ++ /* Only handle interrupts that are currently enabled */
7368 ++ msr = rcar_i2c_read(priv, ICMSR);
7369 ++ msr &= rcar_i2c_read(priv, ICMIER);
7370 ++
7371 ++ return rcar_i2c_irq(irq, priv, msr);
7372 ++}
7373 ++
7374 ++static irqreturn_t rcar_i2c_gen3_irq(int irq, void *ptr)
7375 ++{
7376 ++ struct rcar_i2c_priv *priv = ptr;
7377 ++ u32 msr;
7378 ++
7379 ++ /* Only handle interrupts that are currently enabled */
7380 ++ msr = rcar_i2c_read(priv, ICMSR);
7381 ++ msr &= rcar_i2c_read(priv, ICMIER);
7382 ++
7383 ++ /*
7384 ++ * Clear START or STOP immediately, except for REPSTART after read or
7385 ++ * if a spurious interrupt was detected.
7386 ++ */
7387 ++ if (likely(!(priv->flags & ID_P_REP_AFTER_RD) && msr))
7388 ++ rcar_i2c_write(priv, ICMCR, RCAR_BUS_PHASE_DATA);
7389 ++
7390 ++ return rcar_i2c_irq(irq, priv, msr);
7391 ++}
7392 ++
7393 + static struct dma_chan *rcar_i2c_request_dma_chan(struct device *dev,
7394 + enum dma_transfer_direction dir,
7395 + dma_addr_t port_addr)
7396 +@@ -928,6 +954,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
7397 + struct rcar_i2c_priv *priv;
7398 + struct i2c_adapter *adap;
7399 + struct device *dev = &pdev->dev;
7400 ++ unsigned long irqflags = 0;
7401 ++ irqreturn_t (*irqhandler)(int irq, void *ptr) = rcar_i2c_gen3_irq;
7402 + int ret;
7403 +
7404 + /* Otherwise logic will break because some bytes must always use PIO */
7405 +@@ -976,6 +1004,11 @@ static int rcar_i2c_probe(struct platform_device *pdev)
7406 +
7407 + rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
7408 +
7409 ++ if (priv->devtype < I2C_RCAR_GEN3) {
7410 ++ irqflags |= IRQF_NO_THREAD;
7411 ++ irqhandler = rcar_i2c_gen2_irq;
7412 ++ }
7413 ++
7414 + if (priv->devtype == I2C_RCAR_GEN3) {
7415 + priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
7416 + if (!IS_ERR(priv->rstc)) {
7417 +@@ -994,8 +1027,11 @@ static int rcar_i2c_probe(struct platform_device *pdev)
7418 + if (of_property_read_bool(dev->of_node, "smbus"))
7419 + priv->flags |= ID_P_HOST_NOTIFY;
7420 +
7421 +- priv->irq = platform_get_irq(pdev, 0);
7422 +- ret = devm_request_irq(dev, priv->irq, rcar_i2c_irq, 0, dev_name(dev), priv);
7423 ++ ret = platform_get_irq(pdev, 0);
7424 ++ if (ret < 0)
7425 ++ goto out_pm_disable;
7426 ++ priv->irq = ret;
7427 ++ ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
7428 + if (ret < 0) {
7429 + dev_err(dev, "cannot get irq %d\n", priv->irq);
7430 + goto out_pm_disable;
7431 +diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
7432 +index c2005c789d2b0..319d1fa617c88 100644
7433 +--- a/drivers/i2c/busses/i2c-sh7760.c
7434 ++++ b/drivers/i2c/busses/i2c-sh7760.c
7435 +@@ -471,7 +471,10 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
7436 + goto out2;
7437 + }
7438 +
7439 +- id->irq = platform_get_irq(pdev, 0);
7440 ++ ret = platform_get_irq(pdev, 0);
7441 ++ if (ret < 0)
7442 ++ goto out3;
7443 ++ id->irq = ret;
7444 +
7445 + id->adap.nr = pdev->id;
7446 + id->adap.algo = &sh7760_i2c_algo;
7447 +diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
7448 +index 2917fecf6c80d..8ead7e021008c 100644
7449 +--- a/drivers/i2c/busses/i2c-sprd.c
7450 ++++ b/drivers/i2c/busses/i2c-sprd.c
7451 +@@ -290,7 +290,7 @@ static int sprd_i2c_master_xfer(struct i2c_adapter *i2c_adap,
7452 + struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
7453 + int im, ret;
7454 +
7455 +- ret = pm_runtime_get_sync(i2c_dev->dev);
7456 ++ ret = pm_runtime_resume_and_get(i2c_dev->dev);
7457 + if (ret < 0)
7458 + return ret;
7459 +
7460 +@@ -576,7 +576,7 @@ static int sprd_i2c_remove(struct platform_device *pdev)
7461 + struct sprd_i2c *i2c_dev = platform_get_drvdata(pdev);
7462 + int ret;
7463 +
7464 +- ret = pm_runtime_get_sync(i2c_dev->dev);
7465 ++ ret = pm_runtime_resume_and_get(i2c_dev->dev);
7466 + if (ret < 0)
7467 + return ret;
7468 +
7469 +diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
7470 +index 6747353345475..1e800b65e20a0 100644
7471 +--- a/drivers/i2c/busses/i2c-stm32f7.c
7472 ++++ b/drivers/i2c/busses/i2c-stm32f7.c
7473 +@@ -1652,7 +1652,7 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
7474 + i2c_dev->msg_id = 0;
7475 + f7_msg->smbus = false;
7476 +
7477 +- ret = pm_runtime_get_sync(i2c_dev->dev);
7478 ++ ret = pm_runtime_resume_and_get(i2c_dev->dev);
7479 + if (ret < 0)
7480 + return ret;
7481 +
7482 +@@ -1698,7 +1698,7 @@ static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
7483 + f7_msg->read_write = read_write;
7484 + f7_msg->smbus = true;
7485 +
7486 +- ret = pm_runtime_get_sync(dev);
7487 ++ ret = pm_runtime_resume_and_get(dev);
7488 + if (ret < 0)
7489 + return ret;
7490 +
7491 +@@ -1799,7 +1799,7 @@ static int stm32f7_i2c_reg_slave(struct i2c_client *slave)
7492 + if (ret)
7493 + return ret;
7494 +
7495 +- ret = pm_runtime_get_sync(dev);
7496 ++ ret = pm_runtime_resume_and_get(dev);
7497 + if (ret < 0)
7498 + return ret;
7499 +
7500 +@@ -1880,7 +1880,7 @@ static int stm32f7_i2c_unreg_slave(struct i2c_client *slave)
7501 +
7502 + WARN_ON(!i2c_dev->slave[id]);
7503 +
7504 +- ret = pm_runtime_get_sync(i2c_dev->dev);
7505 ++ ret = pm_runtime_resume_and_get(i2c_dev->dev);
7506 + if (ret < 0)
7507 + return ret;
7508 +
7509 +@@ -2277,7 +2277,7 @@ static int stm32f7_i2c_regs_backup(struct stm32f7_i2c_dev *i2c_dev)
7510 + int ret;
7511 + struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
7512 +
7513 +- ret = pm_runtime_get_sync(i2c_dev->dev);
7514 ++ ret = pm_runtime_resume_and_get(i2c_dev->dev);
7515 + if (ret < 0)
7516 + return ret;
7517 +
7518 +@@ -2299,7 +2299,7 @@ static int stm32f7_i2c_regs_restore(struct stm32f7_i2c_dev *i2c_dev)
7519 + int ret;
7520 + struct stm32f7_i2c_regs *backup_regs = &i2c_dev->backup_regs;
7521 +
7522 +- ret = pm_runtime_get_sync(i2c_dev->dev);
7523 ++ ret = pm_runtime_resume_and_get(i2c_dev->dev);
7524 + if (ret < 0)
7525 + return ret;
7526 +
7527 +diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
7528 +index 087b2951942eb..2a8568b97c14d 100644
7529 +--- a/drivers/i2c/busses/i2c-xiic.c
7530 ++++ b/drivers/i2c/busses/i2c-xiic.c
7531 +@@ -706,7 +706,7 @@ static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
7532 + dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
7533 + xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));
7534 +
7535 +- err = pm_runtime_get_sync(i2c->dev);
7536 ++ err = pm_runtime_resume_and_get(i2c->dev);
7537 + if (err < 0)
7538 + return err;
7539 +
7540 +@@ -873,7 +873,7 @@ static int xiic_i2c_remove(struct platform_device *pdev)
7541 + /* remove adapter & data */
7542 + i2c_del_adapter(&i2c->adap);
7543 +
7544 +- ret = pm_runtime_get_sync(i2c->dev);
7545 ++ ret = pm_runtime_resume_and_get(i2c->dev);
7546 + if (ret < 0)
7547 + return ret;
7548 +
7549 +diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
7550 +index b61bf53ec07af..1c6b78ad5ade4 100644
7551 +--- a/drivers/i3c/master.c
7552 ++++ b/drivers/i3c/master.c
7553 +@@ -2537,7 +2537,7 @@ int i3c_master_register(struct i3c_master_controller *master,
7554 +
7555 + ret = i3c_master_bus_init(master);
7556 + if (ret)
7557 +- goto err_destroy_wq;
7558 ++ goto err_put_dev;
7559 +
7560 + ret = device_add(&master->dev);
7561 + if (ret)
7562 +@@ -2568,9 +2568,6 @@ err_del_dev:
7563 + err_cleanup_bus:
7564 + i3c_master_bus_cleanup(master);
7565 +
7566 +-err_destroy_wq:
7567 +- destroy_workqueue(master->wq);
7568 +-
7569 + err_put_dev:
7570 + put_device(&master->dev);
7571 +
7572 +diff --git a/drivers/iio/accel/adis16201.c b/drivers/iio/accel/adis16201.c
7573 +index f955cccb3e779..84bbdfd2f2ba3 100644
7574 +--- a/drivers/iio/accel/adis16201.c
7575 ++++ b/drivers/iio/accel/adis16201.c
7576 +@@ -215,7 +215,7 @@ static const struct iio_chan_spec adis16201_channels[] = {
7577 + ADIS_AUX_ADC_CHAN(ADIS16201_AUX_ADC_REG, ADIS16201_SCAN_AUX_ADC, 0, 12),
7578 + ADIS_INCLI_CHAN(X, ADIS16201_XINCL_OUT_REG, ADIS16201_SCAN_INCLI_X,
7579 + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
7580 +- ADIS_INCLI_CHAN(X, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
7581 ++ ADIS_INCLI_CHAN(Y, ADIS16201_YINCL_OUT_REG, ADIS16201_SCAN_INCLI_Y,
7582 + BIT(IIO_CHAN_INFO_CALIBBIAS), 0, 14),
7583 + IIO_CHAN_SOFT_TIMESTAMP(7)
7584 + };
7585 +diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
7586 +index 86fda6182543b..e39b679126a2a 100644
7587 +--- a/drivers/iio/adc/Kconfig
7588 ++++ b/drivers/iio/adc/Kconfig
7589 +@@ -249,7 +249,7 @@ config AD799X
7590 + config AD9467
7591 + tristate "Analog Devices AD9467 High Speed ADC driver"
7592 + depends on SPI
7593 +- select ADI_AXI_ADC
7594 ++ depends on ADI_AXI_ADC
7595 + help
7596 + Say yes here to build support for Analog Devices:
7597 + * AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
7598 +diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
7599 +index 66c55ae67791b..bf55726702443 100644
7600 +--- a/drivers/iio/adc/ad7476.c
7601 ++++ b/drivers/iio/adc/ad7476.c
7602 +@@ -316,25 +316,15 @@ static int ad7476_probe(struct spi_device *spi)
7603 + spi_message_init(&st->msg);
7604 + spi_message_add_tail(&st->xfer, &st->msg);
7605 +
7606 +- ret = iio_triggered_buffer_setup(indio_dev, NULL,
7607 +- &ad7476_trigger_handler, NULL);
7608 ++ ret = devm_iio_triggered_buffer_setup(&spi->dev, indio_dev, NULL,
7609 ++ &ad7476_trigger_handler, NULL);
7610 + if (ret)
7611 +- goto error_disable_reg;
7612 ++ return ret;
7613 +
7614 + if (st->chip_info->reset)
7615 + st->chip_info->reset(st);
7616 +
7617 +- ret = iio_device_register(indio_dev);
7618 +- if (ret)
7619 +- goto error_ring_unregister;
7620 +- return 0;
7621 +-
7622 +-error_ring_unregister:
7623 +- iio_triggered_buffer_cleanup(indio_dev);
7624 +-error_disable_reg:
7625 +- regulator_disable(st->reg);
7626 +-
7627 +- return ret;
7628 ++ return devm_iio_device_register(&spi->dev, indio_dev);
7629 + }
7630 +
7631 + static const struct spi_device_id ad7476_id[] = {
7632 +diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
7633 +index 18a1898e3e348..ae391ec4a7275 100644
7634 +--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
7635 ++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
7636 +@@ -723,12 +723,16 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
7637 + }
7638 + }
7639 +
7640 +-static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val)
7641 ++static int inv_mpu6050_write_gyro_scale(struct inv_mpu6050_state *st, int val,
7642 ++ int val2)
7643 + {
7644 + int result, i;
7645 +
7646 ++ if (val != 0)
7647 ++ return -EINVAL;
7648 ++
7649 + for (i = 0; i < ARRAY_SIZE(gyro_scale_6050); ++i) {
7650 +- if (gyro_scale_6050[i] == val) {
7651 ++ if (gyro_scale_6050[i] == val2) {
7652 + result = inv_mpu6050_set_gyro_fsr(st, i);
7653 + if (result)
7654 + return result;
7655 +@@ -759,13 +763,17 @@ static int inv_write_raw_get_fmt(struct iio_dev *indio_dev,
7656 + return -EINVAL;
7657 + }
7658 +
7659 +-static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val)
7660 ++static int inv_mpu6050_write_accel_scale(struct inv_mpu6050_state *st, int val,
7661 ++ int val2)
7662 + {
7663 + int result, i;
7664 + u8 d;
7665 +
7666 ++ if (val != 0)
7667 ++ return -EINVAL;
7668 ++
7669 + for (i = 0; i < ARRAY_SIZE(accel_scale); ++i) {
7670 +- if (accel_scale[i] == val) {
7671 ++ if (accel_scale[i] == val2) {
7672 + d = (i << INV_MPU6050_ACCL_CONFIG_FSR_SHIFT);
7673 + result = regmap_write(st->map, st->reg->accl_config, d);
7674 + if (result)
7675 +@@ -806,10 +814,10 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
7676 + case IIO_CHAN_INFO_SCALE:
7677 + switch (chan->type) {
7678 + case IIO_ANGL_VEL:
7679 +- result = inv_mpu6050_write_gyro_scale(st, val2);
7680 ++ result = inv_mpu6050_write_gyro_scale(st, val, val2);
7681 + break;
7682 + case IIO_ACCEL:
7683 +- result = inv_mpu6050_write_accel_scale(st, val2);
7684 ++ result = inv_mpu6050_write_accel_scale(st, val, val2);
7685 + break;
7686 + default:
7687 + result = -EINVAL;
7688 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
7689 +index bbba0cd42c89b..ee568bdf3c788 100644
7690 +--- a/drivers/infiniband/core/cm.c
7691 ++++ b/drivers/infiniband/core/cm.c
7692 +@@ -2137,7 +2137,8 @@ static int cm_req_handler(struct cm_work *work)
7693 + goto destroy;
7694 + }
7695 +
7696 +- cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
7697 ++ if (cm_id_priv->av.ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE)
7698 ++ cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
7699 +
7700 + memset(&work->path[0], 0, sizeof(work->path[0]));
7701 + if (cm_req_has_alt_path(req_msg))
7702 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
7703 +index e3638f80e1d52..6af066a2c8c06 100644
7704 +--- a/drivers/infiniband/core/cma.c
7705 ++++ b/drivers/infiniband/core/cma.c
7706 +@@ -463,7 +463,6 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
7707 + id_priv->id.route.addr.dev_addr.transport =
7708 + rdma_node_get_transport(cma_dev->device->node_type);
7709 + list_add_tail(&id_priv->list, &cma_dev->id_list);
7710 +- rdma_restrack_add(&id_priv->res);
7711 +
7712 + trace_cm_id_attach(id_priv, cma_dev->device);
7713 + }
7714 +@@ -700,6 +699,7 @@ static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
7715 + mutex_lock(&lock);
7716 + cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
7717 + mutex_unlock(&lock);
7718 ++ rdma_restrack_add(&id_priv->res);
7719 + return 0;
7720 + }
7721 +
7722 +@@ -754,8 +754,10 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
7723 + }
7724 +
7725 + out:
7726 +- if (!ret)
7727 ++ if (!ret) {
7728 + cma_attach_to_dev(id_priv, cma_dev);
7729 ++ rdma_restrack_add(&id_priv->res);
7730 ++ }
7731 +
7732 + mutex_unlock(&lock);
7733 + return ret;
7734 +@@ -816,6 +818,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
7735 +
7736 + found:
7737 + cma_attach_to_dev(id_priv, cma_dev);
7738 ++ rdma_restrack_add(&id_priv->res);
7739 + mutex_unlock(&lock);
7740 + addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
7741 + memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
7742 +@@ -2529,6 +2532,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
7743 + rdma_addr_size(cma_src_addr(id_priv)));
7744 +
7745 + _cma_attach_to_dev(dev_id_priv, cma_dev);
7746 ++ rdma_restrack_add(&dev_id_priv->res);
7747 + cma_id_get(id_priv);
7748 + dev_id_priv->internal_id = 1;
7749 + dev_id_priv->afonly = id_priv->afonly;
7750 +@@ -3169,6 +3173,7 @@ port_found:
7751 + ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
7752 + id_priv->id.port_num = p;
7753 + cma_attach_to_dev(id_priv, cma_dev);
7754 ++ rdma_restrack_add(&id_priv->res);
7755 + cma_set_loopback(cma_src_addr(id_priv));
7756 + out:
7757 + mutex_unlock(&lock);
7758 +@@ -3201,6 +3206,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
7759 + if (status)
7760 + pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
7761 + status);
7762 ++ rdma_restrack_add(&id_priv->res);
7763 + } else if (status) {
7764 + pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
7765 + }
7766 +@@ -3812,6 +3818,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
7767 + if (ret)
7768 + goto err2;
7769 +
7770 ++ if (!cma_any_addr(addr))
7771 ++ rdma_restrack_add(&id_priv->res);
7772 + return 0;
7773 + err2:
7774 + if (id_priv->cma_dev)
7775 +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
7776 +index 995d4633b0a1c..d4d4959c2434c 100644
7777 +--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
7778 ++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
7779 +@@ -2784,6 +2784,7 @@ do_rq:
7780 + dev_err(&cq->hwq.pdev->dev,
7781 + "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
7782 + cqe_cons, rq->max_wqe);
7783 ++ rc = -EINVAL;
7784 + goto done;
7785 + }
7786 +
7787 +diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
7788 +index fa7878336100a..3ca47004b7527 100644
7789 +--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
7790 ++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
7791 +@@ -854,6 +854,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
7792 +
7793 + unmap_io:
7794 + pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
7795 ++ dpit->dbr_bar_reg_iomem = NULL;
7796 + return -ENOMEM;
7797 + }
7798 +
7799 +diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
7800 +index 5c95c789f302d..e800e8e8bed5a 100644
7801 +--- a/drivers/infiniband/hw/cxgb4/resource.c
7802 ++++ b/drivers/infiniband/hw/cxgb4/resource.c
7803 +@@ -216,7 +216,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
7804 + goto out;
7805 + entry->qid = qid;
7806 + list_add_tail(&entry->entry, &uctx->cqids);
7807 +- for (i = qid; i & rdev->qpmask; i++) {
7808 ++ for (i = qid + 1; i & rdev->qpmask; i++) {
7809 + entry = kmalloc(sizeof(*entry), GFP_KERNEL);
7810 + if (!entry)
7811 + goto out;
7812 +diff --git a/drivers/infiniband/hw/hfi1/firmware.c b/drivers/infiniband/hw/hfi1/firmware.c
7813 +index 0e83d4b61e463..2cf102b5abd44 100644
7814 +--- a/drivers/infiniband/hw/hfi1/firmware.c
7815 ++++ b/drivers/infiniband/hw/hfi1/firmware.c
7816 +@@ -1916,6 +1916,7 @@ int parse_platform_config(struct hfi1_devdata *dd)
7817 + dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
7818 + __func__, (ptr -
7819 + (u32 *)dd->platform_config.data));
7820 ++ ret = -EINVAL;
7821 + goto bail;
7822 + }
7823 + /* Jump the CRC DWORD */
7824 +diff --git a/drivers/infiniband/hw/hfi1/mmu_rb.c b/drivers/infiniband/hw/hfi1/mmu_rb.c
7825 +index f3fb28e3d5d74..d213f65d4cdd0 100644
7826 +--- a/drivers/infiniband/hw/hfi1/mmu_rb.c
7827 ++++ b/drivers/infiniband/hw/hfi1/mmu_rb.c
7828 +@@ -89,7 +89,7 @@ int hfi1_mmu_rb_register(void *ops_arg,
7829 + struct mmu_rb_handler *h;
7830 + int ret;
7831 +
7832 +- h = kmalloc(sizeof(*h), GFP_KERNEL);
7833 ++ h = kzalloc(sizeof(*h), GFP_KERNEL);
7834 + if (!h)
7835 + return -ENOMEM;
7836 +
7837 +diff --git a/drivers/infiniband/hw/i40iw/i40iw_pble.c b/drivers/infiniband/hw/i40iw/i40iw_pble.c
7838 +index 5f97643e22e53..ae7d227edad2f 100644
7839 +--- a/drivers/infiniband/hw/i40iw/i40iw_pble.c
7840 ++++ b/drivers/infiniband/hw/i40iw/i40iw_pble.c
7841 +@@ -392,12 +392,9 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
7842 + i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
7843 + pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
7844 + pble_rsrc->unallocated_pble -= (chunk->size >> 3);
7845 +- list_add(&chunk->list, &pble_rsrc->pinfo.clist);
7846 + sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
7847 + sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
7848 +- if (sd_entry->valid)
7849 +- return 0;
7850 +- if (dev->is_pf) {
7851 ++ if (dev->is_pf && !sd_entry->valid) {
7852 + ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
7853 + sd_reg_val, idx->sd_idx,
7854 + sd_entry->entry_type, true);
7855 +@@ -408,6 +405,7 @@ static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
7856 + }
7857 +
7858 + sd_entry->valid = true;
7859 ++ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
7860 + return 0;
7861 + error:
7862 + kfree(chunk);
7863 +diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
7864 +index 492cfe063bcad..13d50b1781660 100644
7865 +--- a/drivers/infiniband/hw/mlx5/fs.c
7866 ++++ b/drivers/infiniband/hw/mlx5/fs.c
7867 +@@ -1528,8 +1528,8 @@ static struct mlx5_ib_flow_handler *raw_fs_rule_add(
7868 + dst_num++;
7869 + }
7870 +
7871 +- handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher,
7872 +- flow_context, flow_act,
7873 ++ handler = _create_raw_flow_rule(dev, ft_prio, dst_num ? dst : NULL,
7874 ++ fs_matcher, flow_context, flow_act,
7875 + cmd_in, inlen, dst_num);
7876 +
7877 + if (IS_ERR(handler)) {
7878 +@@ -1885,8 +1885,9 @@ static int get_dests(struct uverbs_attr_bundle *attrs,
7879 + else
7880 + *dest_id = mqp->raw_packet_qp.rq.tirn;
7881 + *dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
7882 +- } else if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
7883 +- fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
7884 ++ } else if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS ||
7885 ++ fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_TX) &&
7886 ++ !(*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP)) {
7887 + *dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
7888 + }
7889 +
7890 +diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
7891 +index 75caeec378bda..6d2715f65d788 100644
7892 +--- a/drivers/infiniband/hw/mlx5/qp.c
7893 ++++ b/drivers/infiniband/hw/mlx5/qp.c
7894 +@@ -3079,6 +3079,19 @@ enum {
7895 + MLX5_PATH_FLAG_COUNTER = 1 << 2,
7896 + };
7897 +
7898 ++static int mlx5_to_ib_rate_map(u8 rate)
7899 ++{
7900 ++ static const int rates[] = { IB_RATE_PORT_CURRENT, IB_RATE_56_GBPS,
7901 ++ IB_RATE_25_GBPS, IB_RATE_100_GBPS,
7902 ++ IB_RATE_200_GBPS, IB_RATE_50_GBPS,
7903 ++ IB_RATE_400_GBPS };
7904 ++
7905 ++ if (rate < ARRAY_SIZE(rates))
7906 ++ return rates[rate];
7907 ++
7908 ++ return rate - MLX5_STAT_RATE_OFFSET;
7909 ++}
7910 ++
7911 + static int ib_to_mlx5_rate_map(u8 rate)
7912 + {
7913 + switch (rate) {
7914 +@@ -4420,7 +4433,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
7915 + rdma_ah_set_path_bits(ah_attr, MLX5_GET(ads, path, mlid));
7916 +
7917 + static_rate = MLX5_GET(ads, path, stat_rate);
7918 +- rdma_ah_set_static_rate(ah_attr, static_rate ? static_rate - 5 : 0);
7919 ++ rdma_ah_set_static_rate(ah_attr, mlx5_to_ib_rate_map(static_rate));
7920 + if (MLX5_GET(ads, path, grh) ||
7921 + ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
7922 + rdma_ah_set_grh(ah_attr, NULL, MLX5_GET(ads, path, flow_label),
7923 +diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
7924 +index c4bc58736e489..1715fbe0719d8 100644
7925 +--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
7926 ++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
7927 +@@ -636,8 +636,10 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
7928 + memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
7929 +
7930 + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
7931 +- &qp->iwarp_cm_flags))
7932 ++ &qp->iwarp_cm_flags)) {
7933 ++ rc = -ENODEV;
7934 + goto err; /* QP already being destroyed */
7935 ++ }
7936 +
7937 + rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
7938 + if (rc) {
7939 +diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
7940 +index df0d173d6acba..da2e867a1ed93 100644
7941 +--- a/drivers/infiniband/sw/rxe/rxe_av.c
7942 ++++ b/drivers/infiniband/sw/rxe/rxe_av.c
7943 +@@ -88,7 +88,7 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
7944 + type = RXE_NETWORK_TYPE_IPV4;
7945 + break;
7946 + case RDMA_NETWORK_IPV6:
7947 +- type = RXE_NETWORK_TYPE_IPV4;
7948 ++ type = RXE_NETWORK_TYPE_IPV6;
7949 + break;
7950 + default:
7951 + /* not reached - checked in rxe_av_chk_attr */
7952 +diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c
7953 +index 34a910cf0edbd..61c17db70d658 100644
7954 +--- a/drivers/infiniband/sw/siw/siw_mem.c
7955 ++++ b/drivers/infiniband/sw/siw/siw_mem.c
7956 +@@ -106,8 +106,6 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
7957 + mem->perms = rights & IWARP_ACCESS_MASK;
7958 + kref_init(&mem->ref);
7959 +
7960 +- mr->mem = mem;
7961 +-
7962 + get_random_bytes(&next, 4);
7963 + next &= 0x00ffffff;
7964 +
7965 +@@ -116,6 +114,8 @@ int siw_mr_add_mem(struct siw_mr *mr, struct ib_pd *pd, void *mem_obj,
7966 + kfree(mem);
7967 + return -ENOMEM;
7968 + }
7969 ++
7970 ++ mr->mem = mem;
7971 + /* Set the STag index part */
7972 + mem->stag = id << 8;
7973 + mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;
7974 +diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
7975 +index bd478947b93a5..e653c83f8a356 100644
7976 +--- a/drivers/infiniband/ulp/isert/ib_isert.c
7977 ++++ b/drivers/infiniband/ulp/isert/ib_isert.c
7978 +@@ -438,23 +438,23 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
7979 + isert_init_conn(isert_conn);
7980 + isert_conn->cm_id = cma_id;
7981 +
7982 +- ret = isert_alloc_login_buf(isert_conn, cma_id->device);
7983 +- if (ret)
7984 +- goto out;
7985 +-
7986 + device = isert_device_get(cma_id);
7987 + if (IS_ERR(device)) {
7988 + ret = PTR_ERR(device);
7989 +- goto out_rsp_dma_map;
7990 ++ goto out;
7991 + }
7992 + isert_conn->device = device;
7993 +
7994 ++ ret = isert_alloc_login_buf(isert_conn, cma_id->device);
7995 ++ if (ret)
7996 ++ goto out_conn_dev;
7997 ++
7998 + isert_set_nego_params(isert_conn, &event->param.conn);
7999 +
8000 + isert_conn->qp = isert_create_qp(isert_conn, cma_id);
8001 + if (IS_ERR(isert_conn->qp)) {
8002 + ret = PTR_ERR(isert_conn->qp);
8003 +- goto out_conn_dev;
8004 ++ goto out_rsp_dma_map;
8005 + }
8006 +
8007 + ret = isert_login_post_recv(isert_conn);
8008 +@@ -473,10 +473,10 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
8009 +
8010 + out_destroy_qp:
8011 + isert_destroy_qp(isert_conn);
8012 +-out_conn_dev:
8013 +- isert_device_put(device);
8014 + out_rsp_dma_map:
8015 + isert_free_login_buf(isert_conn);
8016 ++out_conn_dev:
8017 ++ isert_device_put(device);
8018 + out:
8019 + kfree(isert_conn);
8020 + rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
8021 +diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
8022 +index 6ff97fbf87566..7db550ba25d7f 100644
8023 +--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
8024 ++++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
8025 +@@ -2803,8 +2803,8 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
8026 + } while (!changed && old_state != RTRS_CLT_DEAD);
8027 +
8028 + if (likely(changed)) {
8029 +- rtrs_clt_destroy_sess_files(sess, sysfs_self);
8030 + rtrs_clt_remove_path_from_arr(sess);
8031 ++ rtrs_clt_destroy_sess_files(sess, sysfs_self);
8032 + kobject_put(&sess->kobj);
8033 + }
8034 +
8035 +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
8036 +index 53a8becac8276..07ecc7dc1822b 100644
8037 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
8038 ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
8039 +@@ -2378,6 +2378,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
8040 + pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
8041 + dev_name(&sdev->device->dev), port_num);
8042 + mutex_unlock(&sport->mutex);
8043 ++ ret = -EINVAL;
8044 + goto reject;
8045 + }
8046 +
8047 +diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
8048 +index 3c215f0a6052b..fa502c0e2e31b 100644
8049 +--- a/drivers/iommu/amd/init.c
8050 ++++ b/drivers/iommu/amd/init.c
8051 +@@ -1840,7 +1840,7 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
8052 + * IVHD and MMIO conflict.
8053 + */
8054 + if (features != iommu->features)
8055 +- pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).",
8056 ++ pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx).\n",
8057 + features, iommu->features);
8058 + }
8059 +
8060 +diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
8061 +index d4b7f40ccb029..57e5d223c4673 100644
8062 +--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
8063 ++++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
8064 +@@ -115,7 +115,7 @@
8065 + #define GERROR_PRIQ_ABT_ERR (1 << 3)
8066 + #define GERROR_EVTQ_ABT_ERR (1 << 2)
8067 + #define GERROR_CMDQ_ERR (1 << 0)
8068 +-#define GERROR_ERR_MASK 0xfd
8069 ++#define GERROR_ERR_MASK 0x1fd
8070 +
8071 + #define ARM_SMMU_GERRORN 0x64
8072 +
8073 +diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
8074 +index 7e3db4c0324d3..db9bf5ac07228 100644
8075 +--- a/drivers/iommu/intel/iommu.c
8076 ++++ b/drivers/iommu/intel/iommu.c
8077 +@@ -656,7 +656,14 @@ static int domain_update_iommu_snooping(struct intel_iommu *skip)
8078 + rcu_read_lock();
8079 + for_each_active_iommu(iommu, drhd) {
8080 + if (iommu != skip) {
8081 +- if (!ecap_sc_support(iommu->ecap)) {
8082 ++ /*
8083 ++ * If the hardware is operating in the scalable mode,
8084 ++ * the snooping control is always supported since we
8085 ++ * always set PASID-table-entry.PGSNP bit if the domain
8086 ++ * is managed outside (UNMANAGED).
8087 ++ */
8088 ++ if (!sm_supported(iommu) &&
8089 ++ !ecap_sc_support(iommu->ecap)) {
8090 + ret = 0;
8091 + break;
8092 + }
8093 +@@ -1021,8 +1028,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
8094 +
8095 + domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
8096 + pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
8097 +- if (domain_use_first_level(domain))
8098 ++ if (domain_use_first_level(domain)) {
8099 + pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
8100 ++ if (domain->domain.type == IOMMU_DOMAIN_DMA)
8101 ++ pteval |= DMA_FL_PTE_ACCESS;
8102 ++ }
8103 + if (cmpxchg64(&pte->val, 0ULL, pteval))
8104 + /* Someone else set it while we were thinking; use theirs. */
8105 + free_pgtable_page(tmp_page);
8106 +@@ -1338,6 +1348,11 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
8107 + readl, (sts & DMA_GSTS_RTPS), sts);
8108 +
8109 + raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
8110 ++
8111 ++ iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
8112 ++ if (sm_supported(iommu))
8113 ++ qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
8114 ++ iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
8115 + }
8116 +
8117 + void iommu_flush_write_buffer(struct intel_iommu *iommu)
8118 +@@ -2347,14 +2362,19 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
8119 + return -EINVAL;
8120 +
8121 + attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
8122 +- if (domain_use_first_level(domain))
8123 +- attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
8124 ++ attr |= DMA_FL_PTE_PRESENT;
8125 ++ if (domain_use_first_level(domain)) {
8126 ++ attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
8127 +
8128 +- if (!sg) {
8129 +- sg_res = nr_pages;
8130 +- pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
8131 ++ if (domain->domain.type == IOMMU_DOMAIN_DMA) {
8132 ++ attr |= DMA_FL_PTE_ACCESS;
8133 ++ if (prot & DMA_PTE_WRITE)
8134 ++ attr |= DMA_FL_PTE_DIRTY;
8135 ++ }
8136 + }
8137 +
8138 ++ pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
8139 ++
8140 + while (nr_pages > 0) {
8141 + uint64_t tmp;
8142 +
8143 +@@ -2506,6 +2526,10 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn
8144 + (((u16)bus) << 8) | devfn,
8145 + DMA_CCMD_MASK_NOBIT,
8146 + DMA_CCMD_DEVICE_INVL);
8147 ++
8148 ++ if (sm_supported(iommu))
8149 ++ qi_flush_pasid_cache(iommu, did_old, QI_PC_ALL_PASIDS, 0);
8150 ++
8151 + iommu->flush.flush_iotlb(iommu,
8152 + did_old,
8153 + 0,
8154 +@@ -2599,6 +2623,9 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
8155 +
8156 + flags |= (level == 5) ? PASID_FLAG_FL5LP : 0;
8157 +
8158 ++ if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
8159 ++ flags |= PASID_FLAG_PAGE_SNOOP;
8160 ++
8161 + return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
8162 + domain->iommu_did[iommu->seq_id],
8163 + flags);
8164 +@@ -3369,8 +3396,6 @@ static int __init init_dmars(void)
8165 + register_pasid_allocator(iommu);
8166 + #endif
8167 + iommu_set_root_entry(iommu);
8168 +- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
8169 +- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
8170 + }
8171 +
8172 + #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
8173 +@@ -4148,12 +4173,7 @@ static int init_iommu_hw(void)
8174 + }
8175 +
8176 + iommu_flush_write_buffer(iommu);
8177 +-
8178 + iommu_set_root_entry(iommu);
8179 +-
8180 +- iommu->flush.flush_context(iommu, 0, 0, 0,
8181 +- DMA_CCMD_GLOBAL_INVL);
8182 +- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
8183 + iommu_enable_translation(iommu);
8184 + iommu_disable_protect_mem_regions(iommu);
8185 + }
8186 +@@ -4481,8 +4501,6 @@ static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
8187 + goto disable_iommu;
8188 +
8189 + iommu_set_root_entry(iommu);
8190 +- iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
8191 +- iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
8192 + iommu_enable_translation(iommu);
8193 +
8194 + iommu_disable_protect_mem_regions(iommu);
8195 +diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
8196 +index b92af83b79bdc..ce4ef2d245e3b 100644
8197 +--- a/drivers/iommu/intel/pasid.c
8198 ++++ b/drivers/iommu/intel/pasid.c
8199 +@@ -411,6 +411,16 @@ static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
8200 + pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
8201 + }
8202 +
8203 ++/*
8204 ++ * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
8205 ++ * PASID entry.
8206 ++ */
8207 ++static inline void
8208 ++pasid_set_pgsnp(struct pasid_entry *pe)
8209 ++{
8210 ++ pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
8211 ++}
8212 ++
8213 + /*
8214 + * Setup the First Level Page table Pointer field (Bit 140~191)
8215 + * of a scalable mode PASID entry.
8216 +@@ -579,6 +589,9 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
8217 + }
8218 + }
8219 +
8220 ++ if (flags & PASID_FLAG_PAGE_SNOOP)
8221 ++ pasid_set_pgsnp(pte);
8222 ++
8223 + pasid_set_domain_id(pte, did);
8224 + pasid_set_address_width(pte, iommu->agaw);
8225 + pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
8226 +@@ -657,6 +670,9 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
8227 + pasid_set_fault_enable(pte);
8228 + pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
8229 +
8230 ++ if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
8231 ++ pasid_set_pgsnp(pte);
8232 ++
8233 + /*
8234 + * Since it is a second level only translation setup, we should
8235 + * set SRE bit as well (addresses are expected to be GPAs).
8236 +diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
8237 +index 444c0bec221a4..086ebd6973199 100644
8238 +--- a/drivers/iommu/intel/pasid.h
8239 ++++ b/drivers/iommu/intel/pasid.h
8240 +@@ -48,6 +48,7 @@
8241 + */
8242 + #define PASID_FLAG_SUPERVISOR_MODE BIT(0)
8243 + #define PASID_FLAG_NESTED BIT(1)
8244 ++#define PASID_FLAG_PAGE_SNOOP BIT(2)
8245 +
8246 + /*
8247 + * The PASID_FLAG_FL5LP flag Indicates using 5-level paging for first-
8248 +diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
8249 +index b200a3acc6ed9..6168dec7cb40d 100644
8250 +--- a/drivers/iommu/intel/svm.c
8251 ++++ b/drivers/iommu/intel/svm.c
8252 +@@ -899,7 +899,7 @@ intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc)
8253 + /* Fill in event data for device specific processing */
8254 + memset(&event, 0, sizeof(struct iommu_fault_event));
8255 + event.fault.type = IOMMU_FAULT_PAGE_REQ;
8256 +- event.fault.prm.addr = desc->addr;
8257 ++ event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
8258 + event.fault.prm.pasid = desc->pasid;
8259 + event.fault.prm.grpid = desc->prg_index;
8260 + event.fault.prm.perm = prq_to_iommu_prot(desc);
8261 +@@ -959,7 +959,17 @@ static irqreturn_t prq_event_thread(int irq, void *d)
8262 + ((unsigned long long *)req)[1]);
8263 + goto no_pasid;
8264 + }
8265 +-
8266 ++ /* We shall not receive page request for supervisor SVM */
8267 ++ if (req->pm_req && (req->rd_req | req->wr_req)) {
8268 ++ pr_err("Unexpected page request in Privilege Mode");
8269 ++ /* No need to find the matching sdev as for bad_req */
8270 ++ goto no_pasid;
8271 ++ }
8272 ++ /* DMA read with exec requeset is not supported. */
8273 ++ if (req->exe_req && req->rd_req) {
8274 ++ pr_err("Execution request not supported\n");
8275 ++ goto no_pasid;
8276 ++ }
8277 + if (!svm || svm->pasid != req->pasid) {
8278 + rcu_read_lock();
8279 + svm = ioasid_find(NULL, req->pasid, NULL);
8280 +@@ -1061,12 +1071,12 @@ no_pasid:
8281 + QI_PGRP_RESP_TYPE;
8282 + resp.qw1 = QI_PGRP_IDX(req->prg_index) |
8283 + QI_PGRP_LPIG(req->lpig);
8284 ++ resp.qw2 = 0;
8285 ++ resp.qw3 = 0;
8286 +
8287 + if (req->priv_data_present)
8288 + memcpy(&resp.qw2, req->priv_data,
8289 + sizeof(req->priv_data));
8290 +- resp.qw2 = 0;
8291 +- resp.qw3 = 0;
8292 + qi_submit_sync(iommu, &resp, 1, 0);
8293 + }
8294 + prq_advance:
8295 +diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
8296 +index 0d9adce6d812f..9b8664d388af0 100644
8297 +--- a/drivers/iommu/iommu.c
8298 ++++ b/drivers/iommu/iommu.c
8299 +@@ -2872,10 +2872,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
8300 +
8301 + int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
8302 + {
8303 +- const struct iommu_ops *ops = dev->bus->iommu_ops;
8304 ++ if (dev->iommu && dev->iommu->iommu_dev) {
8305 ++ const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
8306 +
8307 +- if (ops && ops->dev_enable_feat)
8308 +- return ops->dev_enable_feat(dev, feat);
8309 ++ if (ops->dev_enable_feat)
8310 ++ return ops->dev_enable_feat(dev, feat);
8311 ++ }
8312 +
8313 + return -ENODEV;
8314 + }
8315 +@@ -2888,10 +2890,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
8316 + */
8317 + int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
8318 + {
8319 +- const struct iommu_ops *ops = dev->bus->iommu_ops;
8320 ++ if (dev->iommu && dev->iommu->iommu_dev) {
8321 ++ const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
8322 +
8323 +- if (ops && ops->dev_disable_feat)
8324 +- return ops->dev_disable_feat(dev, feat);
8325 ++ if (ops->dev_disable_feat)
8326 ++ return ops->dev_disable_feat(dev, feat);
8327 ++ }
8328 +
8329 + return -EBUSY;
8330 + }
8331 +@@ -2899,10 +2903,12 @@ EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
8332 +
8333 + bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
8334 + {
8335 +- const struct iommu_ops *ops = dev->bus->iommu_ops;
8336 ++ if (dev->iommu && dev->iommu->iommu_dev) {
8337 ++ const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
8338 +
8339 +- if (ops && ops->dev_feat_enabled)
8340 +- return ops->dev_feat_enabled(dev, feat);
8341 ++ if (ops->dev_feat_enabled)
8342 ++ return ops->dev_feat_enabled(dev, feat);
8343 ++ }
8344 +
8345 + return false;
8346 + }
8347 +diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
8348 +index 563a9b3662941..e81e89a81cb5b 100644
8349 +--- a/drivers/irqchip/irq-gic-v3-mbi.c
8350 ++++ b/drivers/irqchip/irq-gic-v3-mbi.c
8351 +@@ -303,7 +303,7 @@ int __init mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent)
8352 + reg = of_get_property(np, "mbi-alias", NULL);
8353 + if (reg) {
8354 + mbi_phys_base = of_translate_address(np, reg);
8355 +- if (mbi_phys_base == OF_BAD_ADDR) {
8356 ++ if (mbi_phys_base == (phys_addr_t)OF_BAD_ADDR) {
8357 + ret = -ENXIO;
8358 + goto err_free_mbi;
8359 + }
8360 +diff --git a/drivers/mailbox/sprd-mailbox.c b/drivers/mailbox/sprd-mailbox.c
8361 +index 4c325301a2fe8..94d9067dc8d09 100644
8362 +--- a/drivers/mailbox/sprd-mailbox.c
8363 ++++ b/drivers/mailbox/sprd-mailbox.c
8364 +@@ -60,6 +60,8 @@ struct sprd_mbox_priv {
8365 + struct clk *clk;
8366 + u32 outbox_fifo_depth;
8367 +
8368 ++ struct mutex lock;
8369 ++ u32 refcnt;
8370 + struct mbox_chan chan[SPRD_MBOX_CHAN_MAX];
8371 + };
8372 +
8373 +@@ -115,7 +117,11 @@ static irqreturn_t sprd_mbox_outbox_isr(int irq, void *data)
8374 + id = readl(priv->outbox_base + SPRD_MBOX_ID);
8375 +
8376 + chan = &priv->chan[id];
8377 +- mbox_chan_received_data(chan, (void *)msg);
8378 ++ if (chan->cl)
8379 ++ mbox_chan_received_data(chan, (void *)msg);
8380 ++ else
8381 ++ dev_warn_ratelimited(priv->dev,
8382 ++ "message's been dropped at ch[%d]\n", id);
8383 +
8384 + /* Trigger to update outbox FIFO pointer */
8385 + writel(0x1, priv->outbox_base + SPRD_MBOX_TRIGGER);
8386 +@@ -215,18 +221,22 @@ static int sprd_mbox_startup(struct mbox_chan *chan)
8387 + struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
8388 + u32 val;
8389 +
8390 +- /* Select outbox FIFO mode and reset the outbox FIFO status */
8391 +- writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
8392 ++ mutex_lock(&priv->lock);
8393 ++ if (priv->refcnt++ == 0) {
8394 ++ /* Select outbox FIFO mode and reset the outbox FIFO status */
8395 ++ writel(0x0, priv->outbox_base + SPRD_MBOX_FIFO_RST);
8396 +
8397 +- /* Enable inbox FIFO overflow and delivery interrupt */
8398 +- val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
8399 +- val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
8400 +- writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
8401 ++ /* Enable inbox FIFO overflow and delivery interrupt */
8402 ++ val = readl(priv->inbox_base + SPRD_MBOX_IRQ_MSK);
8403 ++ val &= ~(SPRD_INBOX_FIFO_OVERFLOW_IRQ | SPRD_INBOX_FIFO_DELIVER_IRQ);
8404 ++ writel(val, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
8405 +
8406 +- /* Enable outbox FIFO not empty interrupt */
8407 +- val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
8408 +- val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
8409 +- writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
8410 ++ /* Enable outbox FIFO not empty interrupt */
8411 ++ val = readl(priv->outbox_base + SPRD_MBOX_IRQ_MSK);
8412 ++ val &= ~SPRD_OUTBOX_FIFO_NOT_EMPTY_IRQ;
8413 ++ writel(val, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
8414 ++ }
8415 ++ mutex_unlock(&priv->lock);
8416 +
8417 + return 0;
8418 + }
8419 +@@ -235,9 +245,13 @@ static void sprd_mbox_shutdown(struct mbox_chan *chan)
8420 + {
8421 + struct sprd_mbox_priv *priv = to_sprd_mbox_priv(chan->mbox);
8422 +
8423 +- /* Disable inbox & outbox interrupt */
8424 +- writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
8425 +- writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
8426 ++ mutex_lock(&priv->lock);
8427 ++ if (--priv->refcnt == 0) {
8428 ++ /* Disable inbox & outbox interrupt */
8429 ++ writel(SPRD_INBOX_FIFO_IRQ_MASK, priv->inbox_base + SPRD_MBOX_IRQ_MSK);
8430 ++ writel(SPRD_OUTBOX_FIFO_IRQ_MASK, priv->outbox_base + SPRD_MBOX_IRQ_MSK);
8431 ++ }
8432 ++ mutex_unlock(&priv->lock);
8433 + }
8434 +
8435 + static const struct mbox_chan_ops sprd_mbox_ops = {
8436 +@@ -266,6 +280,7 @@ static int sprd_mbox_probe(struct platform_device *pdev)
8437 + return -ENOMEM;
8438 +
8439 + priv->dev = dev;
8440 ++ mutex_init(&priv->lock);
8441 +
8442 + /*
8443 + * The Spreadtrum mailbox uses an inbox to send messages to the target
8444 +diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
8445 +index 200c5d0f08bf5..ea3130e116801 100644
8446 +--- a/drivers/md/md-bitmap.c
8447 ++++ b/drivers/md/md-bitmap.c
8448 +@@ -1722,6 +1722,8 @@ void md_bitmap_flush(struct mddev *mddev)
8449 + md_bitmap_daemon_work(mddev);
8450 + bitmap->daemon_lastrun -= sleep;
8451 + md_bitmap_daemon_work(mddev);
8452 ++ if (mddev->bitmap_info.external)
8453 ++ md_super_wait(mddev);
8454 + md_bitmap_update_sb(bitmap);
8455 + }
8456 +
8457 +diff --git a/drivers/md/md.c b/drivers/md/md.c
8458 +index 7a0a228d64bbe..288d26013de27 100644
8459 +--- a/drivers/md/md.c
8460 ++++ b/drivers/md/md.c
8461 +@@ -748,7 +748,34 @@ void mddev_init(struct mddev *mddev)
8462 + }
8463 + EXPORT_SYMBOL_GPL(mddev_init);
8464 +
8465 ++static struct mddev *mddev_find_locked(dev_t unit)
8466 ++{
8467 ++ struct mddev *mddev;
8468 ++
8469 ++ list_for_each_entry(mddev, &all_mddevs, all_mddevs)
8470 ++ if (mddev->unit == unit)
8471 ++ return mddev;
8472 ++
8473 ++ return NULL;
8474 ++}
8475 ++
8476 + static struct mddev *mddev_find(dev_t unit)
8477 ++{
8478 ++ struct mddev *mddev;
8479 ++
8480 ++ if (MAJOR(unit) != MD_MAJOR)
8481 ++ unit &= ~((1 << MdpMinorShift) - 1);
8482 ++
8483 ++ spin_lock(&all_mddevs_lock);
8484 ++ mddev = mddev_find_locked(unit);
8485 ++ if (mddev)
8486 ++ mddev_get(mddev);
8487 ++ spin_unlock(&all_mddevs_lock);
8488 ++
8489 ++ return mddev;
8490 ++}
8491 ++
8492 ++static struct mddev *mddev_find_or_alloc(dev_t unit)
8493 + {
8494 + struct mddev *mddev, *new = NULL;
8495 +
8496 +@@ -759,13 +786,13 @@ static struct mddev *mddev_find(dev_t unit)
8497 + spin_lock(&all_mddevs_lock);
8498 +
8499 + if (unit) {
8500 +- list_for_each_entry(mddev, &all_mddevs, all_mddevs)
8501 +- if (mddev->unit == unit) {
8502 +- mddev_get(mddev);
8503 +- spin_unlock(&all_mddevs_lock);
8504 +- kfree(new);
8505 +- return mddev;
8506 +- }
8507 ++ mddev = mddev_find_locked(unit);
8508 ++ if (mddev) {
8509 ++ mddev_get(mddev);
8510 ++ spin_unlock(&all_mddevs_lock);
8511 ++ kfree(new);
8512 ++ return mddev;
8513 ++ }
8514 +
8515 + if (new) {
8516 + list_add(&new->all_mddevs, &all_mddevs);
8517 +@@ -791,12 +818,7 @@ static struct mddev *mddev_find(dev_t unit)
8518 + return NULL;
8519 + }
8520 +
8521 +- is_free = 1;
8522 +- list_for_each_entry(mddev, &all_mddevs, all_mddevs)
8523 +- if (mddev->unit == dev) {
8524 +- is_free = 0;
8525 +- break;
8526 +- }
8527 ++ is_free = !mddev_find_locked(dev);
8528 + }
8529 + new->unit = dev;
8530 + new->md_minor = MINOR(dev);
8531 +@@ -5656,7 +5678,7 @@ static int md_alloc(dev_t dev, char *name)
8532 + * writing to /sys/module/md_mod/parameters/new_array.
8533 + */
8534 + static DEFINE_MUTEX(disks_mutex);
8535 +- struct mddev *mddev = mddev_find(dev);
8536 ++ struct mddev *mddev = mddev_find_or_alloc(dev);
8537 + struct gendisk *disk;
8538 + int partitioned;
8539 + int shift;
8540 +@@ -6539,11 +6561,9 @@ static void autorun_devices(int part)
8541 +
8542 + md_probe(dev, NULL, NULL);
8543 + mddev = mddev_find(dev);
8544 +- if (!mddev || !mddev->gendisk) {
8545 +- if (mddev)
8546 +- mddev_put(mddev);
8547 ++ if (!mddev)
8548 + break;
8549 +- }
8550 ++
8551 + if (mddev_lock(mddev))
8552 + pr_warn("md: %s locked, cannot run\n", mdname(mddev));
8553 + else if (mddev->raid_disks || mddev->major_version
8554 +@@ -7837,8 +7857,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
8555 + /* Wait until bdev->bd_disk is definitely gone */
8556 + if (work_pending(&mddev->del_work))
8557 + flush_workqueue(md_misc_wq);
8558 +- /* Then retry the open from the top */
8559 +- return -ERESTARTSYS;
8560 ++ return -EBUSY;
8561 + }
8562 + BUG_ON(mddev != bdev->bd_disk->private_data);
8563 +
8564 +@@ -8168,7 +8187,11 @@ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
8565 + loff_t l = *pos;
8566 + struct mddev *mddev;
8567 +
8568 +- if (l >= 0x10000)
8569 ++ if (l == 0x10000) {
8570 ++ ++*pos;
8571 ++ return (void *)2;
8572 ++ }
8573 ++ if (l > 0x10000)
8574 + return NULL;
8575 + if (!l--)
8576 + /* header */
8577 +@@ -9267,11 +9290,11 @@ void md_check_recovery(struct mddev *mddev)
8578 + }
8579 +
8580 + if (mddev_is_clustered(mddev)) {
8581 +- struct md_rdev *rdev;
8582 ++ struct md_rdev *rdev, *tmp;
8583 + /* kick the device if another node issued a
8584 + * remove disk.
8585 + */
8586 +- rdev_for_each(rdev, mddev) {
8587 ++ rdev_for_each_safe(rdev, tmp, mddev) {
8588 + if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
8589 + rdev->raid_disk < 0)
8590 + md_kick_rdev_from_array(rdev);
8591 +@@ -9588,7 +9611,7 @@ err_wq:
8592 + static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
8593 + {
8594 + struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
8595 +- struct md_rdev *rdev2;
8596 ++ struct md_rdev *rdev2, *tmp;
8597 + int role, ret;
8598 + char b[BDEVNAME_SIZE];
8599 +
8600 +@@ -9605,7 +9628,7 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
8601 + }
8602 +
8603 + /* Check for change of roles in the active devices */
8604 +- rdev_for_each(rdev2, mddev) {
8605 ++ rdev_for_each_safe(rdev2, tmp, mddev) {
8606 + if (test_bit(Faulty, &rdev2->flags))
8607 + continue;
8608 +
8609 +diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
8610 +index 21fb16cc5ca1e..e43edb0d76f4b 100644
8611 +--- a/drivers/media/common/saa7146/saa7146_core.c
8612 ++++ b/drivers/media/common/saa7146/saa7146_core.c
8613 +@@ -253,7 +253,7 @@ int saa7146_pgtable_build_single(struct pci_dev *pci, struct saa7146_pgtable *pt
8614 + i, sg_dma_address(list), sg_dma_len(list),
8615 + list->offset);
8616 + */
8617 +- for (p = 0; p * 4096 < list->length; p++, ptr++) {
8618 ++ for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++) {
8619 + *ptr = cpu_to_le32(sg_dma_address(list) + p * 4096);
8620 + nr_pages++;
8621 + }
8622 +diff --git a/drivers/media/common/saa7146/saa7146_video.c b/drivers/media/common/saa7146/saa7146_video.c
8623 +index ccd15b4d4920b..0d1be4042a403 100644
8624 +--- a/drivers/media/common/saa7146/saa7146_video.c
8625 ++++ b/drivers/media/common/saa7146/saa7146_video.c
8626 +@@ -247,9 +247,8 @@ static int saa7146_pgtable_build(struct saa7146_dev *dev, struct saa7146_buf *bu
8627 +
8628 + /* walk all pages, copy all page addresses to ptr1 */
8629 + for (i = 0; i < length; i++, list++) {
8630 +- for (p = 0; p * 4096 < list->length; p++, ptr1++) {
8631 ++ for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr1++)
8632 + *ptr1 = cpu_to_le32(sg_dma_address(list) - list->offset);
8633 +- }
8634 + }
8635 + /*
8636 + ptr1 = pt1->cpu;
8637 +diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
8638 +index ad6d9d564a87e..c120cffb52ad4 100644
8639 +--- a/drivers/media/dvb-frontends/m88ds3103.c
8640 ++++ b/drivers/media/dvb-frontends/m88ds3103.c
8641 +@@ -1904,8 +1904,8 @@ static int m88ds3103_probe(struct i2c_client *client,
8642 +
8643 + dev->dt_client = i2c_new_dummy_device(client->adapter,
8644 + dev->dt_addr);
8645 +- if (!dev->dt_client) {
8646 +- ret = -ENODEV;
8647 ++ if (IS_ERR(dev->dt_client)) {
8648 ++ ret = PTR_ERR(dev->dt_client);
8649 + goto err_kfree;
8650 + }
8651 + }
8652 +diff --git a/drivers/media/i2c/imx219.c b/drivers/media/i2c/imx219.c
8653 +index 0ae66091a6962..4771d0ef2c46f 100644
8654 +--- a/drivers/media/i2c/imx219.c
8655 ++++ b/drivers/media/i2c/imx219.c
8656 +@@ -1026,29 +1026,47 @@ static int imx219_start_streaming(struct imx219 *imx219)
8657 + const struct imx219_reg_list *reg_list;
8658 + int ret;
8659 +
8660 ++ ret = pm_runtime_get_sync(&client->dev);
8661 ++ if (ret < 0) {
8662 ++ pm_runtime_put_noidle(&client->dev);
8663 ++ return ret;
8664 ++ }
8665 ++
8666 + /* Apply default values of current mode */
8667 + reg_list = &imx219->mode->reg_list;
8668 + ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
8669 + if (ret) {
8670 + dev_err(&client->dev, "%s failed to set mode\n", __func__);
8671 +- return ret;
8672 ++ goto err_rpm_put;
8673 + }
8674 +
8675 + ret = imx219_set_framefmt(imx219);
8676 + if (ret) {
8677 + dev_err(&client->dev, "%s failed to set frame format: %d\n",
8678 + __func__, ret);
8679 +- return ret;
8680 ++ goto err_rpm_put;
8681 + }
8682 +
8683 + /* Apply customized values from user */
8684 + ret = __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
8685 + if (ret)
8686 +- return ret;
8687 ++ goto err_rpm_put;
8688 +
8689 + /* set stream on register */
8690 +- return imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
8691 +- IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
8692 ++ ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
8693 ++ IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
8694 ++ if (ret)
8695 ++ goto err_rpm_put;
8696 ++
8697 ++ /* vflip and hflip cannot change during streaming */
8698 ++ __v4l2_ctrl_grab(imx219->vflip, true);
8699 ++ __v4l2_ctrl_grab(imx219->hflip, true);
8700 ++
8701 ++ return 0;
8702 ++
8703 ++err_rpm_put:
8704 ++ pm_runtime_put(&client->dev);
8705 ++ return ret;
8706 + }
8707 +
8708 + static void imx219_stop_streaming(struct imx219 *imx219)
8709 +@@ -1061,12 +1079,16 @@ static void imx219_stop_streaming(struct imx219 *imx219)
8710 + IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
8711 + if (ret)
8712 + dev_err(&client->dev, "%s failed to set stream\n", __func__);
8713 ++
8714 ++ __v4l2_ctrl_grab(imx219->vflip, false);
8715 ++ __v4l2_ctrl_grab(imx219->hflip, false);
8716 ++
8717 ++ pm_runtime_put(&client->dev);
8718 + }
8719 +
8720 + static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
8721 + {
8722 + struct imx219 *imx219 = to_imx219(sd);
8723 +- struct i2c_client *client = v4l2_get_subdevdata(sd);
8724 + int ret = 0;
8725 +
8726 + mutex_lock(&imx219->mutex);
8727 +@@ -1076,36 +1098,23 @@ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
8728 + }
8729 +
8730 + if (enable) {
8731 +- ret = pm_runtime_get_sync(&client->dev);
8732 +- if (ret < 0) {
8733 +- pm_runtime_put_noidle(&client->dev);
8734 +- goto err_unlock;
8735 +- }
8736 +-
8737 + /*
8738 + * Apply default & customized values
8739 + * and then start streaming.
8740 + */
8741 + ret = imx219_start_streaming(imx219);
8742 + if (ret)
8743 +- goto err_rpm_put;
8744 ++ goto err_unlock;
8745 + } else {
8746 + imx219_stop_streaming(imx219);
8747 +- pm_runtime_put(&client->dev);
8748 + }
8749 +
8750 + imx219->streaming = enable;
8751 +
8752 +- /* vflip and hflip cannot change during streaming */
8753 +- __v4l2_ctrl_grab(imx219->vflip, enable);
8754 +- __v4l2_ctrl_grab(imx219->hflip, enable);
8755 +-
8756 + mutex_unlock(&imx219->mutex);
8757 +
8758 + return ret;
8759 +
8760 +-err_rpm_put:
8761 +- pm_runtime_put(&client->dev);
8762 + err_unlock:
8763 + mutex_unlock(&imx219->mutex);
8764 +
8765 +diff --git a/drivers/media/pci/saa7134/saa7134-core.c b/drivers/media/pci/saa7134/saa7134-core.c
8766 +index 391572a6ec76a..efb757d5168a6 100644
8767 +--- a/drivers/media/pci/saa7134/saa7134-core.c
8768 ++++ b/drivers/media/pci/saa7134/saa7134-core.c
8769 +@@ -243,7 +243,7 @@ int saa7134_pgtable_build(struct pci_dev *pci, struct saa7134_pgtable *pt,
8770 +
8771 + ptr = pt->cpu + startpage;
8772 + for (i = 0; i < length; i++, list = sg_next(list)) {
8773 +- for (p = 0; p * 4096 < list->length; p++, ptr++)
8774 ++ for (p = 0; p * 4096 < sg_dma_len(list); p++, ptr++)
8775 + *ptr = cpu_to_le32(sg_dma_address(list) +
8776 + list->offset + p * 4096);
8777 + }
8778 +diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
8779 +index f2c4dadd6a0eb..7bb6babdcade0 100644
8780 +--- a/drivers/media/platform/aspeed-video.c
8781 ++++ b/drivers/media/platform/aspeed-video.c
8782 +@@ -514,8 +514,8 @@ static void aspeed_video_off(struct aspeed_video *video)
8783 + aspeed_video_write(video, VE_INTERRUPT_STATUS, 0xffffffff);
8784 +
8785 + /* Turn off the relevant clocks */
8786 +- clk_disable(video->vclk);
8787 + clk_disable(video->eclk);
8788 ++ clk_disable(video->vclk);
8789 +
8790 + clear_bit(VIDEO_CLOCKS_ON, &video->flags);
8791 + }
8792 +@@ -526,8 +526,8 @@ static void aspeed_video_on(struct aspeed_video *video)
8793 + return;
8794 +
8795 + /* Turn on the relevant clocks */
8796 +- clk_enable(video->eclk);
8797 + clk_enable(video->vclk);
8798 ++ clk_enable(video->eclk);
8799 +
8800 + set_bit(VIDEO_CLOCKS_ON, &video->flags);
8801 + }
8802 +@@ -1719,8 +1719,11 @@ static int aspeed_video_probe(struct platform_device *pdev)
8803 + return rc;
8804 +
8805 + rc = aspeed_video_setup_video(video);
8806 +- if (rc)
8807 ++ if (rc) {
8808 ++ clk_unprepare(video->vclk);
8809 ++ clk_unprepare(video->eclk);
8810 + return rc;
8811 ++ }
8812 +
8813 + return 0;
8814 + }
8815 +diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
8816 +index d5bfd6fff85b4..fd5993b3e6743 100644
8817 +--- a/drivers/media/platform/qcom/venus/core.c
8818 ++++ b/drivers/media/platform/qcom/venus/core.c
8819 +@@ -195,11 +195,11 @@ static int venus_probe(struct platform_device *pdev)
8820 + if (IS_ERR(core->base))
8821 + return PTR_ERR(core->base);
8822 +
8823 +- core->video_path = of_icc_get(dev, "video-mem");
8824 ++ core->video_path = devm_of_icc_get(dev, "video-mem");
8825 + if (IS_ERR(core->video_path))
8826 + return PTR_ERR(core->video_path);
8827 +
8828 +- core->cpucfg_path = of_icc_get(dev, "cpu-cfg");
8829 ++ core->cpucfg_path = devm_of_icc_get(dev, "cpu-cfg");
8830 + if (IS_ERR(core->cpucfg_path))
8831 + return PTR_ERR(core->cpucfg_path);
8832 +
8833 +@@ -334,9 +334,6 @@ static int venus_remove(struct platform_device *pdev)
8834 +
8835 + hfi_destroy(core);
8836 +
8837 +- icc_put(core->video_path);
8838 +- icc_put(core->cpucfg_path);
8839 +-
8840 + v4l2_device_unregister(&core->v4l2_dev);
8841 + mutex_destroy(&core->pm_lock);
8842 + mutex_destroy(&core->lock);
8843 +diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
8844 +index b55de9ab64d8b..3181d0781b613 100644
8845 +--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
8846 ++++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
8847 +@@ -151,8 +151,10 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
8848 + }
8849 +
8850 + subdev = sun6i_video_remote_subdev(video, NULL);
8851 +- if (!subdev)
8852 ++ if (!subdev) {
8853 ++ ret = -EINVAL;
8854 + goto stop_media_pipeline;
8855 ++ }
8856 +
8857 + config.pixelformat = video->fmt.fmt.pix.pixelformat;
8858 + config.code = video->mbus_code;
8859 +diff --git a/drivers/media/test-drivers/vivid/vivid-vid-out.c b/drivers/media/test-drivers/vivid/vivid-vid-out.c
8860 +index ee3446e3217cc..cd6c247547d66 100644
8861 +--- a/drivers/media/test-drivers/vivid/vivid-vid-out.c
8862 ++++ b/drivers/media/test-drivers/vivid/vivid-vid-out.c
8863 +@@ -1025,7 +1025,7 @@ int vivid_vid_out_s_fbuf(struct file *file, void *fh,
8864 + return -EINVAL;
8865 + }
8866 + dev->fbuf_out_flags &= ~(chroma_flags | alpha_flags);
8867 +- dev->fbuf_out_flags = a->flags & (chroma_flags | alpha_flags);
8868 ++ dev->fbuf_out_flags |= a->flags & (chroma_flags | alpha_flags);
8869 + return 0;
8870 + }
8871 +
8872 +diff --git a/drivers/media/tuners/m88rs6000t.c b/drivers/media/tuners/m88rs6000t.c
8873 +index b3505f4024764..8647c50b66e50 100644
8874 +--- a/drivers/media/tuners/m88rs6000t.c
8875 ++++ b/drivers/media/tuners/m88rs6000t.c
8876 +@@ -525,7 +525,7 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
8877 + PGA2_cri = PGA2_GC >> 2;
8878 + PGA2_crf = PGA2_GC & 0x03;
8879 +
8880 +- for (i = 0; i <= RF_GC; i++)
8881 ++ for (i = 0; i <= RF_GC && i < ARRAY_SIZE(RFGS); i++)
8882 + RFG += RFGS[i];
8883 +
8884 + if (RF_GC == 0)
8885 +@@ -537,12 +537,12 @@ static int m88rs6000t_get_rf_strength(struct dvb_frontend *fe, u16 *strength)
8886 + if (RF_GC == 3)
8887 + RFG += 100;
8888 +
8889 +- for (i = 0; i <= IF_GC; i++)
8890 ++ for (i = 0; i <= IF_GC && i < ARRAY_SIZE(IFGS); i++)
8891 + IFG += IFGS[i];
8892 +
8893 + TIAG = TIA_GC * TIA_GS;
8894 +
8895 +- for (i = 0; i <= BB_GC; i++)
8896 ++ for (i = 0; i <= BB_GC && i < ARRAY_SIZE(BBGS); i++)
8897 + BBG += BBGS[i];
8898 +
8899 + PGA2G = PGA2_cri * PGA2_cri_GS + PGA2_crf * PGA2_crf_GS;
8900 +diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
8901 +index 3d8c54b826e99..41f8410d08d65 100644
8902 +--- a/drivers/media/v4l2-core/v4l2-ctrls.c
8903 ++++ b/drivers/media/v4l2-core/v4l2-ctrls.c
8904 +@@ -2356,7 +2356,15 @@ void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl)
8905 + if (hdl == NULL || hdl->buckets == NULL)
8906 + return;
8907 +
8908 +- if (!hdl->req_obj.req && !list_empty(&hdl->requests)) {
8909 ++ /*
8910 ++ * If the main handler is freed and it is used by handler objects in
8911 ++ * outstanding requests, then unbind and put those objects before
8912 ++ * freeing the main handler.
8913 ++ *
8914 ++ * The main handler can be identified by having a NULL ops pointer in
8915 ++ * the request object.
8916 ++ */
8917 ++ if (!hdl->req_obj.ops && !list_empty(&hdl->requests)) {
8918 + struct v4l2_ctrl_handler *req, *next_req;
8919 +
8920 + list_for_each_entry_safe(req, next_req, &hdl->requests, requests) {
8921 +@@ -3402,8 +3410,8 @@ static void v4l2_ctrl_request_unbind(struct media_request_object *obj)
8922 + container_of(obj, struct v4l2_ctrl_handler, req_obj);
8923 + struct v4l2_ctrl_handler *main_hdl = obj->priv;
8924 +
8925 +- list_del_init(&hdl->requests);
8926 + mutex_lock(main_hdl->lock);
8927 ++ list_del_init(&hdl->requests);
8928 + if (hdl->request_is_queued) {
8929 + list_del_init(&hdl->requests_queued);
8930 + hdl->request_is_queued = false;
8931 +@@ -3462,8 +3470,11 @@ static int v4l2_ctrl_request_bind(struct media_request *req,
8932 + if (!ret) {
8933 + ret = media_request_object_bind(req, &req_ops,
8934 + from, false, &hdl->req_obj);
8935 +- if (!ret)
8936 ++ if (!ret) {
8937 ++ mutex_lock(from->lock);
8938 + list_add_tail(&hdl->requests, &from->requests);
8939 ++ mutex_unlock(from->lock);
8940 ++ }
8941 + }
8942 + return ret;
8943 + }
8944 +diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
8945 +index cfa730cfd1453..f80c2ea39ca4c 100644
8946 +--- a/drivers/memory/omap-gpmc.c
8947 ++++ b/drivers/memory/omap-gpmc.c
8948 +@@ -1009,8 +1009,8 @@ EXPORT_SYMBOL(gpmc_cs_request);
8949 +
8950 + void gpmc_cs_free(int cs)
8951 + {
8952 +- struct gpmc_cs_data *gpmc = &gpmc_cs[cs];
8953 +- struct resource *res = &gpmc->mem;
8954 ++ struct gpmc_cs_data *gpmc;
8955 ++ struct resource *res;
8956 +
8957 + spin_lock(&gpmc_mem_lock);
8958 + if (cs >= gpmc_cs_num || cs < 0 || !gpmc_cs_reserved(cs)) {
8959 +@@ -1018,6 +1018,9 @@ void gpmc_cs_free(int cs)
8960 + spin_unlock(&gpmc_mem_lock);
8961 + return;
8962 + }
8963 ++ gpmc = &gpmc_cs[cs];
8964 ++ res = &gpmc->mem;
8965 ++
8966 + gpmc_cs_disable_mem(cs);
8967 + if (res->flags)
8968 + release_resource(res);
8969 +diff --git a/drivers/memory/pl353-smc.c b/drivers/memory/pl353-smc.c
8970 +index 73bd3023202f0..b42804b1801e6 100644
8971 +--- a/drivers/memory/pl353-smc.c
8972 ++++ b/drivers/memory/pl353-smc.c
8973 +@@ -63,7 +63,7 @@
8974 + /* ECC memory config register specific constants */
8975 + #define PL353_SMC_ECC_MEMCFG_MODE_MASK 0xC
8976 + #define PL353_SMC_ECC_MEMCFG_MODE_SHIFT 2
8977 +-#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0xC
8978 ++#define PL353_SMC_ECC_MEMCFG_PGSIZE_MASK 0x3
8979 +
8980 + #define PL353_SMC_DC_UPT_NAND_REGS ((4 << 23) | /* CS: NAND chip */ \
8981 + (2 << 21)) /* UpdateRegs operation */
8982 +diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
8983 +index da0fdb4c75959..1fe6c35b7503e 100644
8984 +--- a/drivers/memory/renesas-rpc-if.c
8985 ++++ b/drivers/memory/renesas-rpc-if.c
8986 +@@ -193,10 +193,10 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
8987 + }
8988 +
8989 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dirmap");
8990 +- rpc->size = resource_size(res);
8991 + rpc->dirmap = devm_ioremap_resource(&pdev->dev, res);
8992 + if (IS_ERR(rpc->dirmap))
8993 + rpc->dirmap = NULL;
8994 ++ rpc->size = resource_size(res);
8995 +
8996 + rpc->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
8997 +
8998 +diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c
8999 +index c5ee4121a4d22..3d230f07eaf21 100644
9000 +--- a/drivers/memory/samsung/exynos5422-dmc.c
9001 ++++ b/drivers/memory/samsung/exynos5422-dmc.c
9002 +@@ -1298,7 +1298,9 @@ static int exynos5_dmc_init_clks(struct exynos5_dmc *dmc)
9003 +
9004 + dmc->curr_volt = target_volt;
9005 +
9006 +- clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
9007 ++ ret = clk_set_parent(dmc->mout_mx_mspll_ccore, dmc->mout_spll);
9008 ++ if (ret)
9009 ++ return ret;
9010 +
9011 + clk_prepare_enable(dmc->fout_bpll);
9012 + clk_prepare_enable(dmc->mout_bpll);
9013 +diff --git a/drivers/mfd/stm32-timers.c b/drivers/mfd/stm32-timers.c
9014 +index add6033591242..44ed2fce03196 100644
9015 +--- a/drivers/mfd/stm32-timers.c
9016 ++++ b/drivers/mfd/stm32-timers.c
9017 +@@ -158,13 +158,18 @@ static const struct regmap_config stm32_timers_regmap_cfg = {
9018 +
9019 + static void stm32_timers_get_arr_size(struct stm32_timers *ddata)
9020 + {
9021 ++ u32 arr;
9022 ++
9023 ++ /* Backup ARR to restore it after getting the maximum value */
9024 ++ regmap_read(ddata->regmap, TIM_ARR, &arr);
9025 ++
9026 + /*
9027 + * Only the available bits will be written so when readback
9028 + * we get the maximum value of auto reload register
9029 + */
9030 + regmap_write(ddata->regmap, TIM_ARR, ~0L);
9031 + regmap_read(ddata->regmap, TIM_ARR, &ddata->max_arr);
9032 +- regmap_write(ddata->regmap, TIM_ARR, 0x0);
9033 ++ regmap_write(ddata->regmap, TIM_ARR, arr);
9034 + }
9035 +
9036 + static int stm32_timers_dma_probe(struct device *dev,
9037 +diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
9038 +index dd65cedf3b125..9d14bf444481b 100644
9039 +--- a/drivers/misc/lis3lv02d/lis3lv02d.c
9040 ++++ b/drivers/misc/lis3lv02d/lis3lv02d.c
9041 +@@ -208,7 +208,7 @@ static int lis3_3dc_rates[16] = {0, 1, 10, 25, 50, 100, 200, 400, 1600, 5000};
9042 + static int lis3_3dlh_rates[4] = {50, 100, 400, 1000};
9043 +
9044 + /* ODR is Output Data Rate */
9045 +-static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
9046 ++static int lis3lv02d_get_odr_index(struct lis3lv02d *lis3)
9047 + {
9048 + u8 ctrl;
9049 + int shift;
9050 +@@ -216,15 +216,23 @@ static int lis3lv02d_get_odr(struct lis3lv02d *lis3)
9051 + lis3->read(lis3, CTRL_REG1, &ctrl);
9052 + ctrl &= lis3->odr_mask;
9053 + shift = ffs(lis3->odr_mask) - 1;
9054 +- return lis3->odrs[(ctrl >> shift)];
9055 ++ return (ctrl >> shift);
9056 + }
9057 +
9058 + static int lis3lv02d_get_pwron_wait(struct lis3lv02d *lis3)
9059 + {
9060 +- int div = lis3lv02d_get_odr(lis3);
9061 ++ int odr_idx = lis3lv02d_get_odr_index(lis3);
9062 ++ int div = lis3->odrs[odr_idx];
9063 +
9064 +- if (WARN_ONCE(div == 0, "device returned spurious data"))
9065 ++ if (div == 0) {
9066 ++ if (odr_idx == 0) {
9067 ++ /* Power-down mode, not sampling no need to sleep */
9068 ++ return 0;
9069 ++ }
9070 ++
9071 ++ dev_err(&lis3->pdev->dev, "Error unknown odrs-index: %d\n", odr_idx);
9072 + return -ENXIO;
9073 ++ }
9074 +
9075 + /* LIS3 power on delay is quite long */
9076 + msleep(lis3->pwron_delay / div);
9077 +@@ -816,9 +824,12 @@ static ssize_t lis3lv02d_rate_show(struct device *dev,
9078 + struct device_attribute *attr, char *buf)
9079 + {
9080 + struct lis3lv02d *lis3 = dev_get_drvdata(dev);
9081 ++ int odr_idx;
9082 +
9083 + lis3lv02d_sysfs_poweron(lis3);
9084 +- return sprintf(buf, "%d\n", lis3lv02d_get_odr(lis3));
9085 ++
9086 ++ odr_idx = lis3lv02d_get_odr_index(lis3);
9087 ++ return sprintf(buf, "%d\n", lis3->odrs[odr_idx]);
9088 + }
9089 +
9090 + static ssize_t lis3lv02d_rate_set(struct device *dev,
9091 +diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
9092 +index 345addd9306de..fa8a7fce4481b 100644
9093 +--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
9094 ++++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
9095 +@@ -326,7 +326,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
9096 + bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
9097 + {
9098 + int result;
9099 +- struct vmci_notify_bm_set_msg bitmap_set_msg;
9100 ++ struct vmci_notify_bm_set_msg bitmap_set_msg = { };
9101 +
9102 + bitmap_set_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
9103 + VMCI_SET_NOTIFY_BITMAP);
9104 +diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
9105 +index cc8eeb361fcdb..1018dc77269d4 100644
9106 +--- a/drivers/misc/vmw_vmci/vmci_guest.c
9107 ++++ b/drivers/misc/vmw_vmci/vmci_guest.c
9108 +@@ -168,7 +168,7 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
9109 + VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
9110 + struct vmci_datagram *check_msg;
9111 +
9112 +- check_msg = kmalloc(msg_size, GFP_KERNEL);
9113 ++ check_msg = kzalloc(msg_size, GFP_KERNEL);
9114 + if (!check_msg) {
9115 + dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
9116 + return -ENOMEM;
9117 +diff --git a/drivers/mtd/maps/physmap-core.c b/drivers/mtd/maps/physmap-core.c
9118 +index 001ed5deb622a..4f63b8430c710 100644
9119 +--- a/drivers/mtd/maps/physmap-core.c
9120 ++++ b/drivers/mtd/maps/physmap-core.c
9121 +@@ -69,8 +69,10 @@ static int physmap_flash_remove(struct platform_device *dev)
9122 + int i, err = 0;
9123 +
9124 + info = platform_get_drvdata(dev);
9125 +- if (!info)
9126 ++ if (!info) {
9127 ++ err = -EINVAL;
9128 + goto out;
9129 ++ }
9130 +
9131 + if (info->cmtd) {
9132 + err = mtd_device_unregister(info->cmtd);
9133 +diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
9134 +index b40f46a43fc66..69fb5dafa9ad6 100644
9135 +--- a/drivers/mtd/mtdchar.c
9136 ++++ b/drivers/mtd/mtdchar.c
9137 +@@ -651,16 +651,12 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
9138 + case MEMGETINFO:
9139 + case MEMREADOOB:
9140 + case MEMREADOOB64:
9141 +- case MEMLOCK:
9142 +- case MEMUNLOCK:
9143 + case MEMISLOCKED:
9144 + case MEMGETOOBSEL:
9145 + case MEMGETBADBLOCK:
9146 +- case MEMSETBADBLOCK:
9147 + case OTPSELECT:
9148 + case OTPGETREGIONCOUNT:
9149 + case OTPGETREGIONINFO:
9150 +- case OTPLOCK:
9151 + case ECCGETLAYOUT:
9152 + case ECCGETSTATS:
9153 + case MTDFILEMODE:
9154 +@@ -671,9 +667,13 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
9155 + /* "dangerous" commands */
9156 + case MEMERASE:
9157 + case MEMERASE64:
9158 ++ case MEMLOCK:
9159 ++ case MEMUNLOCK:
9160 ++ case MEMSETBADBLOCK:
9161 + case MEMWRITEOOB:
9162 + case MEMWRITEOOB64:
9163 + case MEMWRITE:
9164 ++ case OTPLOCK:
9165 + if (!(file->f_mode & FMODE_WRITE))
9166 + return -EPERM;
9167 + break;
9168 +diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
9169 +index b07cbb0661fb1..1c8c407286783 100644
9170 +--- a/drivers/mtd/mtdcore.c
9171 ++++ b/drivers/mtd/mtdcore.c
9172 +@@ -820,6 +820,9 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
9173 +
9174 + /* Prefer parsed partitions over driver-provided fallback */
9175 + ret = parse_mtd_partitions(mtd, types, parser_data);
9176 ++ if (ret == -EPROBE_DEFER)
9177 ++ goto out;
9178 ++
9179 + if (ret > 0)
9180 + ret = 0;
9181 + else if (nr_parts)
9182 +diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
9183 +index c3575b686f796..95d47422bbf20 100644
9184 +--- a/drivers/mtd/mtdpart.c
9185 ++++ b/drivers/mtd/mtdpart.c
9186 +@@ -331,7 +331,7 @@ static int __del_mtd_partitions(struct mtd_info *mtd)
9187 +
9188 + list_for_each_entry_safe(child, next, &mtd->partitions, part.node) {
9189 + if (mtd_has_partitions(child))
9190 +- del_mtd_partitions(child);
9191 ++ __del_mtd_partitions(child);
9192 +
9193 + pr_info("Deleting %s MTD partition\n", child->name);
9194 + ret = del_mtd_device(child);
9195 +diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
9196 +index 2da39ab892869..909b14cc8e55c 100644
9197 +--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
9198 ++++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
9199 +@@ -2688,6 +2688,12 @@ static int brcmnand_attach_chip(struct nand_chip *chip)
9200 +
9201 + ret = brcmstb_choose_ecc_layout(host);
9202 +
9203 ++ /* If OOB is written with ECC enabled it will cause ECC errors */
9204 ++ if (is_hamming_ecc(host->ctrl, &host->hwcfg)) {
9205 ++ chip->ecc.write_oob = brcmnand_write_oob_raw;
9206 ++ chip->ecc.read_oob = brcmnand_read_oob_raw;
9207 ++ }
9208 ++
9209 + return ret;
9210 + }
9211 +
9212 +diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c
9213 +index c88421a1c078d..ce05dd4088e9d 100644
9214 +--- a/drivers/mtd/nand/raw/fsmc_nand.c
9215 ++++ b/drivers/mtd/nand/raw/fsmc_nand.c
9216 +@@ -1078,11 +1078,13 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
9217 + host->read_dma_chan = dma_request_channel(mask, filter, NULL);
9218 + if (!host->read_dma_chan) {
9219 + dev_err(&pdev->dev, "Unable to get read dma channel\n");
9220 ++ ret = -ENODEV;
9221 + goto disable_clk;
9222 + }
9223 + host->write_dma_chan = dma_request_channel(mask, filter, NULL);
9224 + if (!host->write_dma_chan) {
9225 + dev_err(&pdev->dev, "Unable to get write dma channel\n");
9226 ++ ret = -ENODEV;
9227 + goto release_dma_read_chan;
9228 + }
9229 + }
9230 +diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
9231 +index 31a6210eb5d44..a6658567d55c0 100644
9232 +--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
9233 ++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
9234 +@@ -2447,7 +2447,7 @@ static int gpmi_nand_init(struct gpmi_nand_data *this)
9235 + this->bch_geometry.auxiliary_size = 128;
9236 + ret = gpmi_alloc_dma_buffer(this);
9237 + if (ret)
9238 +- goto err_out;
9239 ++ return ret;
9240 +
9241 + nand_controller_init(&this->base);
9242 + this->base.ops = &gpmi_nand_controller_ops;
9243 +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
9244 +index dfc17a28a06b9..b99d2e9d1e2c4 100644
9245 +--- a/drivers/mtd/nand/raw/qcom_nandc.c
9246 ++++ b/drivers/mtd/nand/raw/qcom_nandc.c
9247 +@@ -2874,7 +2874,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
9248 + struct device *dev = nandc->dev;
9249 + struct device_node *dn = dev->of_node, *child;
9250 + struct qcom_nand_host *host;
9251 +- int ret;
9252 ++ int ret = -ENODEV;
9253 +
9254 + for_each_available_child_of_node(dn, child) {
9255 + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
9256 +@@ -2892,10 +2892,7 @@ static int qcom_probe_nand_devices(struct qcom_nand_controller *nandc)
9257 + list_add_tail(&host->node, &nandc->host_list);
9258 + }
9259 +
9260 +- if (list_empty(&nandc->host_list))
9261 +- return -ENODEV;
9262 +-
9263 +- return 0;
9264 ++ return ret;
9265 + }
9266 +
9267 + /* parse custom DT properties here */
9268 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
9269 +index a59c1f1fb31ed..7ddc2e2e4976a 100644
9270 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
9271 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
9272 +@@ -1731,14 +1731,16 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
9273 +
9274 + cons = rxcmp->rx_cmp_opaque;
9275 + if (unlikely(cons != rxr->rx_next_cons)) {
9276 +- int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
9277 ++ int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
9278 +
9279 + /* 0xffff is forced error, don't print it */
9280 + if (rxr->rx_next_cons != 0xffff)
9281 + netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
9282 + cons, rxr->rx_next_cons);
9283 + bnxt_sched_reset(bp, rxr);
9284 +- return rc1;
9285 ++ if (rc1)
9286 ++ return rc1;
9287 ++ goto next_rx_no_prod_no_len;
9288 + }
9289 + rx_buf = &rxr->rx_buf_ring[cons];
9290 + data = rx_buf->data;
9291 +@@ -9546,7 +9548,9 @@ static ssize_t bnxt_show_temp(struct device *dev,
9292 + if (!rc)
9293 + len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */
9294 + mutex_unlock(&bp->hwrm_cmd_lock);
9295 +- return rc ?: len;
9296 ++ if (rc)
9297 ++ return rc;
9298 ++ return len;
9299 + }
9300 + static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
9301 +
9302 +diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
9303 +index e6d4ad99cc387..3f1c189646f4e 100644
9304 +--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
9305 ++++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
9306 +@@ -521,7 +521,7 @@
9307 + #define CN23XX_BAR1_INDEX_OFFSET 3
9308 +
9309 + #define CN23XX_PEM_BAR1_INDEX_REG(port, idx) \
9310 +- (CN23XX_PEM_BAR1_INDEX_START + ((port) << CN23XX_PEM_OFFSET) + \
9311 ++ (CN23XX_PEM_BAR1_INDEX_START + (((u64)port) << CN23XX_PEM_OFFSET) + \
9312 + ((idx) << CN23XX_BAR1_INDEX_OFFSET))
9313 +
9314 + /*############################ DPI #########################*/
9315 +diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
9316 +index 7a141ce32e86c..0ccd5b40ef5c9 100644
9317 +--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
9318 ++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
9319 +@@ -776,7 +776,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
9320 + mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
9321 + mbx.rq.qs_num = qs->vnic_id;
9322 + mbx.rq.rq_num = qidx;
9323 +- mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
9324 ++ mbx.rq.cfg = ((u64)rq->caching << 26) | (rq->cq_qs << 19) |
9325 + (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
9326 + (rq->cont_qs_rbdr_idx << 8) |
9327 + (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
9328 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
9329 +index 83b46440408ba..bde8494215c41 100644
9330 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
9331 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
9332 +@@ -174,31 +174,31 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
9333 + WORD_MASK, f->fs.nat_lip[15] |
9334 + f->fs.nat_lip[14] << 8 |
9335 + f->fs.nat_lip[13] << 16 |
9336 +- f->fs.nat_lip[12] << 24, 1);
9337 ++ (u64)f->fs.nat_lip[12] << 24, 1);
9338 +
9339 + set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
9340 + WORD_MASK, f->fs.nat_lip[11] |
9341 + f->fs.nat_lip[10] << 8 |
9342 + f->fs.nat_lip[9] << 16 |
9343 +- f->fs.nat_lip[8] << 24, 1);
9344 ++ (u64)f->fs.nat_lip[8] << 24, 1);
9345 +
9346 + set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
9347 + WORD_MASK, f->fs.nat_lip[7] |
9348 + f->fs.nat_lip[6] << 8 |
9349 + f->fs.nat_lip[5] << 16 |
9350 +- f->fs.nat_lip[4] << 24, 1);
9351 ++ (u64)f->fs.nat_lip[4] << 24, 1);
9352 +
9353 + set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
9354 + WORD_MASK, f->fs.nat_lip[3] |
9355 + f->fs.nat_lip[2] << 8 |
9356 + f->fs.nat_lip[1] << 16 |
9357 +- f->fs.nat_lip[0] << 24, 1);
9358 ++ (u64)f->fs.nat_lip[0] << 24, 1);
9359 + } else {
9360 + set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
9361 + WORD_MASK, f->fs.nat_lip[3] |
9362 + f->fs.nat_lip[2] << 8 |
9363 + f->fs.nat_lip[1] << 16 |
9364 +- f->fs.nat_lip[0] << 24, 1);
9365 ++ (u64)f->fs.nat_lip[0] << 24, 1);
9366 + }
9367 + }
9368 +
9369 +@@ -208,25 +208,25 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
9370 + WORD_MASK, f->fs.nat_fip[15] |
9371 + f->fs.nat_fip[14] << 8 |
9372 + f->fs.nat_fip[13] << 16 |
9373 +- f->fs.nat_fip[12] << 24, 1);
9374 ++ (u64)f->fs.nat_fip[12] << 24, 1);
9375 +
9376 + set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
9377 + WORD_MASK, f->fs.nat_fip[11] |
9378 + f->fs.nat_fip[10] << 8 |
9379 + f->fs.nat_fip[9] << 16 |
9380 +- f->fs.nat_fip[8] << 24, 1);
9381 ++ (u64)f->fs.nat_fip[8] << 24, 1);
9382 +
9383 + set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
9384 + WORD_MASK, f->fs.nat_fip[7] |
9385 + f->fs.nat_fip[6] << 8 |
9386 + f->fs.nat_fip[5] << 16 |
9387 +- f->fs.nat_fip[4] << 24, 1);
9388 ++ (u64)f->fs.nat_fip[4] << 24, 1);
9389 +
9390 + set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
9391 + WORD_MASK, f->fs.nat_fip[3] |
9392 + f->fs.nat_fip[2] << 8 |
9393 + f->fs.nat_fip[1] << 16 |
9394 +- f->fs.nat_fip[0] << 24, 1);
9395 ++ (u64)f->fs.nat_fip[0] << 24, 1);
9396 +
9397 + } else {
9398 + set_tcb_field(adap, f, tid,
9399 +@@ -234,13 +234,13 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
9400 + WORD_MASK, f->fs.nat_fip[3] |
9401 + f->fs.nat_fip[2] << 8 |
9402 + f->fs.nat_fip[1] << 16 |
9403 +- f->fs.nat_fip[0] << 24, 1);
9404 ++ (u64)f->fs.nat_fip[0] << 24, 1);
9405 + }
9406 + }
9407 +
9408 + set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
9409 + (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
9410 +- (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
9411 ++ (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
9412 + 1);
9413 + }
9414 +
9415 +diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
9416 +index 67c436400352f..de7b318422330 100644
9417 +--- a/drivers/net/ethernet/freescale/Makefile
9418 ++++ b/drivers/net/ethernet/freescale/Makefile
9419 +@@ -24,6 +24,4 @@ obj-$(CONFIG_FSL_DPAA_ETH) += dpaa/
9420 +
9421 + obj-$(CONFIG_FSL_DPAA2_ETH) += dpaa2/
9422 +
9423 +-obj-$(CONFIG_FSL_ENETC) += enetc/
9424 +-obj-$(CONFIG_FSL_ENETC_MDIO) += enetc/
9425 +-obj-$(CONFIG_FSL_ENETC_VF) += enetc/
9426 ++obj-y += enetc/
9427 +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
9428 +index a362516a31853..070bef303d184 100644
9429 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
9430 ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
9431 +@@ -3526,7 +3526,6 @@ static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
9432 +
9433 + static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
9434 + {
9435 +- struct hnae3_ring_chain_node vector_ring_chain;
9436 + struct hnae3_handle *h = priv->ae_handle;
9437 + struct hns3_enet_tqp_vector *tqp_vector;
9438 + int ret;
9439 +@@ -3558,6 +3557,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
9440 + }
9441 +
9442 + for (i = 0; i < priv->vector_num; i++) {
9443 ++ struct hnae3_ring_chain_node vector_ring_chain;
9444 ++
9445 + tqp_vector = &priv->tqp_vector[i];
9446 +
9447 + tqp_vector->rx_group.total_bytes = 0;
9448 +diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
9449 +index da4b286d13377..feb69fcd908e3 100644
9450 +--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
9451 ++++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
9452 +@@ -436,7 +436,8 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
9453 + netif_carrier_on(port->dev);
9454 + if (!delayed_work_pending(caching_dw))
9455 + queue_delayed_work(prestera_wq, caching_dw, 0);
9456 +- } else {
9457 ++ } else if (netif_running(port->dev) &&
9458 ++ netif_carrier_ok(port->dev)) {
9459 + netif_carrier_off(port->dev);
9460 + if (delayed_work_pending(caching_dw))
9461 + cancel_delayed_work(caching_dw);
9462 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
9463 +index cc67366495b09..bed154e9a1ef9 100644
9464 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
9465 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
9466 +@@ -850,7 +850,7 @@ mlx5_fpga_ipsec_release_sa_ctx(struct mlx5_fpga_ipsec_sa_ctx *sa_ctx)
9467 + return;
9468 + }
9469 +
9470 +- if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action &
9471 ++ if (sa_ctx->fpga_xfrm->accel_xfrm.attrs.action ==
9472 + MLX5_ACCEL_ESP_ACTION_DECRYPT)
9473 + ida_simple_remove(&fipsec->halloc, sa_ctx->sa_handle);
9474 +
9475 +diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
9476 +index 97d2b03208de0..7a8187458724d 100644
9477 +--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
9478 ++++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
9479 +@@ -364,6 +364,7 @@ int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
9480 +
9481 + attrs.split = eth_port.is_split;
9482 + attrs.splittable = !attrs.split;
9483 ++ attrs.lanes = eth_port.port_lanes;
9484 + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
9485 + attrs.phys.port_number = eth_port.label_port;
9486 + attrs.phys.split_subport_number = eth_port.label_subport;
9487 +diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
9488 +index 117188e3c7de2..87b8c032195d0 100644
9489 +--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
9490 ++++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
9491 +@@ -1437,6 +1437,7 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
9492 + {
9493 + struct emac_tpd tpd;
9494 + u32 prod_idx;
9495 ++ int len;
9496 +
9497 + memset(&tpd, 0, sizeof(tpd));
9498 +
9499 +@@ -1456,9 +1457,10 @@ netdev_tx_t emac_mac_tx_buf_send(struct emac_adapter *adpt,
9500 + if (skb_network_offset(skb) != ETH_HLEN)
9501 + TPD_TYP_SET(&tpd, 1);
9502 +
9503 ++ len = skb->len;
9504 + emac_tx_fill_tpd(adpt, tx_q, skb, &tpd);
9505 +
9506 +- netdev_sent_queue(adpt->netdev, skb->len);
9507 ++ netdev_sent_queue(adpt->netdev, len);
9508 +
9509 + /* Make sure the are enough free descriptors to hold one
9510 + * maximum-sized SKB. We need one desc for each fragment,
9511 +diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
9512 +index bd30505fbc57a..f96eed67e1a2b 100644
9513 +--- a/drivers/net/ethernet/renesas/ravb_main.c
9514 ++++ b/drivers/net/ethernet/renesas/ravb_main.c
9515 +@@ -911,31 +911,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
9516 + int q = napi - priv->napi;
9517 + int mask = BIT(q);
9518 + int quota = budget;
9519 +- u32 ris0, tis;
9520 +
9521 +- for (;;) {
9522 +- tis = ravb_read(ndev, TIS);
9523 +- ris0 = ravb_read(ndev, RIS0);
9524 +- if (!((ris0 & mask) || (tis & mask)))
9525 +- break;
9526 ++ /* Processing RX Descriptor Ring */
9527 ++ /* Clear RX interrupt */
9528 ++ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
9529 ++ if (ravb_rx(ndev, &quota, q))
9530 ++ goto out;
9531 +
9532 +- /* Processing RX Descriptor Ring */
9533 +- if (ris0 & mask) {
9534 +- /* Clear RX interrupt */
9535 +- ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
9536 +- if (ravb_rx(ndev, &quota, q))
9537 +- goto out;
9538 +- }
9539 +- /* Processing TX Descriptor Ring */
9540 +- if (tis & mask) {
9541 +- spin_lock_irqsave(&priv->lock, flags);
9542 +- /* Clear TX interrupt */
9543 +- ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
9544 +- ravb_tx_free(ndev, q, true);
9545 +- netif_wake_subqueue(ndev, q);
9546 +- spin_unlock_irqrestore(&priv->lock, flags);
9547 +- }
9548 +- }
9549 ++ /* Processing TX Descriptor Ring */
9550 ++ spin_lock_irqsave(&priv->lock, flags);
9551 ++ /* Clear TX interrupt */
9552 ++ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
9553 ++ ravb_tx_free(ndev, q, true);
9554 ++ netif_wake_subqueue(ndev, q);
9555 ++ spin_unlock_irqrestore(&priv->lock, flags);
9556 +
9557 + napi_complete(napi);
9558 +
9559 +diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
9560 +index da6886dcac37c..4fa72b573c172 100644
9561 +--- a/drivers/net/ethernet/sfc/ef10.c
9562 ++++ b/drivers/net/ethernet/sfc/ef10.c
9563 +@@ -2928,8 +2928,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
9564 +
9565 + /* Get the transmit queue */
9566 + tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
9567 +- tx_queue = efx_channel_get_tx_queue(channel,
9568 +- tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
9569 ++ tx_queue = channel->tx_queue + (tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
9570 +
9571 + if (!tx_queue->timestamping) {
9572 + /* Transmit completion */
9573 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
9574 +index 6012eadae4604..5b9478dffe103 100644
9575 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
9576 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
9577 +@@ -2727,8 +2727,15 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
9578 +
9579 + /* Enable TSO */
9580 + if (priv->tso) {
9581 +- for (chan = 0; chan < tx_cnt; chan++)
9582 ++ for (chan = 0; chan < tx_cnt; chan++) {
9583 ++ struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
9584 ++
9585 ++ /* TSO and TBS cannot co-exist */
9586 ++ if (tx_q->tbs & STMMAC_TBS_AVAIL)
9587 ++ continue;
9588 ++
9589 + stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
9590 ++ }
9591 + }
9592 +
9593 + /* Enable Split Header */
9594 +@@ -2820,9 +2827,8 @@ static int stmmac_open(struct net_device *dev)
9595 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
9596 + int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
9597 +
9598 ++ /* Setup per-TXQ tbs flag before TX descriptor alloc */
9599 + tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
9600 +- if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan))
9601 +- tx_q->tbs &= ~STMMAC_TBS_AVAIL;
9602 + }
9603 +
9604 + ret = alloc_dma_desc_resources(priv);
9605 +diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
9606 +index c7031e1960d4a..03055c96f0760 100644
9607 +--- a/drivers/net/ethernet/ti/davinci_emac.c
9608 ++++ b/drivers/net/ethernet/ti/davinci_emac.c
9609 +@@ -169,11 +169,11 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
9610 + /* EMAC mac_status register */
9611 + #define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000)
9612 + #define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
9613 +-#define EMAC_MACSTATUS_TXERRCH_MASK (0x7)
9614 ++#define EMAC_MACSTATUS_TXERRCH_MASK (0x70000)
9615 + #define EMAC_MACSTATUS_TXERRCH_SHIFT (16)
9616 + #define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000)
9617 + #define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
9618 +-#define EMAC_MACSTATUS_RXERRCH_MASK (0x7)
9619 ++#define EMAC_MACSTATUS_RXERRCH_MASK (0x700)
9620 + #define EMAC_MACSTATUS_RXERRCH_SHIFT (8)
9621 +
9622 + /* EMAC RX register masks */
9623 +diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
9624 +index 2e52029235104..403358f2c8536 100644
9625 +--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
9626 ++++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
9627 +@@ -1086,7 +1086,7 @@ static int init_queues(struct port *port)
9628 + int i;
9629 +
9630 + if (!ports_open) {
9631 +- dma_pool = dma_pool_create(DRV_NAME, port->netdev->dev.parent,
9632 ++ dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
9633 + POOL_ALLOC_SIZE, 32, 0);
9634 + if (!dma_pool)
9635 + return -ENOMEM;
9636 +@@ -1436,6 +1436,9 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
9637 + ndev->netdev_ops = &ixp4xx_netdev_ops;
9638 + ndev->ethtool_ops = &ixp4xx_ethtool_ops;
9639 + ndev->tx_queue_len = 100;
9640 ++ /* Inherit the DMA masks from the platform device */
9641 ++ ndev->dev.dma_mask = dev->dma_mask;
9642 ++ ndev->dev.coherent_dma_mask = dev->coherent_dma_mask;
9643 +
9644 + netif_napi_add(ndev, &port->napi, eth_poll, NAPI_WEIGHT);
9645 +
9646 +diff --git a/drivers/net/fddi/Kconfig b/drivers/net/fddi/Kconfig
9647 +index f722079dfb6ae..f99c1048c97e3 100644
9648 +--- a/drivers/net/fddi/Kconfig
9649 ++++ b/drivers/net/fddi/Kconfig
9650 +@@ -40,17 +40,20 @@ config DEFXX
9651 +
9652 + config DEFXX_MMIO
9653 + bool
9654 +- prompt "Use MMIO instead of PIO" if PCI || EISA
9655 ++ prompt "Use MMIO instead of IOP" if PCI || EISA
9656 + depends on DEFXX
9657 +- default n if PCI || EISA
9658 ++ default n if EISA
9659 + default y
9660 + help
9661 + This instructs the driver to use EISA or PCI memory-mapped I/O
9662 +- (MMIO) as appropriate instead of programmed I/O ports (PIO).
9663 ++ (MMIO) as appropriate instead of programmed I/O ports (IOP).
9664 + Enabling this gives an improvement in processing time in parts
9665 +- of the driver, but it may cause problems with EISA (DEFEA)
9666 +- adapters. TURBOchannel does not have the concept of I/O ports,
9667 +- so MMIO is always used for these (DEFTA) adapters.
9668 ++ of the driver, but it requires a memory window to be configured
9669 ++ for EISA (DEFEA) adapters that may not always be available.
9670 ++ Conversely some PCIe host bridges do not support IOP, so MMIO
9671 ++ may be required to access PCI (DEFPA) adapters on downstream PCI
9672 ++ buses with some systems. TURBOchannel does not have the concept
9673 ++ of I/O ports, so MMIO is always used for these (DEFTA) adapters.
9674 +
9675 + If unsure, say N.
9676 +
9677 +diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
9678 +index 077c68498f048..c7ce6d5491afc 100644
9679 +--- a/drivers/net/fddi/defxx.c
9680 ++++ b/drivers/net/fddi/defxx.c
9681 +@@ -495,6 +495,25 @@ static const struct net_device_ops dfx_netdev_ops = {
9682 + .ndo_set_mac_address = dfx_ctl_set_mac_address,
9683 + };
9684 +
9685 ++static void dfx_register_res_alloc_err(const char *print_name, bool mmio,
9686 ++ bool eisa)
9687 ++{
9688 ++ pr_err("%s: Cannot use %s, no address set, aborting\n",
9689 ++ print_name, mmio ? "MMIO" : "I/O");
9690 ++ pr_err("%s: Recompile driver with \"CONFIG_DEFXX_MMIO=%c\"\n",
9691 ++ print_name, mmio ? 'n' : 'y');
9692 ++ if (eisa && mmio)
9693 ++ pr_err("%s: Or run ECU and set adapter's MMIO location\n",
9694 ++ print_name);
9695 ++}
9696 ++
9697 ++static void dfx_register_res_err(const char *print_name, bool mmio,
9698 ++ unsigned long start, unsigned long len)
9699 ++{
9700 ++ pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, aborting\n",
9701 ++ print_name, mmio ? "MMIO" : "I/O", len, start);
9702 ++}
9703 ++
9704 + /*
9705 + * ================
9706 + * = dfx_register =
9707 +@@ -568,15 +587,12 @@ static int dfx_register(struct device *bdev)
9708 + dev_set_drvdata(bdev, dev);
9709 +
9710 + dfx_get_bars(bdev, bar_start, bar_len);
9711 +- if (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0) {
9712 +- pr_err("%s: Cannot use MMIO, no address set, aborting\n",
9713 +- print_name);
9714 +- pr_err("%s: Run ECU and set adapter's MMIO location\n",
9715 +- print_name);
9716 +- pr_err("%s: Or recompile driver with \"CONFIG_DEFXX_MMIO=n\""
9717 +- "\n", print_name);
9718 ++ if (bar_len[0] == 0 ||
9719 ++ (dfx_bus_eisa && dfx_use_mmio && bar_start[0] == 0)) {
9720 ++ dfx_register_res_alloc_err(print_name, dfx_use_mmio,
9721 ++ dfx_bus_eisa);
9722 + err = -ENXIO;
9723 +- goto err_out;
9724 ++ goto err_out_disable;
9725 + }
9726 +
9727 + if (dfx_use_mmio)
9728 +@@ -585,18 +601,16 @@ static int dfx_register(struct device *bdev)
9729 + else
9730 + region = request_region(bar_start[0], bar_len[0], print_name);
9731 + if (!region) {
9732 +- pr_err("%s: Cannot reserve %s resource 0x%lx @ 0x%lx, "
9733 +- "aborting\n", dfx_use_mmio ? "MMIO" : "I/O", print_name,
9734 +- (long)bar_len[0], (long)bar_start[0]);
9735 ++ dfx_register_res_err(print_name, dfx_use_mmio,
9736 ++ bar_start[0], bar_len[0]);
9737 + err = -EBUSY;
9738 + goto err_out_disable;
9739 + }
9740 + if (bar_start[1] != 0) {
9741 + region = request_region(bar_start[1], bar_len[1], print_name);
9742 + if (!region) {
9743 +- pr_err("%s: Cannot reserve I/O resource "
9744 +- "0x%lx @ 0x%lx, aborting\n", print_name,
9745 +- (long)bar_len[1], (long)bar_start[1]);
9746 ++ dfx_register_res_err(print_name, 0,
9747 ++ bar_start[1], bar_len[1]);
9748 + err = -EBUSY;
9749 + goto err_out_csr_region;
9750 + }
9751 +@@ -604,9 +618,8 @@ static int dfx_register(struct device *bdev)
9752 + if (bar_start[2] != 0) {
9753 + region = request_region(bar_start[2], bar_len[2], print_name);
9754 + if (!region) {
9755 +- pr_err("%s: Cannot reserve I/O resource "
9756 +- "0x%lx @ 0x%lx, aborting\n", print_name,
9757 +- (long)bar_len[2], (long)bar_start[2]);
9758 ++ dfx_register_res_err(print_name, 0,
9759 ++ bar_start[2], bar_len[2]);
9760 + err = -EBUSY;
9761 + goto err_out_bh_region;
9762 + }
9763 +diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
9764 +index 11864ac101b8d..5ddb2dbb8572b 100644
9765 +--- a/drivers/net/geneve.c
9766 ++++ b/drivers/net/geneve.c
9767 +@@ -890,7 +890,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
9768 + __be16 sport;
9769 + int err;
9770 +
9771 +- if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
9772 ++ if (!pskb_inet_may_pull(skb))
9773 + return -EINVAL;
9774 +
9775 + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
9776 +@@ -987,7 +987,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
9777 + __be16 sport;
9778 + int err;
9779 +
9780 +- if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
9781 ++ if (!pskb_inet_may_pull(skb))
9782 + return -EINVAL;
9783 +
9784 + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
9785 +diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c
9786 +index b7875b36097fe..574a8bca1ec46 100644
9787 +--- a/drivers/net/phy/intel-xway.c
9788 ++++ b/drivers/net/phy/intel-xway.c
9789 +@@ -11,6 +11,18 @@
9790 +
9791 + #define XWAY_MDIO_IMASK 0x19 /* interrupt mask */
9792 + #define XWAY_MDIO_ISTAT 0x1A /* interrupt status */
9793 ++#define XWAY_MDIO_LED 0x1B /* led control */
9794 ++
9795 ++/* bit 15:12 are reserved */
9796 ++#define XWAY_MDIO_LED_LED3_EN BIT(11) /* Enable the integrated function of LED3 */
9797 ++#define XWAY_MDIO_LED_LED2_EN BIT(10) /* Enable the integrated function of LED2 */
9798 ++#define XWAY_MDIO_LED_LED1_EN BIT(9) /* Enable the integrated function of LED1 */
9799 ++#define XWAY_MDIO_LED_LED0_EN BIT(8) /* Enable the integrated function of LED0 */
9800 ++/* bit 7:4 are reserved */
9801 ++#define XWAY_MDIO_LED_LED3_DA BIT(3) /* Direct Access to LED3 */
9802 ++#define XWAY_MDIO_LED_LED2_DA BIT(2) /* Direct Access to LED2 */
9803 ++#define XWAY_MDIO_LED_LED1_DA BIT(1) /* Direct Access to LED1 */
9804 ++#define XWAY_MDIO_LED_LED0_DA BIT(0) /* Direct Access to LED0 */
9805 +
9806 + #define XWAY_MDIO_INIT_WOL BIT(15) /* Wake-On-LAN */
9807 + #define XWAY_MDIO_INIT_MSRE BIT(14)
9808 +@@ -159,6 +171,15 @@ static int xway_gphy_config_init(struct phy_device *phydev)
9809 + /* Clear all pending interrupts */
9810 + phy_read(phydev, XWAY_MDIO_ISTAT);
9811 +
9812 ++ /* Ensure that integrated led function is enabled for all leds */
9813 ++ err = phy_write(phydev, XWAY_MDIO_LED,
9814 ++ XWAY_MDIO_LED_LED0_EN |
9815 ++ XWAY_MDIO_LED_LED1_EN |
9816 ++ XWAY_MDIO_LED_LED2_EN |
9817 ++ XWAY_MDIO_LED_LED3_EN);
9818 ++ if (err)
9819 ++ return err;
9820 ++
9821 + phy_write_mmd(phydev, MDIO_MMD_VEND2, XWAY_MMD_LEDCH,
9822 + XWAY_MMD_LEDCH_NACS_NONE |
9823 + XWAY_MMD_LEDCH_SBF_F02HZ |
9824 +diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
9825 +index 823a89354466d..91616182c311f 100644
9826 +--- a/drivers/net/phy/marvell.c
9827 ++++ b/drivers/net/phy/marvell.c
9828 +@@ -861,22 +861,28 @@ static int m88e1111_get_downshift(struct phy_device *phydev, u8 *data)
9829 +
9830 + static int m88e1111_set_downshift(struct phy_device *phydev, u8 cnt)
9831 + {
9832 +- int val;
9833 ++ int val, err;
9834 +
9835 + if (cnt > MII_M1111_PHY_EXT_CR_DOWNSHIFT_MAX)
9836 + return -E2BIG;
9837 +
9838 +- if (!cnt)
9839 +- return phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
9840 +- MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
9841 ++ if (!cnt) {
9842 ++ err = phy_clear_bits(phydev, MII_M1111_PHY_EXT_CR,
9843 ++ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN);
9844 ++ } else {
9845 ++ val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
9846 ++ val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
9847 +
9848 +- val = MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN;
9849 +- val |= FIELD_PREP(MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK, cnt - 1);
9850 ++ err = phy_modify(phydev, MII_M1111_PHY_EXT_CR,
9851 ++ MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
9852 ++ MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
9853 ++ val);
9854 ++ }
9855 +
9856 +- return phy_modify(phydev, MII_M1111_PHY_EXT_CR,
9857 +- MII_M1111_PHY_EXT_CR_DOWNSHIFT_EN |
9858 +- MII_M1111_PHY_EXT_CR_DOWNSHIFT_MASK,
9859 +- val);
9860 ++ if (err < 0)
9861 ++ return err;
9862 ++
9863 ++ return genphy_soft_reset(phydev);
9864 + }
9865 +
9866 + static int m88e1111_get_tunable(struct phy_device *phydev,
9867 +@@ -919,22 +925,28 @@ static int m88e1011_get_downshift(struct phy_device *phydev, u8 *data)
9868 +
9869 + static int m88e1011_set_downshift(struct phy_device *phydev, u8 cnt)
9870 + {
9871 +- int val;
9872 ++ int val, err;
9873 +
9874 + if (cnt > MII_M1011_PHY_SCR_DOWNSHIFT_MAX)
9875 + return -E2BIG;
9876 +
9877 +- if (!cnt)
9878 +- return phy_clear_bits(phydev, MII_M1011_PHY_SCR,
9879 +- MII_M1011_PHY_SCR_DOWNSHIFT_EN);
9880 ++ if (!cnt) {
9881 ++ err = phy_clear_bits(phydev, MII_M1011_PHY_SCR,
9882 ++ MII_M1011_PHY_SCR_DOWNSHIFT_EN);
9883 ++ } else {
9884 ++ val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
9885 ++ val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
9886 +
9887 +- val = MII_M1011_PHY_SCR_DOWNSHIFT_EN;
9888 +- val |= FIELD_PREP(MII_M1011_PHY_SCR_DOWNSHIFT_MASK, cnt - 1);
9889 ++ err = phy_modify(phydev, MII_M1011_PHY_SCR,
9890 ++ MII_M1011_PHY_SCR_DOWNSHIFT_EN |
9891 ++ MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
9892 ++ val);
9893 ++ }
9894 +
9895 +- return phy_modify(phydev, MII_M1011_PHY_SCR,
9896 +- MII_M1011_PHY_SCR_DOWNSHIFT_EN |
9897 +- MII_M1011_PHY_SCR_DOWNSHIFT_MASK,
9898 +- val);
9899 ++ if (err < 0)
9900 ++ return err;
9901 ++
9902 ++ return genphy_soft_reset(phydev);
9903 + }
9904 +
9905 + static int m88e1011_get_tunable(struct phy_device *phydev,
9906 +diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
9907 +index 10722fed666de..caf7291ffaf83 100644
9908 +--- a/drivers/net/phy/smsc.c
9909 ++++ b/drivers/net/phy/smsc.c
9910 +@@ -152,10 +152,13 @@ static int lan87xx_config_aneg(struct phy_device *phydev)
9911 + return genphy_config_aneg(phydev);
9912 + }
9913 +
9914 +-static int lan87xx_config_aneg_ext(struct phy_device *phydev)
9915 ++static int lan95xx_config_aneg_ext(struct phy_device *phydev)
9916 + {
9917 + int rc;
9918 +
9919 ++ if (phydev->phy_id != 0x0007c0f0) /* not (LAN9500A or LAN9505A) */
9920 ++ return lan87xx_config_aneg(phydev);
9921 ++
9922 + /* Extend Manual AutoMDIX timer */
9923 + rc = phy_read(phydev, PHY_EDPD_CONFIG);
9924 + if (rc < 0)
9925 +@@ -408,7 +411,7 @@ static struct phy_driver smsc_phy_driver[] = {
9926 + .read_status = lan87xx_read_status,
9927 + .config_init = smsc_phy_config_init,
9928 + .soft_reset = smsc_phy_reset,
9929 +- .config_aneg = lan87xx_config_aneg_ext,
9930 ++ .config_aneg = lan95xx_config_aneg_ext,
9931 +
9932 + /* IRQ related */
9933 + .ack_interrupt = smsc_phy_ack_interrupt,
9934 +diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
9935 +index 857912ae84d71..409e5a7ad8e26 100644
9936 +--- a/drivers/net/wan/hdlc_fr.c
9937 ++++ b/drivers/net/wan/hdlc_fr.c
9938 +@@ -415,7 +415,7 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
9939 +
9940 + if (pad > 0) { /* Pad the frame with zeros */
9941 + if (__skb_pad(skb, pad, false))
9942 +- goto out;
9943 ++ goto drop;
9944 + skb_put(skb, pad);
9945 + }
9946 + }
9947 +@@ -448,9 +448,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
9948 + return NETDEV_TX_OK;
9949 +
9950 + drop:
9951 +- kfree_skb(skb);
9952 +-out:
9953 + dev->stats.tx_dropped++;
9954 ++ kfree_skb(skb);
9955 + return NETDEV_TX_OK;
9956 + }
9957 +
9958 +diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
9959 +index 605c01fb73f15..f6562a343cb4e 100644
9960 +--- a/drivers/net/wan/lapbether.c
9961 ++++ b/drivers/net/wan/lapbether.c
9962 +@@ -51,6 +51,8 @@ struct lapbethdev {
9963 + struct list_head node;
9964 + struct net_device *ethdev; /* link to ethernet device */
9965 + struct net_device *axdev; /* lapbeth device (lapb#) */
9966 ++ bool up;
9967 ++ spinlock_t up_lock; /* Protects "up" */
9968 + };
9969 +
9970 + static LIST_HEAD(lapbeth_devices);
9971 +@@ -98,8 +100,9 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
9972 + rcu_read_lock();
9973 + lapbeth = lapbeth_get_x25_dev(dev);
9974 + if (!lapbeth)
9975 +- goto drop_unlock;
9976 +- if (!netif_running(lapbeth->axdev))
9977 ++ goto drop_unlock_rcu;
9978 ++ spin_lock_bh(&lapbeth->up_lock);
9979 ++ if (!lapbeth->up)
9980 + goto drop_unlock;
9981 +
9982 + len = skb->data[0] + skb->data[1] * 256;
9983 +@@ -114,11 +117,14 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
9984 + goto drop_unlock;
9985 + }
9986 + out:
9987 ++ spin_unlock_bh(&lapbeth->up_lock);
9988 + rcu_read_unlock();
9989 + return 0;
9990 + drop_unlock:
9991 + kfree_skb(skb);
9992 + goto out;
9993 ++drop_unlock_rcu:
9994 ++ rcu_read_unlock();
9995 + drop:
9996 + kfree_skb(skb);
9997 + return 0;
9998 +@@ -148,13 +154,11 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
9999 + static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
10000 + struct net_device *dev)
10001 + {
10002 ++ struct lapbethdev *lapbeth = netdev_priv(dev);
10003 + int err;
10004 +
10005 +- /*
10006 +- * Just to be *really* sure not to send anything if the interface
10007 +- * is down, the ethernet device may have gone.
10008 +- */
10009 +- if (!netif_running(dev))
10010 ++ spin_lock_bh(&lapbeth->up_lock);
10011 ++ if (!lapbeth->up)
10012 + goto drop;
10013 +
10014 + /* There should be a pseudo header of 1 byte added by upper layers.
10015 +@@ -185,6 +189,7 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
10016 + goto drop;
10017 + }
10018 + out:
10019 ++ spin_unlock_bh(&lapbeth->up_lock);
10020 + return NETDEV_TX_OK;
10021 + drop:
10022 + kfree_skb(skb);
10023 +@@ -276,6 +281,7 @@ static const struct lapb_register_struct lapbeth_callbacks = {
10024 + */
10025 + static int lapbeth_open(struct net_device *dev)
10026 + {
10027 ++ struct lapbethdev *lapbeth = netdev_priv(dev);
10028 + int err;
10029 +
10030 + if ((err = lapb_register(dev, &lapbeth_callbacks)) != LAPB_OK) {
10031 +@@ -283,13 +289,22 @@ static int lapbeth_open(struct net_device *dev)
10032 + return -ENODEV;
10033 + }
10034 +
10035 ++ spin_lock_bh(&lapbeth->up_lock);
10036 ++ lapbeth->up = true;
10037 ++ spin_unlock_bh(&lapbeth->up_lock);
10038 ++
10039 + return 0;
10040 + }
10041 +
10042 + static int lapbeth_close(struct net_device *dev)
10043 + {
10044 ++ struct lapbethdev *lapbeth = netdev_priv(dev);
10045 + int err;
10046 +
10047 ++ spin_lock_bh(&lapbeth->up_lock);
10048 ++ lapbeth->up = false;
10049 ++ spin_unlock_bh(&lapbeth->up_lock);
10050 ++
10051 + if ((err = lapb_unregister(dev)) != LAPB_OK)
10052 + pr_err("lapb_unregister error: %d\n", err);
10053 +
10054 +@@ -347,6 +362,9 @@ static int lapbeth_new_device(struct net_device *dev)
10055 + dev_hold(dev);
10056 + lapbeth->ethdev = dev;
10057 +
10058 ++ lapbeth->up = false;
10059 ++ spin_lock_init(&lapbeth->up_lock);
10060 ++
10061 + rc = -EIO;
10062 + if (register_netdevice(ndev))
10063 + goto fail;
10064 +diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
10065 +index 31df6dd04bf6f..540dd59112a5c 100644
10066 +--- a/drivers/net/wireless/ath/ath10k/htc.c
10067 ++++ b/drivers/net/wireless/ath/ath10k/htc.c
10068 +@@ -665,7 +665,7 @@ static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
10069 +
10070 + ath10k_dbg(ar, ATH10K_DBG_HTC,
10071 + "bundle tx status %d eid %d req count %d count %d len %d\n",
10072 +- ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, bundle_skb->len);
10073 ++ ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
10074 + return ret;
10075 + }
10076 +
10077 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
10078 +index e7072fc4f487a..4f2fbc610d798 100644
10079 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
10080 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
10081 +@@ -592,6 +592,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
10082 + GFP_ATOMIC
10083 + );
10084 + break;
10085 ++ default:
10086 ++ kfree(tb);
10087 ++ return;
10088 + }
10089 +
10090 + exit:
10091 +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
10092 +index db0c6fa9c9dc4..ff61ae34ecdf0 100644
10093 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
10094 ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
10095 +@@ -246,7 +246,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
10096 + if (unlikely(r)) {
10097 + ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
10098 + reg_offset, r);
10099 +- return -EIO;
10100 ++ return -1;
10101 + }
10102 +
10103 + return be32_to_cpu(val);
10104 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
10105 +index 6609ce122e6e5..c86faebbc4594 100644
10106 +--- a/drivers/net/wireless/ath/ath9k/hw.c
10107 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
10108 +@@ -287,7 +287,7 @@ static bool ath9k_hw_read_revisions(struct ath_hw *ah)
10109 +
10110 + srev = REG_READ(ah, AR_SREV);
10111 +
10112 +- if (srev == -EIO) {
10113 ++ if (srev == -1) {
10114 + ath_err(ath9k_hw_common(ah),
10115 + "Failed to read SREV register");
10116 + return false;
10117 +diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
10118 +index a0cf78c418ac9..903de34028efb 100644
10119 +--- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
10120 ++++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
10121 +@@ -633,8 +633,10 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
10122 + }
10123 +
10124 + if (ext->alg != IW_ENCODE_ALG_NONE) {
10125 +- memcpy(sec.keys[idx], ext->key, ext->key_len);
10126 +- sec.key_sizes[idx] = ext->key_len;
10127 ++ int key_len = clamp_val(ext->key_len, 0, SCM_KEY_LEN);
10128 ++
10129 ++ memcpy(sec.keys[idx], ext->key, key_len);
10130 ++ sec.key_sizes[idx] = key_len;
10131 + sec.flags |= (1 << idx);
10132 + if (ext->alg == IW_ENCODE_ALG_WEP) {
10133 + sec.encode_alg[idx] = SEC_ALG_WEP;
10134 +diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
10135 +index 23efd7075df6a..27b7d4b779e0b 100644
10136 +--- a/drivers/net/wireless/marvell/mwl8k.c
10137 ++++ b/drivers/net/wireless/marvell/mwl8k.c
10138 +@@ -1469,6 +1469,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
10139 + txq->skb = kcalloc(MWL8K_TX_DESCS, sizeof(*txq->skb), GFP_KERNEL);
10140 + if (txq->skb == NULL) {
10141 + pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
10142 ++ txq->txd = NULL;
10143 + return -ENOMEM;
10144 + }
10145 +
10146 +diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
10147 +index 665a03ebf9efd..0fdfead45c77c 100644
10148 +--- a/drivers/net/wireless/mediatek/mt76/dma.c
10149 ++++ b/drivers/net/wireless/mediatek/mt76/dma.c
10150 +@@ -318,7 +318,7 @@ mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
10151 + struct sk_buff *skb, u32 tx_info)
10152 + {
10153 + struct mt76_queue *q = dev->q_tx[qid];
10154 +- struct mt76_queue_buf buf;
10155 ++ struct mt76_queue_buf buf = {};
10156 + dma_addr_t addr;
10157 +
10158 + if (q->queued + 1 >= q->ndesc - 1)
10159 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
10160 +index f1f954ff46856..5795e44f8a529 100644
10161 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
10162 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
10163 +@@ -688,7 +688,7 @@ mt7615_txp_skb_unmap_fw(struct mt76_dev *dev, struct mt7615_fw_txp *txp)
10164 + {
10165 + int i;
10166 +
10167 +- for (i = 1; i < txp->nbuf; i++)
10168 ++ for (i = 0; i < txp->nbuf; i++)
10169 + dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
10170 + le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
10171 + }
10172 +@@ -1817,10 +1817,8 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
10173 + int i, aggr;
10174 + u32 val, val2;
10175 +
10176 +- memset(mib, 0, sizeof(*mib));
10177 +-
10178 +- mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
10179 +- MT_MIB_SDR3_FCS_ERR_MASK);
10180 ++ mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
10181 ++ MT_MIB_SDR3_FCS_ERR_MASK);
10182 +
10183 + val = mt76_get_field(dev, MT_MIB_SDR14(ext_phy),
10184 + MT_MIB_AMPDU_MPDU_COUNT);
10185 +@@ -1833,24 +1831,16 @@ mt7615_mac_update_mib_stats(struct mt7615_phy *phy)
10186 + aggr = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
10187 + for (i = 0; i < 4; i++) {
10188 + val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
10189 +-
10190 +- val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
10191 +- if (val2 > mib->ack_fail_cnt)
10192 +- mib->ack_fail_cnt = val2;
10193 +-
10194 +- val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
10195 +- if (val2 > mib->ba_miss_cnt)
10196 +- mib->ba_miss_cnt = val2;
10197 ++ mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
10198 ++ mib->ack_fail_cnt += FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK,
10199 ++ val);
10200 +
10201 + val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
10202 +- val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
10203 +- if (val2 > mib->rts_retries_cnt) {
10204 +- mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
10205 +- mib->rts_retries_cnt = val2;
10206 +- }
10207 ++ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
10208 ++ mib->rts_retries_cnt += FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK,
10209 ++ val);
10210 +
10211 + val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
10212 +-
10213 + dev->mt76.aggr_stats[aggr++] += val & 0xffff;
10214 + dev->mt76.aggr_stats[aggr++] += val >> 16;
10215 + }
10216 +@@ -2106,8 +2096,12 @@ void mt7615_tx_token_put(struct mt7615_dev *dev)
10217 + spin_lock_bh(&dev->token_lock);
10218 + idr_for_each_entry(&dev->token, txwi, id) {
10219 + mt7615_txp_skb_unmap(&dev->mt76, txwi);
10220 +- if (txwi->skb)
10221 +- dev_kfree_skb_any(txwi->skb);
10222 ++ if (txwi->skb) {
10223 ++ struct ieee80211_hw *hw;
10224 ++
10225 ++ hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
10226 ++ ieee80211_free_txskb(hw, txwi->skb);
10227 ++ }
10228 + mt76_put_txwi(&dev->mt76, txwi);
10229 + }
10230 + spin_unlock_bh(&dev->token_lock);
10231 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
10232 +index 3186b7b2ca483..88cdc2badeae7 100644
10233 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
10234 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
10235 +@@ -851,11 +851,17 @@ mt7615_get_stats(struct ieee80211_hw *hw,
10236 + struct mt7615_phy *phy = mt7615_hw_phy(hw);
10237 + struct mib_stats *mib = &phy->mib;
10238 +
10239 ++ mt7615_mutex_acquire(phy->dev);
10240 ++
10241 + stats->dot11RTSSuccessCount = mib->rts_cnt;
10242 + stats->dot11RTSFailureCount = mib->rts_retries_cnt;
10243 + stats->dot11FCSErrorCount = mib->fcs_err_cnt;
10244 + stats->dot11ACKFailureCount = mib->ack_fail_cnt;
10245 +
10246 ++ memset(mib, 0, sizeof(*mib));
10247 ++
10248 ++ mt7615_mutex_release(phy->dev);
10249 ++
10250 + return 0;
10251 + }
10252 +
10253 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
10254 +index 5b06294d654aa..4cee76691786e 100644
10255 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
10256 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
10257 +@@ -161,11 +161,11 @@ struct mt7615_vif {
10258 + };
10259 +
10260 + struct mib_stats {
10261 +- u16 ack_fail_cnt;
10262 +- u16 fcs_err_cnt;
10263 +- u16 rts_cnt;
10264 +- u16 rts_retries_cnt;
10265 +- u16 ba_miss_cnt;
10266 ++ u32 ack_fail_cnt;
10267 ++ u32 fcs_err_cnt;
10268 ++ u32 rts_cnt;
10269 ++ u32 rts_retries_cnt;
10270 ++ u32 ba_miss_cnt;
10271 + unsigned long aggr_per;
10272 + };
10273 +
10274 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
10275 +index 7b81aef3684ed..726e4781d9d9f 100644
10276 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
10277 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
10278 +@@ -161,10 +161,9 @@ void mt7615_unregister_device(struct mt7615_dev *dev)
10279 + mt76_unregister_device(&dev->mt76);
10280 + if (mcu_running)
10281 + mt7615_mcu_exit(dev);
10282 +- mt7615_dma_cleanup(dev);
10283 +
10284 + mt7615_tx_token_put(dev);
10285 +-
10286 ++ mt7615_dma_cleanup(dev);
10287 + tasklet_disable(&dev->irq_tasklet);
10288 +
10289 + mt76_free_device(&dev->mt76);
10290 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
10291 +index 595519c582558..d7d61a5b66a3c 100644
10292 +--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
10293 ++++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
10294 +@@ -195,11 +195,14 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
10295 + int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
10296 + struct mt76_queue *q = dev->q_tx[qid];
10297 + struct mt76_sdio *sdio = &dev->sdio;
10298 ++ u8 pad;
10299 +
10300 + while (q->first != q->head) {
10301 + struct mt76_queue_entry *e = &q->entry[q->first];
10302 + struct sk_buff *iter;
10303 +
10304 ++ smp_rmb();
10305 ++
10306 + if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
10307 + __skb_put_zero(e->skb, 4);
10308 + err = __mt7663s_xmit_queue(dev, e->skb->data,
10309 +@@ -210,7 +213,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
10310 + goto next;
10311 + }
10312 +
10313 +- if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
10314 ++ pad = roundup(e->skb->len, 4) - e->skb->len;
10315 ++ if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
10316 + break;
10317 +
10318 + if (mt7663s_tx_pick_quota(sdio, qid, e->buf_sz, &pse_sz,
10319 +@@ -228,6 +232,11 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
10320 + len += iter->len;
10321 + nframes++;
10322 + }
10323 ++
10324 ++ if (unlikely(pad)) {
10325 ++ memset(sdio->xmit_buf[qid] + len, 0, pad);
10326 ++ len += pad;
10327 ++ }
10328 + next:
10329 + q->first = (q->first + 1) % q->ndesc;
10330 + e->done = true;
10331 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
10332 +index 8f2ad32ade180..e4d7eb33a9f44 100644
10333 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
10334 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
10335 +@@ -124,7 +124,7 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
10336 + range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
10337 +
10338 + for (i = 0; i < ARRAY_SIZE(bound); i++)
10339 +- bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i) + 1;
10340 ++ bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
10341 +
10342 + seq_printf(file, "\nPhy %d\n", ext_phy);
10343 +
10344 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
10345 +index 6f159d99a5965..1e14d7782841e 100644
10346 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
10347 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
10348 +@@ -856,7 +856,7 @@ void mt7915_txp_skb_unmap(struct mt76_dev *dev,
10349 + int i;
10350 +
10351 + txp = mt7915_txwi_to_txp(dev, t);
10352 +- for (i = 1; i < txp->nbuf; i++)
10353 ++ for (i = 0; i < txp->nbuf; i++)
10354 + dma_unmap_single(dev->dev, le32_to_cpu(txp->buf[i]),
10355 + le16_to_cpu(txp->len[i]), DMA_TO_DEVICE);
10356 + }
10357 +@@ -1277,39 +1277,30 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
10358 + bool ext_phy = phy != &dev->phy;
10359 + int i, aggr0, aggr1;
10360 +
10361 +- memset(mib, 0, sizeof(*mib));
10362 +-
10363 +- mib->fcs_err_cnt = mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
10364 +- MT_MIB_SDR3_FCS_ERR_MASK);
10365 ++ mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
10366 ++ MT_MIB_SDR3_FCS_ERR_MASK);
10367 +
10368 + aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
10369 + for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
10370 +- u32 val, val2;
10371 ++ u32 val;
10372 +
10373 + val = mt76_rr(dev, MT_MIB_MB_SDR1(ext_phy, i));
10374 +-
10375 +- val2 = FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
10376 +- if (val2 > mib->ack_fail_cnt)
10377 +- mib->ack_fail_cnt = val2;
10378 +-
10379 +- val2 = FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
10380 +- if (val2 > mib->ba_miss_cnt)
10381 +- mib->ba_miss_cnt = val2;
10382 ++ mib->ba_miss_cnt += FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
10383 ++ mib->ack_fail_cnt +=
10384 ++ FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
10385 +
10386 + val = mt76_rr(dev, MT_MIB_MB_SDR0(ext_phy, i));
10387 +- val2 = FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
10388 +- if (val2 > mib->rts_retries_cnt) {
10389 +- mib->rts_cnt = FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
10390 +- mib->rts_retries_cnt = val2;
10391 +- }
10392 ++ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
10393 ++ mib->rts_retries_cnt +=
10394 ++ FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
10395 +
10396 + val = mt76_rr(dev, MT_TX_AGG_CNT(ext_phy, i));
10397 +- val2 = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
10398 +-
10399 + dev->mt76.aggr_stats[aggr0++] += val & 0xffff;
10400 + dev->mt76.aggr_stats[aggr0++] += val >> 16;
10401 +- dev->mt76.aggr_stats[aggr1++] += val2 & 0xffff;
10402 +- dev->mt76.aggr_stats[aggr1++] += val2 >> 16;
10403 ++
10404 ++ val = mt76_rr(dev, MT_TX_AGG_CNT2(ext_phy, i));
10405 ++ dev->mt76.aggr_stats[aggr1++] += val & 0xffff;
10406 ++ dev->mt76.aggr_stats[aggr1++] += val >> 16;
10407 + }
10408 + }
10409 +
10410 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
10411 +index c48158392057e..e78d3efa3fdf4 100644
10412 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
10413 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
10414 +@@ -651,13 +651,19 @@ mt7915_get_stats(struct ieee80211_hw *hw,
10415 + struct ieee80211_low_level_stats *stats)
10416 + {
10417 + struct mt7915_phy *phy = mt7915_hw_phy(hw);
10418 ++ struct mt7915_dev *dev = mt7915_hw_dev(hw);
10419 + struct mib_stats *mib = &phy->mib;
10420 +
10421 ++ mutex_lock(&dev->mt76.mutex);
10422 + stats->dot11RTSSuccessCount = mib->rts_cnt;
10423 + stats->dot11RTSFailureCount = mib->rts_retries_cnt;
10424 + stats->dot11FCSErrorCount = mib->fcs_err_cnt;
10425 + stats->dot11ACKFailureCount = mib->ack_fail_cnt;
10426 +
10427 ++ memset(mib, 0, sizeof(*mib));
10428 ++
10429 ++ mutex_unlock(&dev->mt76.mutex);
10430 ++
10431 + return 0;
10432 + }
10433 +
10434 +diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
10435 +index 4b8908fa7eda6..c84110e34ede1 100644
10436 +--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
10437 ++++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
10438 +@@ -99,11 +99,11 @@ struct mt7915_vif {
10439 + };
10440 +
10441 + struct mib_stats {
10442 +- u16 ack_fail_cnt;
10443 +- u16 fcs_err_cnt;
10444 +- u16 rts_cnt;
10445 +- u16 rts_retries_cnt;
10446 +- u16 ba_miss_cnt;
10447 ++ u32 ack_fail_cnt;
10448 ++ u32 fcs_err_cnt;
10449 ++ u32 rts_cnt;
10450 ++ u32 rts_retries_cnt;
10451 ++ u32 ba_miss_cnt;
10452 + };
10453 +
10454 + struct mt7915_phy {
10455 +diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
10456 +index 9a4d95a2a7072..439ea4158260e 100644
10457 +--- a/drivers/net/wireless/mediatek/mt76/sdio.c
10458 ++++ b/drivers/net/wireless/mediatek/mt76/sdio.c
10459 +@@ -215,6 +215,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
10460 +
10461 + q->entry[q->head].skb = tx_info.skb;
10462 + q->entry[q->head].buf_sz = len;
10463 ++
10464 ++ smp_wmb();
10465 ++
10466 + q->head = (q->head + 1) % q->ndesc;
10467 + q->queued++;
10468 +
10469 +diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.c b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
10470 +index c868582c5d225..aa3b64902cf9b 100644
10471 +--- a/drivers/net/wireless/mediatek/mt7601u/eeprom.c
10472 ++++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.c
10473 +@@ -99,7 +99,7 @@ mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom)
10474 + {
10475 + u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1);
10476 +
10477 +- return ~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
10478 ++ return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN);
10479 + }
10480 +
10481 + static void
10482 +diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
10483 +index 351ff909ab1c7..e14b9fc2c67ac 100644
10484 +--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
10485 ++++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
10486 +@@ -947,7 +947,7 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
10487 + for (i = 0; (i < 3) && (nint > 0); i++, nint--)
10488 + reg |= BIT(i);
10489 +
10490 +- ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
10491 ++ ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg);
10492 + if (ret) {
10493 + dev_err(&func->dev,
10494 + "Failed write reg (%08x)...\n",
10495 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
10496 +index 85093b3e53733..ed72a2aeb6c8e 100644
10497 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
10498 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c
10499 +@@ -249,7 +249,7 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
10500 + 0x824, 0x00030FE0,
10501 + 0x828, 0x00000000,
10502 + 0x82C, 0x002081DD,
10503 +- 0x830, 0x2AAA8E24,
10504 ++ 0x830, 0x2AAAEEC8,
10505 + 0x834, 0x0037A706,
10506 + 0x838, 0x06489B44,
10507 + 0x83C, 0x0000095B,
10508 +@@ -324,10 +324,10 @@ u32 RTL8821AE_PHY_REG_ARRAY[] = {
10509 + 0x9D8, 0x00000000,
10510 + 0x9DC, 0x00000000,
10511 + 0x9E0, 0x00005D00,
10512 +- 0x9E4, 0x00000002,
10513 ++ 0x9E4, 0x00000003,
10514 + 0x9E8, 0x00000001,
10515 + 0xA00, 0x00D047C8,
10516 +- 0xA04, 0x01FF000C,
10517 ++ 0xA04, 0x01FF800C,
10518 + 0xA08, 0x8C8A8300,
10519 + 0xA0C, 0x2E68000F,
10520 + 0xA10, 0x9500BB78,
10521 +@@ -1320,7 +1320,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10522 + 0x083, 0x00021800,
10523 + 0x084, 0x00028000,
10524 + 0x085, 0x00048000,
10525 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
10526 ++ 0x086, 0x0009483A,
10527 ++ 0xA0000000, 0x00000000,
10528 + 0x086, 0x00094838,
10529 ++ 0xB0000000, 0x00000000,
10530 + 0x087, 0x00044980,
10531 + 0x088, 0x00048000,
10532 + 0x089, 0x0000D480,
10533 +@@ -1409,36 +1413,32 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10534 + 0x03C, 0x000CA000,
10535 + 0x0EF, 0x00000000,
10536 + 0x0EF, 0x00001100,
10537 +- 0xFF0F0104, 0xABCD,
10538 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
10539 + 0x034, 0x0004ADF3,
10540 + 0x034, 0x00049DF0,
10541 +- 0xFF0F0204, 0xCDEF,
10542 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
10543 + 0x034, 0x0004ADF3,
10544 + 0x034, 0x00049DF0,
10545 +- 0xFF0F0404, 0xCDEF,
10546 +- 0x034, 0x0004ADF3,
10547 +- 0x034, 0x00049DF0,
10548 +- 0xFF0F0200, 0xCDEF,
10549 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
10550 + 0x034, 0x0004ADF5,
10551 + 0x034, 0x00049DF2,
10552 +- 0xFF0F02C0, 0xCDEF,
10553 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
10554 ++ 0x034, 0x0004A0F3,
10555 ++ 0x034, 0x000490B1,
10556 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10557 + 0x034, 0x0004A0F3,
10558 + 0x034, 0x000490B1,
10559 +- 0xCDCDCDCD, 0xCDCD,
10560 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
10561 ++ 0x034, 0x0004ADF5,
10562 ++ 0x034, 0x00049DF2,
10563 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
10564 ++ 0x034, 0x0004ADF3,
10565 ++ 0x034, 0x00049DF0,
10566 ++ 0xA0000000, 0x00000000,
10567 + 0x034, 0x0004ADF7,
10568 + 0x034, 0x00049DF3,
10569 +- 0xFF0F0104, 0xDEAD,
10570 +- 0xFF0F0104, 0xABCD,
10571 +- 0x034, 0x00048DED,
10572 +- 0x034, 0x00047DEA,
10573 +- 0x034, 0x00046DE7,
10574 +- 0x034, 0x00045CE9,
10575 +- 0x034, 0x00044CE6,
10576 +- 0x034, 0x000438C6,
10577 +- 0x034, 0x00042886,
10578 +- 0x034, 0x00041486,
10579 +- 0x034, 0x00040447,
10580 +- 0xFF0F0204, 0xCDEF,
10581 ++ 0xB0000000, 0x00000000,
10582 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
10583 + 0x034, 0x00048DED,
10584 + 0x034, 0x00047DEA,
10585 + 0x034, 0x00046DE7,
10586 +@@ -1448,7 +1448,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10587 + 0x034, 0x00042886,
10588 + 0x034, 0x00041486,
10589 + 0x034, 0x00040447,
10590 +- 0xFF0F0404, 0xCDEF,
10591 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
10592 + 0x034, 0x00048DED,
10593 + 0x034, 0x00047DEA,
10594 + 0x034, 0x00046DE7,
10595 +@@ -1458,7 +1458,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10596 + 0x034, 0x00042886,
10597 + 0x034, 0x00041486,
10598 + 0x034, 0x00040447,
10599 +- 0xFF0F02C0, 0xCDEF,
10600 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
10601 ++ 0x034, 0x000480AE,
10602 ++ 0x034, 0x000470AB,
10603 ++ 0x034, 0x0004608B,
10604 ++ 0x034, 0x00045069,
10605 ++ 0x034, 0x00044048,
10606 ++ 0x034, 0x00043045,
10607 ++ 0x034, 0x00042026,
10608 ++ 0x034, 0x00041023,
10609 ++ 0x034, 0x00040002,
10610 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10611 + 0x034, 0x000480AE,
10612 + 0x034, 0x000470AB,
10613 + 0x034, 0x0004608B,
10614 +@@ -1468,7 +1478,17 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10615 + 0x034, 0x00042026,
10616 + 0x034, 0x00041023,
10617 + 0x034, 0x00040002,
10618 +- 0xCDCDCDCD, 0xCDCD,
10619 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
10620 ++ 0x034, 0x00048DED,
10621 ++ 0x034, 0x00047DEA,
10622 ++ 0x034, 0x00046DE7,
10623 ++ 0x034, 0x00045CE9,
10624 ++ 0x034, 0x00044CE6,
10625 ++ 0x034, 0x000438C6,
10626 ++ 0x034, 0x00042886,
10627 ++ 0x034, 0x00041486,
10628 ++ 0x034, 0x00040447,
10629 ++ 0xA0000000, 0x00000000,
10630 + 0x034, 0x00048DEF,
10631 + 0x034, 0x00047DEC,
10632 + 0x034, 0x00046DE9,
10633 +@@ -1478,38 +1498,36 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10634 + 0x034, 0x0004248A,
10635 + 0x034, 0x0004108D,
10636 + 0x034, 0x0004008A,
10637 +- 0xFF0F0104, 0xDEAD,
10638 +- 0xFF0F0200, 0xABCD,
10639 ++ 0xB0000000, 0x00000000,
10640 ++ 0x80000210, 0x00000000, 0x40000000, 0x00000000,
10641 + 0x034, 0x0002ADF4,
10642 +- 0xFF0F02C0, 0xCDEF,
10643 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
10644 ++ 0x034, 0x0002A0F3,
10645 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10646 + 0x034, 0x0002A0F3,
10647 +- 0xCDCDCDCD, 0xCDCD,
10648 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
10649 ++ 0x034, 0x0002ADF4,
10650 ++ 0xA0000000, 0x00000000,
10651 + 0x034, 0x0002ADF7,
10652 +- 0xFF0F0200, 0xDEAD,
10653 +- 0xFF0F0104, 0xABCD,
10654 +- 0x034, 0x00029DF4,
10655 +- 0xFF0F0204, 0xCDEF,
10656 ++ 0xB0000000, 0x00000000,
10657 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
10658 + 0x034, 0x00029DF4,
10659 +- 0xFF0F0404, 0xCDEF,
10660 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
10661 + 0x034, 0x00029DF4,
10662 +- 0xFF0F0200, 0xCDEF,
10663 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
10664 + 0x034, 0x00029DF1,
10665 +- 0xFF0F02C0, 0xCDEF,
10666 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
10667 ++ 0x034, 0x000290F0,
10668 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10669 + 0x034, 0x000290F0,
10670 +- 0xCDCDCDCD, 0xCDCD,
10671 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
10672 ++ 0x034, 0x00029DF1,
10673 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
10674 ++ 0x034, 0x00029DF4,
10675 ++ 0xA0000000, 0x00000000,
10676 + 0x034, 0x00029DF2,
10677 +- 0xFF0F0104, 0xDEAD,
10678 +- 0xFF0F0104, 0xABCD,
10679 +- 0x034, 0x00028DF1,
10680 +- 0x034, 0x00027DEE,
10681 +- 0x034, 0x00026DEB,
10682 +- 0x034, 0x00025CEC,
10683 +- 0x034, 0x00024CE9,
10684 +- 0x034, 0x000238CA,
10685 +- 0x034, 0x00022889,
10686 +- 0x034, 0x00021489,
10687 +- 0x034, 0x0002044A,
10688 +- 0xFF0F0204, 0xCDEF,
10689 ++ 0xB0000000, 0x00000000,
10690 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
10691 + 0x034, 0x00028DF1,
10692 + 0x034, 0x00027DEE,
10693 + 0x034, 0x00026DEB,
10694 +@@ -1519,7 +1537,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10695 + 0x034, 0x00022889,
10696 + 0x034, 0x00021489,
10697 + 0x034, 0x0002044A,
10698 +- 0xFF0F0404, 0xCDEF,
10699 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
10700 + 0x034, 0x00028DF1,
10701 + 0x034, 0x00027DEE,
10702 + 0x034, 0x00026DEB,
10703 +@@ -1529,7 +1547,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10704 + 0x034, 0x00022889,
10705 + 0x034, 0x00021489,
10706 + 0x034, 0x0002044A,
10707 +- 0xFF0F02C0, 0xCDEF,
10708 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
10709 + 0x034, 0x000280AF,
10710 + 0x034, 0x000270AC,
10711 + 0x034, 0x0002608B,
10712 +@@ -1539,7 +1557,27 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10713 + 0x034, 0x00022026,
10714 + 0x034, 0x00021023,
10715 + 0x034, 0x00020002,
10716 +- 0xCDCDCDCD, 0xCDCD,
10717 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10718 ++ 0x034, 0x000280AF,
10719 ++ 0x034, 0x000270AC,
10720 ++ 0x034, 0x0002608B,
10721 ++ 0x034, 0x00025069,
10722 ++ 0x034, 0x00024048,
10723 ++ 0x034, 0x00023045,
10724 ++ 0x034, 0x00022026,
10725 ++ 0x034, 0x00021023,
10726 ++ 0x034, 0x00020002,
10727 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
10728 ++ 0x034, 0x00028DF1,
10729 ++ 0x034, 0x00027DEE,
10730 ++ 0x034, 0x00026DEB,
10731 ++ 0x034, 0x00025CEC,
10732 ++ 0x034, 0x00024CE9,
10733 ++ 0x034, 0x000238CA,
10734 ++ 0x034, 0x00022889,
10735 ++ 0x034, 0x00021489,
10736 ++ 0x034, 0x0002044A,
10737 ++ 0xA0000000, 0x00000000,
10738 + 0x034, 0x00028DEE,
10739 + 0x034, 0x00027DEB,
10740 + 0x034, 0x00026CCD,
10741 +@@ -1549,27 +1587,24 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10742 + 0x034, 0x00022849,
10743 + 0x034, 0x00021449,
10744 + 0x034, 0x0002004D,
10745 +- 0xFF0F0104, 0xDEAD,
10746 +- 0xFF0F02C0, 0xABCD,
10747 ++ 0xB0000000, 0x00000000,
10748 ++ 0x8000020c, 0x00000000, 0x40000000, 0x00000000,
10749 ++ 0x034, 0x0000A0D7,
10750 ++ 0x034, 0x000090D3,
10751 ++ 0x034, 0x000080B1,
10752 ++ 0x034, 0x000070AE,
10753 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10754 + 0x034, 0x0000A0D7,
10755 + 0x034, 0x000090D3,
10756 + 0x034, 0x000080B1,
10757 + 0x034, 0x000070AE,
10758 +- 0xCDCDCDCD, 0xCDCD,
10759 ++ 0xA0000000, 0x00000000,
10760 + 0x034, 0x0000ADF7,
10761 + 0x034, 0x00009DF4,
10762 + 0x034, 0x00008DF1,
10763 + 0x034, 0x00007DEE,
10764 +- 0xFF0F02C0, 0xDEAD,
10765 +- 0xFF0F0104, 0xABCD,
10766 +- 0x034, 0x00006DEB,
10767 +- 0x034, 0x00005CEC,
10768 +- 0x034, 0x00004CE9,
10769 +- 0x034, 0x000038CA,
10770 +- 0x034, 0x00002889,
10771 +- 0x034, 0x00001489,
10772 +- 0x034, 0x0000044A,
10773 +- 0xFF0F0204, 0xCDEF,
10774 ++ 0xB0000000, 0x00000000,
10775 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
10776 + 0x034, 0x00006DEB,
10777 + 0x034, 0x00005CEC,
10778 + 0x034, 0x00004CE9,
10779 +@@ -1577,7 +1612,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10780 + 0x034, 0x00002889,
10781 + 0x034, 0x00001489,
10782 + 0x034, 0x0000044A,
10783 +- 0xFF0F0404, 0xCDEF,
10784 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
10785 + 0x034, 0x00006DEB,
10786 + 0x034, 0x00005CEC,
10787 + 0x034, 0x00004CE9,
10788 +@@ -1585,7 +1620,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10789 + 0x034, 0x00002889,
10790 + 0x034, 0x00001489,
10791 + 0x034, 0x0000044A,
10792 +- 0xFF0F02C0, 0xCDEF,
10793 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
10794 + 0x034, 0x0000608D,
10795 + 0x034, 0x0000506B,
10796 + 0x034, 0x0000404A,
10797 +@@ -1593,7 +1628,23 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10798 + 0x034, 0x00002044,
10799 + 0x034, 0x00001025,
10800 + 0x034, 0x00000004,
10801 +- 0xCDCDCDCD, 0xCDCD,
10802 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10803 ++ 0x034, 0x0000608D,
10804 ++ 0x034, 0x0000506B,
10805 ++ 0x034, 0x0000404A,
10806 ++ 0x034, 0x00003047,
10807 ++ 0x034, 0x00002044,
10808 ++ 0x034, 0x00001025,
10809 ++ 0x034, 0x00000004,
10810 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
10811 ++ 0x034, 0x00006DEB,
10812 ++ 0x034, 0x00005CEC,
10813 ++ 0x034, 0x00004CE9,
10814 ++ 0x034, 0x000038CA,
10815 ++ 0x034, 0x00002889,
10816 ++ 0x034, 0x00001489,
10817 ++ 0x034, 0x0000044A,
10818 ++ 0xA0000000, 0x00000000,
10819 + 0x034, 0x00006DCD,
10820 + 0x034, 0x00005CCD,
10821 + 0x034, 0x00004CCA,
10822 +@@ -1601,11 +1652,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10823 + 0x034, 0x00002888,
10824 + 0x034, 0x00001488,
10825 + 0x034, 0x00000486,
10826 +- 0xFF0F0104, 0xDEAD,
10827 ++ 0xB0000000, 0x00000000,
10828 + 0x0EF, 0x00000000,
10829 + 0x018, 0x0001712A,
10830 + 0x0EF, 0x00000040,
10831 +- 0xFF0F0104, 0xABCD,
10832 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
10833 + 0x035, 0x00000187,
10834 + 0x035, 0x00008187,
10835 + 0x035, 0x00010187,
10836 +@@ -1615,7 +1666,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10837 + 0x035, 0x00040188,
10838 + 0x035, 0x00048188,
10839 + 0x035, 0x00050188,
10840 +- 0xFF0F0204, 0xCDEF,
10841 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
10842 + 0x035, 0x00000187,
10843 + 0x035, 0x00008187,
10844 + 0x035, 0x00010187,
10845 +@@ -1625,7 +1676,37 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10846 + 0x035, 0x00040188,
10847 + 0x035, 0x00048188,
10848 + 0x035, 0x00050188,
10849 +- 0xFF0F0404, 0xCDEF,
10850 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
10851 ++ 0x035, 0x00000128,
10852 ++ 0x035, 0x00008128,
10853 ++ 0x035, 0x00010128,
10854 ++ 0x035, 0x000201C8,
10855 ++ 0x035, 0x000281C8,
10856 ++ 0x035, 0x000301C8,
10857 ++ 0x035, 0x000401C8,
10858 ++ 0x035, 0x000481C8,
10859 ++ 0x035, 0x000501C8,
10860 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10861 ++ 0x035, 0x00000145,
10862 ++ 0x035, 0x00008145,
10863 ++ 0x035, 0x00010145,
10864 ++ 0x035, 0x00020196,
10865 ++ 0x035, 0x00028196,
10866 ++ 0x035, 0x00030196,
10867 ++ 0x035, 0x000401C7,
10868 ++ 0x035, 0x000481C7,
10869 ++ 0x035, 0x000501C7,
10870 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
10871 ++ 0x035, 0x00000128,
10872 ++ 0x035, 0x00008128,
10873 ++ 0x035, 0x00010128,
10874 ++ 0x035, 0x000201C8,
10875 ++ 0x035, 0x000281C8,
10876 ++ 0x035, 0x000301C8,
10877 ++ 0x035, 0x000401C8,
10878 ++ 0x035, 0x000481C8,
10879 ++ 0x035, 0x000501C8,
10880 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
10881 + 0x035, 0x00000187,
10882 + 0x035, 0x00008187,
10883 + 0x035, 0x00010187,
10884 +@@ -1635,7 +1716,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10885 + 0x035, 0x00040188,
10886 + 0x035, 0x00048188,
10887 + 0x035, 0x00050188,
10888 +- 0xCDCDCDCD, 0xCDCD,
10889 ++ 0xA0000000, 0x00000000,
10890 + 0x035, 0x00000145,
10891 + 0x035, 0x00008145,
10892 + 0x035, 0x00010145,
10893 +@@ -1645,11 +1726,11 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10894 + 0x035, 0x000401C7,
10895 + 0x035, 0x000481C7,
10896 + 0x035, 0x000501C7,
10897 +- 0xFF0F0104, 0xDEAD,
10898 ++ 0xB0000000, 0x00000000,
10899 + 0x0EF, 0x00000000,
10900 + 0x018, 0x0001712A,
10901 + 0x0EF, 0x00000010,
10902 +- 0xFF0F0104, 0xABCD,
10903 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
10904 + 0x036, 0x00085733,
10905 + 0x036, 0x0008D733,
10906 + 0x036, 0x00095733,
10907 +@@ -1662,7 +1743,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10908 + 0x036, 0x000CE4B4,
10909 + 0x036, 0x000D64B4,
10910 + 0x036, 0x000DE4B4,
10911 +- 0xFF0F0204, 0xCDEF,
10912 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
10913 + 0x036, 0x00085733,
10914 + 0x036, 0x0008D733,
10915 + 0x036, 0x00095733,
10916 +@@ -1675,7 +1756,46 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10917 + 0x036, 0x000CE4B4,
10918 + 0x036, 0x000D64B4,
10919 + 0x036, 0x000DE4B4,
10920 +- 0xFF0F0404, 0xCDEF,
10921 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
10922 ++ 0x036, 0x000063B5,
10923 ++ 0x036, 0x0000E3B5,
10924 ++ 0x036, 0x000163B5,
10925 ++ 0x036, 0x0001E3B5,
10926 ++ 0x036, 0x000263B5,
10927 ++ 0x036, 0x0002E3B5,
10928 ++ 0x036, 0x000363B5,
10929 ++ 0x036, 0x0003E3B5,
10930 ++ 0x036, 0x000463B5,
10931 ++ 0x036, 0x0004E3B5,
10932 ++ 0x036, 0x000563B5,
10933 ++ 0x036, 0x0005E3B5,
10934 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10935 ++ 0x036, 0x000056B3,
10936 ++ 0x036, 0x0000D6B3,
10937 ++ 0x036, 0x000156B3,
10938 ++ 0x036, 0x0001D6B3,
10939 ++ 0x036, 0x00026634,
10940 ++ 0x036, 0x0002E634,
10941 ++ 0x036, 0x00036634,
10942 ++ 0x036, 0x0003E634,
10943 ++ 0x036, 0x000467B4,
10944 ++ 0x036, 0x0004E7B4,
10945 ++ 0x036, 0x000567B4,
10946 ++ 0x036, 0x0005E7B4,
10947 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
10948 ++ 0x036, 0x000063B5,
10949 ++ 0x036, 0x0000E3B5,
10950 ++ 0x036, 0x000163B5,
10951 ++ 0x036, 0x0001E3B5,
10952 ++ 0x036, 0x000263B5,
10953 ++ 0x036, 0x0002E3B5,
10954 ++ 0x036, 0x000363B5,
10955 ++ 0x036, 0x0003E3B5,
10956 ++ 0x036, 0x000463B5,
10957 ++ 0x036, 0x0004E3B5,
10958 ++ 0x036, 0x000563B5,
10959 ++ 0x036, 0x0005E3B5,
10960 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
10961 + 0x036, 0x00085733,
10962 + 0x036, 0x0008D733,
10963 + 0x036, 0x00095733,
10964 +@@ -1688,7 +1808,7 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10965 + 0x036, 0x000CE4B4,
10966 + 0x036, 0x000D64B4,
10967 + 0x036, 0x000DE4B4,
10968 +- 0xCDCDCDCD, 0xCDCD,
10969 ++ 0xA0000000, 0x00000000,
10970 + 0x036, 0x000056B3,
10971 + 0x036, 0x0000D6B3,
10972 + 0x036, 0x000156B3,
10973 +@@ -1701,103 +1821,162 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
10974 + 0x036, 0x0004E7B4,
10975 + 0x036, 0x000567B4,
10976 + 0x036, 0x0005E7B4,
10977 +- 0xFF0F0104, 0xDEAD,
10978 ++ 0xB0000000, 0x00000000,
10979 + 0x0EF, 0x00000000,
10980 + 0x0EF, 0x00000008,
10981 +- 0xFF0F0104, 0xABCD,
10982 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
10983 + 0x03C, 0x000001C8,
10984 + 0x03C, 0x00000492,
10985 +- 0xFF0F0204, 0xCDEF,
10986 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
10987 + 0x03C, 0x000001C8,
10988 + 0x03C, 0x00000492,
10989 +- 0xFF0F0404, 0xCDEF,
10990 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
10991 ++ 0x03C, 0x000001B6,
10992 ++ 0x03C, 0x00000492,
10993 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
10994 ++ 0x03C, 0x0000022A,
10995 ++ 0x03C, 0x00000594,
10996 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
10997 ++ 0x03C, 0x000001B6,
10998 ++ 0x03C, 0x00000492,
10999 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
11000 + 0x03C, 0x000001C8,
11001 + 0x03C, 0x00000492,
11002 +- 0xCDCDCDCD, 0xCDCD,
11003 ++ 0xA0000000, 0x00000000,
11004 + 0x03C, 0x0000022A,
11005 + 0x03C, 0x00000594,
11006 +- 0xFF0F0104, 0xDEAD,
11007 +- 0xFF0F0104, 0xABCD,
11008 ++ 0xB0000000, 0x00000000,
11009 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
11010 + 0x03C, 0x00000800,
11011 +- 0xFF0F0204, 0xCDEF,
11012 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
11013 + 0x03C, 0x00000800,
11014 +- 0xFF0F0404, 0xCDEF,
11015 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
11016 + 0x03C, 0x00000800,
11017 +- 0xFF0F02C0, 0xCDEF,
11018 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
11019 + 0x03C, 0x00000820,
11020 +- 0xCDCDCDCD, 0xCDCD,
11021 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
11022 ++ 0x03C, 0x00000820,
11023 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
11024 ++ 0x03C, 0x00000800,
11025 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
11026 ++ 0x03C, 0x00000800,
11027 ++ 0xA0000000, 0x00000000,
11028 + 0x03C, 0x00000900,
11029 +- 0xFF0F0104, 0xDEAD,
11030 ++ 0xB0000000, 0x00000000,
11031 + 0x0EF, 0x00000000,
11032 + 0x018, 0x0001712A,
11033 + 0x0EF, 0x00000002,
11034 +- 0xFF0F0104, 0xABCD,
11035 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
11036 + 0x008, 0x0004E400,
11037 +- 0xFF0F0204, 0xCDEF,
11038 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
11039 + 0x008, 0x0004E400,
11040 +- 0xFF0F0404, 0xCDEF,
11041 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
11042 ++ 0x008, 0x00002000,
11043 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
11044 ++ 0x008, 0x00002000,
11045 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
11046 ++ 0x008, 0x00002000,
11047 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
11048 ++ 0x008, 0x00002000,
11049 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
11050 + 0x008, 0x0004E400,
11051 +- 0xCDCDCDCD, 0xCDCD,
11052 ++ 0xA0000000, 0x00000000,
11053 + 0x008, 0x00002000,
11054 +- 0xFF0F0104, 0xDEAD,
11055 ++ 0xB0000000, 0x00000000,
11056 + 0x0EF, 0x00000000,
11057 + 0x0DF, 0x000000C0,
11058 +- 0x01F, 0x00040064,
11059 +- 0xFF0F0104, 0xABCD,
11060 ++ 0x01F, 0x00000064,
11061 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
11062 + 0x058, 0x000A7284,
11063 + 0x059, 0x000600EC,
11064 +- 0xFF0F0204, 0xCDEF,
11065 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
11066 + 0x058, 0x000A7284,
11067 + 0x059, 0x000600EC,
11068 +- 0xFF0F0404, 0xCDEF,
11069 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
11070 ++ 0x058, 0x00081184,
11071 ++ 0x059, 0x0006016C,
11072 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
11073 ++ 0x058, 0x00081184,
11074 ++ 0x059, 0x0006016C,
11075 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
11076 ++ 0x058, 0x00081184,
11077 ++ 0x059, 0x0006016C,
11078 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
11079 + 0x058, 0x000A7284,
11080 + 0x059, 0x000600EC,
11081 +- 0xCDCDCDCD, 0xCDCD,
11082 ++ 0xA0000000, 0x00000000,
11083 + 0x058, 0x00081184,
11084 + 0x059, 0x0006016C,
11085 +- 0xFF0F0104, 0xDEAD,
11086 +- 0xFF0F0104, 0xABCD,
11087 ++ 0xB0000000, 0x00000000,
11088 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
11089 + 0x061, 0x000E8D73,
11090 + 0x062, 0x00093FC5,
11091 +- 0xFF0F0204, 0xCDEF,
11092 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
11093 + 0x061, 0x000E8D73,
11094 + 0x062, 0x00093FC5,
11095 +- 0xFF0F0404, 0xCDEF,
11096 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
11097 ++ 0x061, 0x000EFD83,
11098 ++ 0x062, 0x00093FCC,
11099 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
11100 ++ 0x061, 0x000EAD53,
11101 ++ 0x062, 0x00093BC4,
11102 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
11103 ++ 0x061, 0x000EFD83,
11104 ++ 0x062, 0x00093FCC,
11105 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
11106 + 0x061, 0x000E8D73,
11107 + 0x062, 0x00093FC5,
11108 +- 0xCDCDCDCD, 0xCDCD,
11109 ++ 0xA0000000, 0x00000000,
11110 + 0x061, 0x000EAD53,
11111 + 0x062, 0x00093BC4,
11112 +- 0xFF0F0104, 0xDEAD,
11113 +- 0xFF0F0104, 0xABCD,
11114 ++ 0xB0000000, 0x00000000,
11115 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
11116 + 0x063, 0x000110E9,
11117 +- 0xFF0F0204, 0xCDEF,
11118 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
11119 + 0x063, 0x000110E9,
11120 +- 0xFF0F0404, 0xCDEF,
11121 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
11122 ++ 0x063, 0x000110EB,
11123 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
11124 + 0x063, 0x000110E9,
11125 +- 0xFF0F0200, 0xCDEF,
11126 +- 0x063, 0x000710E9,
11127 +- 0xFF0F02C0, 0xCDEF,
11128 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
11129 + 0x063, 0x000110E9,
11130 +- 0xCDCDCDCD, 0xCDCD,
11131 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
11132 ++ 0x063, 0x000110EB,
11133 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
11134 ++ 0x063, 0x000110E9,
11135 ++ 0xA0000000, 0x00000000,
11136 + 0x063, 0x000714E9,
11137 +- 0xFF0F0104, 0xDEAD,
11138 +- 0xFF0F0104, 0xABCD,
11139 ++ 0xB0000000, 0x00000000,
11140 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
11141 ++ 0x064, 0x0001C27C,
11142 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
11143 ++ 0x064, 0x0001C27C,
11144 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
11145 + 0x064, 0x0001C27C,
11146 +- 0xFF0F0204, 0xCDEF,
11147 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
11148 ++ 0x064, 0x0001C67C,
11149 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
11150 + 0x064, 0x0001C27C,
11151 +- 0xFF0F0404, 0xCDEF,
11152 ++ 0x90000410, 0x00000000, 0x40000000, 0x00000000,
11153 + 0x064, 0x0001C27C,
11154 +- 0xCDCDCDCD, 0xCDCD,
11155 ++ 0xA0000000, 0x00000000,
11156 + 0x064, 0x0001C67C,
11157 +- 0xFF0F0104, 0xDEAD,
11158 +- 0xFF0F0200, 0xABCD,
11159 ++ 0xB0000000, 0x00000000,
11160 ++ 0x80000111, 0x00000000, 0x40000000, 0x00000000,
11161 ++ 0x065, 0x00091016,
11162 ++ 0x90000110, 0x00000000, 0x40000000, 0x00000000,
11163 ++ 0x065, 0x00091016,
11164 ++ 0x90000210, 0x00000000, 0x40000000, 0x00000000,
11165 + 0x065, 0x00093016,
11166 +- 0xFF0F02C0, 0xCDEF,
11167 ++ 0x9000020c, 0x00000000, 0x40000000, 0x00000000,
11168 + 0x065, 0x00093015,
11169 +- 0xCDCDCDCD, 0xCDCD,
11170 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
11171 ++ 0x065, 0x00093015,
11172 ++ 0x90000200, 0x00000000, 0x40000000, 0x00000000,
11173 ++ 0x065, 0x00093016,
11174 ++ 0xA0000000, 0x00000000,
11175 + 0x065, 0x00091016,
11176 +- 0xFF0F0200, 0xDEAD,
11177 ++ 0xB0000000, 0x00000000,
11178 + 0x018, 0x00000006,
11179 + 0x0EF, 0x00002000,
11180 + 0x03B, 0x0003824B,
11181 +@@ -1895,9 +2074,10 @@ u32 RTL8821AE_RADIOA_ARRAY[] = {
11182 + 0x0B4, 0x0001214C,
11183 + 0x0B7, 0x0003000C,
11184 + 0x01C, 0x000539D2,
11185 ++ 0x0C4, 0x000AFE00,
11186 + 0x018, 0x0001F12A,
11187 +- 0x0FE, 0x00000000,
11188 +- 0x0FE, 0x00000000,
11189 ++ 0xFFE, 0x00000000,
11190 ++ 0xFFE, 0x00000000,
11191 + 0x018, 0x0001712A,
11192 +
11193 + };
11194 +@@ -2017,6 +2197,7 @@ u32 RTL8812AE_MAC_REG_ARRAY[] = {
11195 + u32 RTL8812AE_MAC_1T_ARRAYLEN = ARRAY_SIZE(RTL8812AE_MAC_REG_ARRAY);
11196 +
11197 + u32 RTL8821AE_MAC_REG_ARRAY[] = {
11198 ++ 0x421, 0x0000000F,
11199 + 0x428, 0x0000000A,
11200 + 0x429, 0x00000010,
11201 + 0x430, 0x00000000,
11202 +@@ -2485,7 +2666,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
11203 + 0x81C, 0xA6360001,
11204 + 0x81C, 0xA5380001,
11205 + 0x81C, 0xA43A0001,
11206 +- 0x81C, 0xA33C0001,
11207 ++ 0x81C, 0x683C0001,
11208 + 0x81C, 0x673E0001,
11209 + 0x81C, 0x66400001,
11210 + 0x81C, 0x65420001,
11211 +@@ -2519,7 +2700,66 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
11212 + 0x81C, 0x017A0001,
11213 + 0x81C, 0x017C0001,
11214 + 0x81C, 0x017E0001,
11215 +- 0xFF0F02C0, 0xABCD,
11216 ++ 0x8000020c, 0x00000000, 0x40000000, 0x00000000,
11217 ++ 0x81C, 0xFB000101,
11218 ++ 0x81C, 0xFA020101,
11219 ++ 0x81C, 0xF9040101,
11220 ++ 0x81C, 0xF8060101,
11221 ++ 0x81C, 0xF7080101,
11222 ++ 0x81C, 0xF60A0101,
11223 ++ 0x81C, 0xF50C0101,
11224 ++ 0x81C, 0xF40E0101,
11225 ++ 0x81C, 0xF3100101,
11226 ++ 0x81C, 0xF2120101,
11227 ++ 0x81C, 0xF1140101,
11228 ++ 0x81C, 0xF0160101,
11229 ++ 0x81C, 0xEF180101,
11230 ++ 0x81C, 0xEE1A0101,
11231 ++ 0x81C, 0xED1C0101,
11232 ++ 0x81C, 0xEC1E0101,
11233 ++ 0x81C, 0xEB200101,
11234 ++ 0x81C, 0xEA220101,
11235 ++ 0x81C, 0xE9240101,
11236 ++ 0x81C, 0xE8260101,
11237 ++ 0x81C, 0xE7280101,
11238 ++ 0x81C, 0xE62A0101,
11239 ++ 0x81C, 0xE52C0101,
11240 ++ 0x81C, 0xE42E0101,
11241 ++ 0x81C, 0xE3300101,
11242 ++ 0x81C, 0xA5320101,
11243 ++ 0x81C, 0xA4340101,
11244 ++ 0x81C, 0xA3360101,
11245 ++ 0x81C, 0x87380101,
11246 ++ 0x81C, 0x863A0101,
11247 ++ 0x81C, 0x853C0101,
11248 ++ 0x81C, 0x843E0101,
11249 ++ 0x81C, 0x69400101,
11250 ++ 0x81C, 0x68420101,
11251 ++ 0x81C, 0x67440101,
11252 ++ 0x81C, 0x66460101,
11253 ++ 0x81C, 0x49480101,
11254 ++ 0x81C, 0x484A0101,
11255 ++ 0x81C, 0x474C0101,
11256 ++ 0x81C, 0x2A4E0101,
11257 ++ 0x81C, 0x29500101,
11258 ++ 0x81C, 0x28520101,
11259 ++ 0x81C, 0x27540101,
11260 ++ 0x81C, 0x26560101,
11261 ++ 0x81C, 0x25580101,
11262 ++ 0x81C, 0x245A0101,
11263 ++ 0x81C, 0x235C0101,
11264 ++ 0x81C, 0x055E0101,
11265 ++ 0x81C, 0x04600101,
11266 ++ 0x81C, 0x03620101,
11267 ++ 0x81C, 0x02640101,
11268 ++ 0x81C, 0x01660101,
11269 ++ 0x81C, 0x01680101,
11270 ++ 0x81C, 0x016A0101,
11271 ++ 0x81C, 0x016C0101,
11272 ++ 0x81C, 0x016E0101,
11273 ++ 0x81C, 0x01700101,
11274 ++ 0x81C, 0x01720101,
11275 ++ 0x9000040c, 0x00000000, 0x40000000, 0x00000000,
11276 + 0x81C, 0xFB000101,
11277 + 0x81C, 0xFA020101,
11278 + 0x81C, 0xF9040101,
11279 +@@ -2578,7 +2818,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
11280 + 0x81C, 0x016E0101,
11281 + 0x81C, 0x01700101,
11282 + 0x81C, 0x01720101,
11283 +- 0xCDCDCDCD, 0xCDCD,
11284 ++ 0xA0000000, 0x00000000,
11285 + 0x81C, 0xFF000101,
11286 + 0x81C, 0xFF020101,
11287 + 0x81C, 0xFE040101,
11288 +@@ -2637,7 +2877,7 @@ u32 RTL8821AE_AGC_TAB_ARRAY[] = {
11289 + 0x81C, 0x046E0101,
11290 + 0x81C, 0x03700101,
11291 + 0x81C, 0x02720101,
11292 +- 0xFF0F02C0, 0xDEAD,
11293 ++ 0xB0000000, 0x00000000,
11294 + 0x81C, 0x01740101,
11295 + 0x81C, 0x01760101,
11296 + 0x81C, 0x01780101,
11297 +diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
11298 +index efbba9caef3bf..8bb6cc8ca74e5 100644
11299 +--- a/drivers/net/wireless/realtek/rtw88/debug.c
11300 ++++ b/drivers/net/wireless/realtek/rtw88/debug.c
11301 +@@ -270,7 +270,7 @@ static ssize_t rtw_debugfs_set_rsvd_page(struct file *filp,
11302 +
11303 + if (num != 2) {
11304 + rtw_warn(rtwdev, "invalid arguments\n");
11305 +- return num;
11306 ++ return -EINVAL;
11307 + }
11308 +
11309 + debugfs_priv->rsvd_page.page_offset = offset;
11310 +diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
11311 +index 5cd9cc42648eb..36e2f0dba00c0 100644
11312 +--- a/drivers/net/wireless/realtek/rtw88/phy.c
11313 ++++ b/drivers/net/wireless/realtek/rtw88/phy.c
11314 +@@ -1518,7 +1518,7 @@ void rtw_phy_load_tables(struct rtw_dev *rtwdev)
11315 + }
11316 + EXPORT_SYMBOL(rtw_phy_load_tables);
11317 +
11318 +-static u8 rtw_get_channel_group(u8 channel)
11319 ++static u8 rtw_get_channel_group(u8 channel, u8 rate)
11320 + {
11321 + switch (channel) {
11322 + default:
11323 +@@ -1562,6 +1562,7 @@ static u8 rtw_get_channel_group(u8 channel)
11324 + case 106:
11325 + return 4;
11326 + case 14:
11327 ++ return rate <= DESC_RATE11M ? 5 : 4;
11328 + case 108:
11329 + case 110:
11330 + case 112:
11331 +@@ -1813,7 +1814,7 @@ void rtw_get_tx_power_params(struct rtw_dev *rtwdev, u8 path, u8 rate, u8 bw,
11332 + s8 *remnant = &pwr_param->pwr_remnant;
11333 +
11334 + pwr_idx = &rtwdev->efuse.txpwr_idx_table[path];
11335 +- group = rtw_get_channel_group(ch);
11336 ++ group = rtw_get_channel_group(ch, rate);
11337 +
11338 + /* base power index for 2.4G/5G */
11339 + if (IS_CH_2G_BAND(ch)) {
11340 +diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
11341 +index e14d88e558f04..85abd0a2d1c90 100644
11342 +--- a/drivers/net/wireless/ti/wlcore/boot.c
11343 ++++ b/drivers/net/wireless/ti/wlcore/boot.c
11344 +@@ -72,6 +72,7 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
11345 + unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
11346 + wl->min_mr_fw_ver : wl->min_sr_fw_ver;
11347 + char min_fw_str[32] = "";
11348 ++ int off = 0;
11349 + int i;
11350 +
11351 + /* the chip must be exactly equal */
11352 +@@ -105,13 +106,15 @@ static int wlcore_validate_fw_ver(struct wl1271 *wl)
11353 + return 0;
11354 +
11355 + fail:
11356 +- for (i = 0; i < NUM_FW_VER; i++)
11357 ++ for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++)
11358 + if (min_ver[i] == WLCORE_FW_VER_IGNORE)
11359 +- snprintf(min_fw_str, sizeof(min_fw_str),
11360 +- "%s*.", min_fw_str);
11361 ++ off += snprintf(min_fw_str + off,
11362 ++ sizeof(min_fw_str) - off,
11363 ++ "*.");
11364 + else
11365 +- snprintf(min_fw_str, sizeof(min_fw_str),
11366 +- "%s%u.", min_fw_str, min_ver[i]);
11367 ++ off += snprintf(min_fw_str + off,
11368 ++ sizeof(min_fw_str) - off,
11369 ++ "%u.", min_ver[i]);
11370 +
11371 + wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
11372 + "Please use at least FW %s\n"
11373 +diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
11374 +index b143293e694f9..a9e13e6d65c50 100644
11375 +--- a/drivers/net/wireless/ti/wlcore/debugfs.h
11376 ++++ b/drivers/net/wireless/ti/wlcore/debugfs.h
11377 +@@ -78,13 +78,14 @@ static ssize_t sub## _ ##name## _read(struct file *file, \
11378 + struct wl1271 *wl = file->private_data; \
11379 + struct struct_type *stats = wl->stats.fw_stats; \
11380 + char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \
11381 ++ int pos = 0; \
11382 + int i; \
11383 + \
11384 + wl1271_debugfs_update_stats(wl); \
11385 + \
11386 +- for (i = 0; i < len; i++) \
11387 +- snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
11388 +- buf, i, stats->sub.name[i]); \
11389 ++ for (i = 0; i < len && pos < sizeof(buf); i++) \
11390 ++ pos += snprintf(buf + pos, sizeof(buf) - pos, \
11391 ++ "[%d] = %d\n", i, stats->sub.name[i]); \
11392 + \
11393 + return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \
11394 + } \
11395 +diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
11396 +index f7464bd6d57cb..18e3435ab8f33 100644
11397 +--- a/drivers/nfc/pn533/pn533.c
11398 ++++ b/drivers/nfc/pn533/pn533.c
11399 +@@ -706,6 +706,9 @@ static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a,
11400 + if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0)
11401 + return false;
11402 +
11403 ++ if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE)
11404 ++ return false;
11405 ++
11406 + return true;
11407 + }
11408 +
11409 +diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
11410 +index e812a0d0fdb3d..f750cf98ae264 100644
11411 +--- a/drivers/nvme/host/multipath.c
11412 ++++ b/drivers/nvme/host/multipath.c
11413 +@@ -667,6 +667,10 @@ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
11414 + if (desc.state) {
11415 + /* found the group desc: update */
11416 + nvme_update_ns_ana_state(&desc, ns);
11417 ++ } else {
11418 ++ /* group desc not found: trigger a re-read */
11419 ++ set_bit(NVME_NS_ANA_PENDING, &ns->flags);
11420 ++ queue_work(nvme_wq, &ns->ctrl->ana_work);
11421 + }
11422 + } else {
11423 + ns->ana_state = NVME_ANA_OPTIMIZED;
11424 +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
11425 +index 716039ea4450e..c1f3446216c5c 100644
11426 +--- a/drivers/nvme/host/pci.c
11427 ++++ b/drivers/nvme/host/pci.c
11428 +@@ -852,7 +852,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
11429 + return nvme_setup_prp_simple(dev, req,
11430 + &cmnd->rw, &bv);
11431 +
11432 +- if (iod->nvmeq->qid &&
11433 ++ if (iod->nvmeq->qid && sgl_threshold &&
11434 + dev->ctrl.sgls & ((1 << 0) | (1 << 1)))
11435 + return nvme_setup_sgl_simple(dev, req,
11436 + &cmnd->rw, &bv);
11437 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
11438 +index 9444e5e2a95ba..4cf81f3841aee 100644
11439 +--- a/drivers/nvme/host/tcp.c
11440 ++++ b/drivers/nvme/host/tcp.c
11441 +@@ -874,7 +874,7 @@ static void nvme_tcp_state_change(struct sock *sk)
11442 + {
11443 + struct nvme_tcp_queue *queue;
11444 +
11445 +- read_lock(&sk->sk_callback_lock);
11446 ++ read_lock_bh(&sk->sk_callback_lock);
11447 + queue = sk->sk_user_data;
11448 + if (!queue)
11449 + goto done;
11450 +@@ -895,7 +895,7 @@ static void nvme_tcp_state_change(struct sock *sk)
11451 +
11452 + queue->state_change(sk);
11453 + done:
11454 +- read_unlock(&sk->sk_callback_lock);
11455 ++ read_unlock_bh(&sk->sk_callback_lock);
11456 + }
11457 +
11458 + static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
11459 +diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
11460 +index d658c6e8263af..d958b5da9b88a 100644
11461 +--- a/drivers/nvme/target/tcp.c
11462 ++++ b/drivers/nvme/target/tcp.c
11463 +@@ -525,11 +525,36 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
11464 + struct nvmet_tcp_cmd *cmd =
11465 + container_of(req, struct nvmet_tcp_cmd, req);
11466 + struct nvmet_tcp_queue *queue = cmd->queue;
11467 ++ struct nvme_sgl_desc *sgl;
11468 ++ u32 len;
11469 ++
11470 ++ if (unlikely(cmd == queue->cmd)) {
11471 ++ sgl = &cmd->req.cmd->common.dptr.sgl;
11472 ++ len = le32_to_cpu(sgl->length);
11473 ++
11474 ++ /*
11475 ++ * Wait for inline data before processing the response.
11476 ++ * Avoid using helpers, this might happen before
11477 ++ * nvmet_req_init is completed.
11478 ++ */
11479 ++ if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
11480 ++ len && len < cmd->req.port->inline_data_size &&
11481 ++ nvme_is_write(cmd->req.cmd))
11482 ++ return;
11483 ++ }
11484 +
11485 + llist_add(&cmd->lentry, &queue->resp_list);
11486 + queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
11487 + }
11488 +
11489 ++static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
11490 ++{
11491 ++ if (unlikely(cmd->flags & NVMET_TCP_F_INIT_FAILED))
11492 ++ nvmet_tcp_queue_response(&cmd->req);
11493 ++ else
11494 ++ cmd->req.execute(&cmd->req);
11495 ++}
11496 ++
11497 + static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
11498 + {
11499 + u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);
11500 +@@ -961,7 +986,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
11501 + le32_to_cpu(req->cmd->common.dptr.sgl.length));
11502 +
11503 + nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
11504 +- return -EAGAIN;
11505 ++ return 0;
11506 + }
11507 +
11508 + ret = nvmet_tcp_map_data(queue->cmd);
11509 +@@ -1104,10 +1129,8 @@ static int nvmet_tcp_try_recv_data(struct nvmet_tcp_queue *queue)
11510 + return 0;
11511 + }
11512 +
11513 +- if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
11514 +- cmd->rbytes_done == cmd->req.transfer_len) {
11515 +- cmd->req.execute(&cmd->req);
11516 +- }
11517 ++ if (cmd->rbytes_done == cmd->req.transfer_len)
11518 ++ nvmet_tcp_execute_request(cmd);
11519 +
11520 + nvmet_prepare_receive_pdu(queue);
11521 + return 0;
11522 +@@ -1144,9 +1167,9 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
11523 + goto out;
11524 + }
11525 +
11526 +- if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED) &&
11527 +- cmd->rbytes_done == cmd->req.transfer_len)
11528 +- cmd->req.execute(&cmd->req);
11529 ++ if (cmd->rbytes_done == cmd->req.transfer_len)
11530 ++ nvmet_tcp_execute_request(cmd);
11531 ++
11532 + ret = 0;
11533 + out:
11534 + nvmet_prepare_receive_pdu(queue);
11535 +@@ -1434,7 +1457,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
11536 + {
11537 + struct nvmet_tcp_queue *queue;
11538 +
11539 +- write_lock_bh(&sk->sk_callback_lock);
11540 ++ read_lock_bh(&sk->sk_callback_lock);
11541 + queue = sk->sk_user_data;
11542 + if (!queue)
11543 + goto done;
11544 +@@ -1452,7 +1475,7 @@ static void nvmet_tcp_state_change(struct sock *sk)
11545 + queue->idx, sk->sk_state);
11546 + }
11547 + done:
11548 +- write_unlock_bh(&sk->sk_callback_lock);
11549 ++ read_unlock_bh(&sk->sk_callback_lock);
11550 + }
11551 +
11552 + static int nvmet_tcp_set_queue_sock(struct nvmet_tcp_queue *queue)
11553 +diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
11554 +index 5e9e60e2e591d..955b8b8c82386 100644
11555 +--- a/drivers/nvmem/qfprom.c
11556 ++++ b/drivers/nvmem/qfprom.c
11557 +@@ -104,6 +104,16 @@ static void qfprom_disable_fuse_blowing(const struct qfprom_priv *priv,
11558 + {
11559 + int ret;
11560 +
11561 ++ /*
11562 ++ * This may be a shared rail and may be able to run at a lower rate
11563 ++ * when we're not blowing fuses. At the moment, the regulator framework
11564 ++ * applies voltage constraints even on disabled rails, so remove our
11565 ++ * constraints and allow the rail to be adjusted by other users.
11566 ++ */
11567 ++ ret = regulator_set_voltage(priv->vcc, 0, INT_MAX);
11568 ++ if (ret)
11569 ++ dev_warn(priv->dev, "Failed to set 0 voltage (ignoring)\n");
11570 ++
11571 + ret = regulator_disable(priv->vcc);
11572 + if (ret)
11573 + dev_warn(priv->dev, "Failed to disable regulator (ignoring)\n");
11574 +@@ -149,6 +159,17 @@ static int qfprom_enable_fuse_blowing(const struct qfprom_priv *priv,
11575 + goto err_clk_prepared;
11576 + }
11577 +
11578 ++ /*
11579 ++ * Hardware requires 1.8V min for fuse blowing; this may be
11580 ++ * a rail shared do don't specify a max--regulator constraints
11581 ++ * will handle.
11582 ++ */
11583 ++ ret = regulator_set_voltage(priv->vcc, 1800000, INT_MAX);
11584 ++ if (ret) {
11585 ++ dev_err(priv->dev, "Failed to set 1.8 voltage\n");
11586 ++ goto err_clk_rate_set;
11587 ++ }
11588 ++
11589 + ret = regulator_enable(priv->vcc);
11590 + if (ret) {
11591 + dev_err(priv->dev, "Failed to enable regulator\n");
11592 +diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
11593 +index 50bbe0edf5380..43a77d7200087 100644
11594 +--- a/drivers/of/overlay.c
11595 ++++ b/drivers/of/overlay.c
11596 +@@ -796,6 +796,7 @@ static int init_overlay_changeset(struct overlay_changeset *ovcs,
11597 + if (!fragment->target) {
11598 + of_node_put(fragment->overlay);
11599 + ret = -EINVAL;
11600 ++ of_node_put(node);
11601 + goto err_free_fragments;
11602 + }
11603 +
11604 +diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
11605 +index a222728238cae..90482d5246ff1 100644
11606 +--- a/drivers/pci/controller/dwc/pci-keystone.c
11607 ++++ b/drivers/pci/controller/dwc/pci-keystone.c
11608 +@@ -811,7 +811,8 @@ static int __init ks_pcie_host_init(struct pcie_port *pp)
11609 + int ret;
11610 +
11611 + pp->bridge->ops = &ks_pcie_ops;
11612 +- pp->bridge->child_ops = &ks_child_pcie_ops;
11613 ++ if (!ks_pcie->is_am6)
11614 ++ pp->bridge->child_ops = &ks_child_pcie_ops;
11615 +
11616 + ret = ks_pcie_config_legacy_irq(ks_pcie);
11617 + if (ret)
11618 +diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
11619 +index 8e0db84f089da..c33b385ac918e 100644
11620 +--- a/drivers/pci/controller/pci-xgene.c
11621 ++++ b/drivers/pci/controller/pci-xgene.c
11622 +@@ -355,7 +355,8 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
11623 + if (IS_ERR(port->csr_base))
11624 + return PTR_ERR(port->csr_base);
11625 +
11626 +- port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
11627 ++ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg");
11628 ++ port->cfg_base = devm_ioremap_resource(dev, res);
11629 + if (IS_ERR(port->cfg_base))
11630 + return PTR_ERR(port->cfg_base);
11631 + port->cfg_addr = res->start;
11632 +diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
11633 +index 7915d10f9aa10..bd549070c0112 100644
11634 +--- a/drivers/pci/vpd.c
11635 ++++ b/drivers/pci/vpd.c
11636 +@@ -570,7 +570,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
11637 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
11638 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
11639 + quirk_blacklist_vpd);
11640 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
11641 + /*
11642 + * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port
11643 + * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class.
11644 +diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
11645 +index 453ef26fa1c7f..aaa0bbe473f76 100644
11646 +--- a/drivers/phy/cadence/phy-cadence-sierra.c
11647 ++++ b/drivers/phy/cadence/phy-cadence-sierra.c
11648 +@@ -319,6 +319,12 @@ static int cdns_sierra_phy_on(struct phy *gphy)
11649 + u32 val;
11650 + int ret;
11651 +
11652 ++ ret = reset_control_deassert(sp->phy_rst);
11653 ++ if (ret) {
11654 ++ dev_err(dev, "Failed to take the PHY out of reset\n");
11655 ++ return ret;
11656 ++ }
11657 ++
11658 + /* Take the PHY lane group out of reset */
11659 + ret = reset_control_deassert(ins->lnk_rst);
11660 + if (ret) {
11661 +@@ -618,7 +624,6 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
11662 +
11663 + pm_runtime_enable(dev);
11664 + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
11665 +- reset_control_deassert(sp->phy_rst);
11666 + return PTR_ERR_OR_ZERO(phy_provider);
11667 +
11668 + put_child:
11669 +diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
11670 +index 8f6273c837ec3..8ab9031c98946 100644
11671 +--- a/drivers/phy/marvell/Kconfig
11672 ++++ b/drivers/phy/marvell/Kconfig
11673 +@@ -3,8 +3,8 @@
11674 + # Phy drivers for Marvell platforms
11675 + #
11676 + config ARMADA375_USBCLUSTER_PHY
11677 +- def_bool y
11678 +- depends on MACH_ARMADA_375 || COMPILE_TEST
11679 ++ bool "Armada 375 USB cluster PHY support" if COMPILE_TEST
11680 ++ default y if MACH_ARMADA_375
11681 + depends on OF && HAS_IOMEM
11682 + select GENERIC_PHY
11683 +
11684 +diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
11685 +index c9cfafe89cbf1..e28e25f98708c 100644
11686 +--- a/drivers/phy/ti/phy-j721e-wiz.c
11687 ++++ b/drivers/phy/ti/phy-j721e-wiz.c
11688 +@@ -615,6 +615,12 @@ static void wiz_clock_cleanup(struct wiz *wiz, struct device_node *node)
11689 + of_clk_del_provider(clk_node);
11690 + of_node_put(clk_node);
11691 + }
11692 ++
11693 ++ for (i = 0; i < wiz->clk_div_sel_num; i++) {
11694 ++ clk_node = of_get_child_by_name(node, clk_div_sel[i].node_name);
11695 ++ of_clk_del_provider(clk_node);
11696 ++ of_node_put(clk_node);
11697 ++ }
11698 + }
11699 +
11700 + static int wiz_clock_init(struct wiz *wiz, struct device_node *node)
11701 +@@ -947,27 +953,24 @@ static int wiz_probe(struct platform_device *pdev)
11702 + goto err_get_sync;
11703 + }
11704 +
11705 ++ ret = wiz_init(wiz);
11706 ++ if (ret) {
11707 ++ dev_err(dev, "WIZ initialization failed\n");
11708 ++ goto err_wiz_init;
11709 ++ }
11710 ++
11711 + serdes_pdev = of_platform_device_create(child_node, NULL, dev);
11712 + if (!serdes_pdev) {
11713 + dev_WARN(dev, "Unable to create SERDES platform device\n");
11714 + ret = -ENOMEM;
11715 +- goto err_pdev_create;
11716 +- }
11717 +- wiz->serdes_pdev = serdes_pdev;
11718 +-
11719 +- ret = wiz_init(wiz);
11720 +- if (ret) {
11721 +- dev_err(dev, "WIZ initialization failed\n");
11722 + goto err_wiz_init;
11723 + }
11724 ++ wiz->serdes_pdev = serdes_pdev;
11725 +
11726 + of_node_put(child_node);
11727 + return 0;
11728 +
11729 + err_wiz_init:
11730 +- of_platform_device_destroy(&serdes_pdev->dev, NULL);
11731 +-
11732 +-err_pdev_create:
11733 + wiz_clock_cleanup(wiz, node);
11734 +
11735 + err_get_sync:
11736 +diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
11737 +index f3cd7e2967126..12cc4eb186377 100644
11738 +--- a/drivers/pinctrl/pinctrl-single.c
11739 ++++ b/drivers/pinctrl/pinctrl-single.c
11740 +@@ -270,20 +270,44 @@ static void __maybe_unused pcs_writel(unsigned val, void __iomem *reg)
11741 + writel(val, reg);
11742 + }
11743 +
11744 ++static unsigned int pcs_pin_reg_offset_get(struct pcs_device *pcs,
11745 ++ unsigned int pin)
11746 ++{
11747 ++ unsigned int mux_bytes = pcs->width / BITS_PER_BYTE;
11748 ++
11749 ++ if (pcs->bits_per_mux) {
11750 ++ unsigned int pin_offset_bytes;
11751 ++
11752 ++ pin_offset_bytes = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
11753 ++ return (pin_offset_bytes / mux_bytes) * mux_bytes;
11754 ++ }
11755 ++
11756 ++ return pin * mux_bytes;
11757 ++}
11758 ++
11759 ++static unsigned int pcs_pin_shift_reg_get(struct pcs_device *pcs,
11760 ++ unsigned int pin)
11761 ++{
11762 ++ return (pin % (pcs->width / pcs->bits_per_pin)) * pcs->bits_per_pin;
11763 ++}
11764 ++
11765 + static void pcs_pin_dbg_show(struct pinctrl_dev *pctldev,
11766 + struct seq_file *s,
11767 + unsigned pin)
11768 + {
11769 + struct pcs_device *pcs;
11770 +- unsigned val, mux_bytes;
11771 ++ unsigned int val;
11772 + unsigned long offset;
11773 + size_t pa;
11774 +
11775 + pcs = pinctrl_dev_get_drvdata(pctldev);
11776 +
11777 +- mux_bytes = pcs->width / BITS_PER_BYTE;
11778 +- offset = pin * mux_bytes;
11779 ++ offset = pcs_pin_reg_offset_get(pcs, pin);
11780 + val = pcs->read(pcs->base + offset);
11781 ++
11782 ++ if (pcs->bits_per_mux)
11783 ++ val &= pcs->fmask << pcs_pin_shift_reg_get(pcs, pin);
11784 ++
11785 + pa = pcs->res->start + offset;
11786 +
11787 + seq_printf(s, "%zx %08x %s ", pa, val, DRIVER_NAME);
11788 +@@ -384,7 +408,6 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
11789 + struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
11790 + struct pcs_gpiofunc_range *frange = NULL;
11791 + struct list_head *pos, *tmp;
11792 +- int mux_bytes = 0;
11793 + unsigned data;
11794 +
11795 + /* If function mask is null, return directly. */
11796 +@@ -392,29 +415,27 @@ static int pcs_request_gpio(struct pinctrl_dev *pctldev,
11797 + return -ENOTSUPP;
11798 +
11799 + list_for_each_safe(pos, tmp, &pcs->gpiofuncs) {
11800 ++ u32 offset;
11801 ++
11802 + frange = list_entry(pos, struct pcs_gpiofunc_range, node);
11803 + if (pin >= frange->offset + frange->npins
11804 + || pin < frange->offset)
11805 + continue;
11806 +- mux_bytes = pcs->width / BITS_PER_BYTE;
11807 +
11808 +- if (pcs->bits_per_mux) {
11809 +- int byte_num, offset, pin_shift;
11810 ++ offset = pcs_pin_reg_offset_get(pcs, pin);
11811 +
11812 +- byte_num = (pcs->bits_per_pin * pin) / BITS_PER_BYTE;
11813 +- offset = (byte_num / mux_bytes) * mux_bytes;
11814 +- pin_shift = pin % (pcs->width / pcs->bits_per_pin) *
11815 +- pcs->bits_per_pin;
11816 ++ if (pcs->bits_per_mux) {
11817 ++ int pin_shift = pcs_pin_shift_reg_get(pcs, pin);
11818 +
11819 + data = pcs->read(pcs->base + offset);
11820 + data &= ~(pcs->fmask << pin_shift);
11821 + data |= frange->gpiofunc << pin_shift;
11822 + pcs->write(data, pcs->base + offset);
11823 + } else {
11824 +- data = pcs->read(pcs->base + pin * mux_bytes);
11825 ++ data = pcs->read(pcs->base + offset);
11826 + data &= ~pcs->fmask;
11827 + data |= frange->gpiofunc;
11828 +- pcs->write(data, pcs->base + pin * mux_bytes);
11829 ++ pcs->write(data, pcs->base + offset);
11830 + }
11831 + break;
11832 + }
11833 +@@ -656,10 +677,8 @@ static const struct pinconf_ops pcs_pinconf_ops = {
11834 + * pcs_add_pin() - add a pin to the static per controller pin array
11835 + * @pcs: pcs driver instance
11836 + * @offset: register offset from base
11837 +- * @pin_pos: unused
11838 + */
11839 +-static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
11840 +- unsigned pin_pos)
11841 ++static int pcs_add_pin(struct pcs_device *pcs, unsigned int offset)
11842 + {
11843 + struct pcs_soc_data *pcs_soc = &pcs->socdata;
11844 + struct pinctrl_pin_desc *pin;
11845 +@@ -728,17 +747,9 @@ static int pcs_allocate_pin_table(struct pcs_device *pcs)
11846 + for (i = 0; i < pcs->desc.npins; i++) {
11847 + unsigned offset;
11848 + int res;
11849 +- int byte_num;
11850 +- int pin_pos = 0;
11851 +
11852 +- if (pcs->bits_per_mux) {
11853 +- byte_num = (pcs->bits_per_pin * i) / BITS_PER_BYTE;
11854 +- offset = (byte_num / mux_bytes) * mux_bytes;
11855 +- pin_pos = i % num_pins_in_register;
11856 +- } else {
11857 +- offset = i * mux_bytes;
11858 +- }
11859 +- res = pcs_add_pin(pcs, offset, pin_pos);
11860 ++ offset = pcs_pin_reg_offset_get(pcs, i);
11861 ++ res = pcs_add_pin(pcs, offset);
11862 + if (res < 0) {
11863 + dev_err(pcs->dev, "error adding pins: %i\n", res);
11864 + return res;
11865 +diff --git a/drivers/platform/x86/pmc_atom.c b/drivers/platform/x86/pmc_atom.c
11866 +index ca684ed760d14..a9d2a4b98e570 100644
11867 +--- a/drivers/platform/x86/pmc_atom.c
11868 ++++ b/drivers/platform/x86/pmc_atom.c
11869 +@@ -393,34 +393,10 @@ static const struct dmi_system_id critclk_systems[] = {
11870 + },
11871 + {
11872 + /* pmc_plt_clk* - are used for ethernet controllers */
11873 +- .ident = "Beckhoff CB3163",
11874 ++ .ident = "Beckhoff Baytrail",
11875 + .matches = {
11876 + DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
11877 +- DMI_MATCH(DMI_BOARD_NAME, "CB3163"),
11878 +- },
11879 +- },
11880 +- {
11881 +- /* pmc_plt_clk* - are used for ethernet controllers */
11882 +- .ident = "Beckhoff CB4063",
11883 +- .matches = {
11884 +- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
11885 +- DMI_MATCH(DMI_BOARD_NAME, "CB4063"),
11886 +- },
11887 +- },
11888 +- {
11889 +- /* pmc_plt_clk* - are used for ethernet controllers */
11890 +- .ident = "Beckhoff CB6263",
11891 +- .matches = {
11892 +- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
11893 +- DMI_MATCH(DMI_BOARD_NAME, "CB6263"),
11894 +- },
11895 +- },
11896 +- {
11897 +- /* pmc_plt_clk* - are used for ethernet controllers */
11898 +- .ident = "Beckhoff CB6363",
11899 +- .matches = {
11900 +- DMI_MATCH(DMI_SYS_VENDOR, "Beckhoff Automation"),
11901 +- DMI_MATCH(DMI_BOARD_NAME, "CB6363"),
11902 ++ DMI_MATCH(DMI_PRODUCT_FAMILY, "CBxx63"),
11903 + },
11904 + },
11905 + {
11906 +diff --git a/drivers/power/supply/bq25980_charger.c b/drivers/power/supply/bq25980_charger.c
11907 +index c936f311eb4f0..b94ecf814e434 100644
11908 +--- a/drivers/power/supply/bq25980_charger.c
11909 ++++ b/drivers/power/supply/bq25980_charger.c
11910 +@@ -606,33 +606,6 @@ static int bq25980_get_state(struct bq25980_device *bq,
11911 + return 0;
11912 + }
11913 +
11914 +-static int bq25980_set_battery_property(struct power_supply *psy,
11915 +- enum power_supply_property psp,
11916 +- const union power_supply_propval *val)
11917 +-{
11918 +- struct bq25980_device *bq = power_supply_get_drvdata(psy);
11919 +- int ret = 0;
11920 +-
11921 +- switch (psp) {
11922 +- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
11923 +- ret = bq25980_set_const_charge_curr(bq, val->intval);
11924 +- if (ret)
11925 +- return ret;
11926 +- break;
11927 +-
11928 +- case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
11929 +- ret = bq25980_set_const_charge_volt(bq, val->intval);
11930 +- if (ret)
11931 +- return ret;
11932 +- break;
11933 +-
11934 +- default:
11935 +- return -EINVAL;
11936 +- }
11937 +-
11938 +- return ret;
11939 +-}
11940 +-
11941 + static int bq25980_get_battery_property(struct power_supply *psy,
11942 + enum power_supply_property psp,
11943 + union power_supply_propval *val)
11944 +@@ -701,6 +674,18 @@ static int bq25980_set_charger_property(struct power_supply *psy,
11945 + return ret;
11946 + break;
11947 +
11948 ++ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
11949 ++ ret = bq25980_set_const_charge_curr(bq, val->intval);
11950 ++ if (ret)
11951 ++ return ret;
11952 ++ break;
11953 ++
11954 ++ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
11955 ++ ret = bq25980_set_const_charge_volt(bq, val->intval);
11956 ++ if (ret)
11957 ++ return ret;
11958 ++ break;
11959 ++
11960 + default:
11961 + return -EINVAL;
11962 + }
11963 +@@ -922,7 +907,6 @@ static struct power_supply_desc bq25980_battery_desc = {
11964 + .name = "bq25980-battery",
11965 + .type = POWER_SUPPLY_TYPE_BATTERY,
11966 + .get_property = bq25980_get_battery_property,
11967 +- .set_property = bq25980_set_battery_property,
11968 + .properties = bq25980_battery_props,
11969 + .num_properties = ARRAY_SIZE(bq25980_battery_props),
11970 + .property_is_writeable = bq25980_property_is_writeable,
11971 +diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c
11972 +index a8b5832a5a1bb..204a2da054f53 100644
11973 +--- a/drivers/regulator/bd9576-regulator.c
11974 ++++ b/drivers/regulator/bd9576-regulator.c
11975 +@@ -206,7 +206,7 @@ static int bd957x_probe(struct platform_device *pdev)
11976 + {
11977 + struct regmap *regmap;
11978 + struct regulator_config config = { 0 };
11979 +- int i, err;
11980 ++ int i;
11981 + bool vout_mode, ddr_sel;
11982 + const struct bd957x_regulator_data *reg_data = &bd9576_regulators[0];
11983 + unsigned int num_reg_data = ARRAY_SIZE(bd9576_regulators);
11984 +@@ -279,8 +279,7 @@ static int bd957x_probe(struct platform_device *pdev)
11985 + break;
11986 + default:
11987 + dev_err(&pdev->dev, "Unsupported chip type\n");
11988 +- err = -EINVAL;
11989 +- goto err;
11990 ++ return -EINVAL;
11991 + }
11992 +
11993 + config.dev = pdev->dev.parent;
11994 +@@ -300,8 +299,7 @@ static int bd957x_probe(struct platform_device *pdev)
11995 + dev_err(&pdev->dev,
11996 + "failed to register %s regulator\n",
11997 + desc->name);
11998 +- err = PTR_ERR(rdev);
11999 +- goto err;
12000 ++ return PTR_ERR(rdev);
12001 + }
12002 + /*
12003 + * Clear the VOUT1 GPIO setting - rest of the regulators do not
12004 +@@ -310,8 +308,7 @@ static int bd957x_probe(struct platform_device *pdev)
12005 + config.ena_gpiod = NULL;
12006 + }
12007 +
12008 +-err:
12009 +- return err;
12010 ++ return 0;
12011 + }
12012 +
12013 + static const struct platform_device_id bd957x_pmic_id[] = {
12014 +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
12015 +index 22eecc89d41bd..6c2a97f80b120 100644
12016 +--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
12017 ++++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
12018 +@@ -1644,7 +1644,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
12019 + idx = i * HISI_SAS_PHY_INT_NR;
12020 + for (j = 0; j < HISI_SAS_PHY_INT_NR; j++, idx++) {
12021 + irq = platform_get_irq(pdev, idx);
12022 +- if (!irq) {
12023 ++ if (irq < 0) {
12024 + dev_err(dev, "irq init: fail map phy interrupt %d\n",
12025 + idx);
12026 + return -ENOENT;
12027 +@@ -1663,7 +1663,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
12028 + idx = hisi_hba->n_phy * HISI_SAS_PHY_INT_NR;
12029 + for (i = 0; i < hisi_hba->queue_count; i++, idx++) {
12030 + irq = platform_get_irq(pdev, idx);
12031 +- if (!irq) {
12032 ++ if (irq < 0) {
12033 + dev_err(dev, "irq init: could not map cq interrupt %d\n",
12034 + idx);
12035 + return -ENOENT;
12036 +@@ -1681,7 +1681,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
12037 + idx = (hisi_hba->n_phy * HISI_SAS_PHY_INT_NR) + hisi_hba->queue_count;
12038 + for (i = 0; i < HISI_SAS_FATAL_INT_NR; i++, idx++) {
12039 + irq = platform_get_irq(pdev, idx);
12040 +- if (!irq) {
12041 ++ if (irq < 0) {
12042 + dev_err(dev, "irq init: could not map fatal interrupt %d\n",
12043 + idx);
12044 + return -ENOENT;
12045 +diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
12046 +index 57c9a71fa33a7..f6d6539c657f0 100644
12047 +--- a/drivers/scsi/ibmvscsi/ibmvfc.c
12048 ++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
12049 +@@ -532,8 +532,17 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
12050 + if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
12051 + vhost->action = action;
12052 + break;
12053 ++ case IBMVFC_HOST_ACTION_REENABLE:
12054 ++ case IBMVFC_HOST_ACTION_RESET:
12055 ++ vhost->action = action;
12056 ++ break;
12057 + case IBMVFC_HOST_ACTION_INIT:
12058 + case IBMVFC_HOST_ACTION_TGT_DEL:
12059 ++ case IBMVFC_HOST_ACTION_LOGO:
12060 ++ case IBMVFC_HOST_ACTION_QUERY_TGTS:
12061 ++ case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
12062 ++ case IBMVFC_HOST_ACTION_NONE:
12063 ++ default:
12064 + switch (vhost->action) {
12065 + case IBMVFC_HOST_ACTION_RESET:
12066 + case IBMVFC_HOST_ACTION_REENABLE:
12067 +@@ -543,15 +552,6 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
12068 + break;
12069 + }
12070 + break;
12071 +- case IBMVFC_HOST_ACTION_LOGO:
12072 +- case IBMVFC_HOST_ACTION_QUERY_TGTS:
12073 +- case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
12074 +- case IBMVFC_HOST_ACTION_NONE:
12075 +- case IBMVFC_HOST_ACTION_RESET:
12076 +- case IBMVFC_HOST_ACTION_REENABLE:
12077 +- default:
12078 +- vhost->action = action;
12079 +- break;
12080 + }
12081 + }
12082 +
12083 +@@ -4658,26 +4658,45 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
12084 + case IBMVFC_HOST_ACTION_INIT_WAIT:
12085 + break;
12086 + case IBMVFC_HOST_ACTION_RESET:
12087 +- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
12088 + spin_unlock_irqrestore(vhost->host->host_lock, flags);
12089 + rc = ibmvfc_reset_crq(vhost);
12090 ++
12091 + spin_lock_irqsave(vhost->host->host_lock, flags);
12092 +- if (rc == H_CLOSED)
12093 ++ if (!rc || rc == H_CLOSED)
12094 + vio_enable_interrupts(to_vio_dev(vhost->dev));
12095 +- if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
12096 +- (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
12097 +- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
12098 +- dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
12099 ++ if (vhost->action == IBMVFC_HOST_ACTION_RESET) {
12100 ++ /*
12101 ++ * The only action we could have changed to would have
12102 ++ * been reenable, in which case, we skip the rest of
12103 ++ * this path and wait until we've done the re-enable
12104 ++ * before sending the crq init.
12105 ++ */
12106 ++ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
12107 ++
12108 ++ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
12109 ++ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
12110 ++ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
12111 ++ dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
12112 ++ }
12113 + }
12114 + break;
12115 + case IBMVFC_HOST_ACTION_REENABLE:
12116 +- vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
12117 + spin_unlock_irqrestore(vhost->host->host_lock, flags);
12118 + rc = ibmvfc_reenable_crq_queue(vhost);
12119 ++
12120 + spin_lock_irqsave(vhost->host->host_lock, flags);
12121 +- if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
12122 +- ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
12123 +- dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
12124 ++ if (vhost->action == IBMVFC_HOST_ACTION_REENABLE) {
12125 ++ /*
12126 ++ * The only action we could have changed to would have
12127 ++ * been reset, in which case, we skip the rest of this
12128 ++ * path and wait until we've done the reset before
12129 ++ * sending the crq init.
12130 ++ */
12131 ++ vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
12132 ++ if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
12133 ++ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
12134 ++ dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
12135 ++ }
12136 + }
12137 + break;
12138 + case IBMVFC_HOST_ACTION_LOGO:
12139 +diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
12140 +index f0ed6863cc700..60a88a95a8e23 100644
12141 +--- a/drivers/scsi/jazz_esp.c
12142 ++++ b/drivers/scsi/jazz_esp.c
12143 +@@ -143,7 +143,9 @@ static int esp_jazz_probe(struct platform_device *dev)
12144 + if (!esp->command_block)
12145 + goto fail_unmap_regs;
12146 +
12147 +- host->irq = platform_get_irq(dev, 0);
12148 ++ host->irq = err = platform_get_irq(dev, 0);
12149 ++ if (err < 0)
12150 ++ goto fail_unmap_command_block;
12151 + err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
12152 + if (err < 0)
12153 + goto fail_unmap_command_block;
12154 +diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
12155 +index c4705269e39fa..355d1c5f2194c 100644
12156 +--- a/drivers/scsi/pm8001/pm8001_hwi.c
12157 ++++ b/drivers/scsi/pm8001/pm8001_hwi.c
12158 +@@ -643,7 +643,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
12159 + */
12160 + static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
12161 + {
12162 +- u8 i = 0;
12163 ++ u32 i = 0;
12164 + u16 deviceid;
12165 + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid);
12166 + /* 8081 controllers need BAR shift to access MPI space
12167 +diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
12168 +index 055f7649676ec..27b354860a16e 100644
12169 +--- a/drivers/scsi/pm8001/pm80xx_hwi.c
12170 ++++ b/drivers/scsi/pm8001/pm80xx_hwi.c
12171 +@@ -1488,9 +1488,9 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
12172 +
12173 + /* wait until Inbound DoorBell Clear Register toggled */
12174 + if (IS_SPCV_12G(pm8001_ha->pdev)) {
12175 +- max_wait_count = 4 * 1000 * 1000;/* 4 sec */
12176 ++ max_wait_count = 30 * 1000 * 1000; /* 30 sec */
12177 + } else {
12178 +- max_wait_count = 2 * 1000 * 1000;/* 2 sec */
12179 ++ max_wait_count = 15 * 1000 * 1000; /* 15 sec */
12180 + }
12181 + do {
12182 + udelay(1);
12183 +diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
12184 +index 9e2e196bc2026..97c6f81b1d2a6 100644
12185 +--- a/drivers/scsi/sni_53c710.c
12186 ++++ b/drivers/scsi/sni_53c710.c
12187 +@@ -58,6 +58,7 @@ static int snirm710_probe(struct platform_device *dev)
12188 + struct NCR_700_Host_Parameters *hostdata;
12189 + struct Scsi_Host *host;
12190 + struct resource *res;
12191 ++ int rc;
12192 +
12193 + res = platform_get_resource(dev, IORESOURCE_MEM, 0);
12194 + if (!res)
12195 +@@ -83,7 +84,9 @@ static int snirm710_probe(struct platform_device *dev)
12196 + goto out_kfree;
12197 + host->this_id = 7;
12198 + host->base = base;
12199 +- host->irq = platform_get_irq(dev, 0);
12200 ++ host->irq = rc = platform_get_irq(dev, 0);
12201 ++ if (rc < 0)
12202 ++ goto out_put_host;
12203 + if(request_irq(host->irq, NCR_700_intr, IRQF_SHARED, "snirm710", host)) {
12204 + printk(KERN_ERR "snirm710: request_irq failed!\n");
12205 + goto out_put_host;
12206 +diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
12207 +index 7de82f2c97579..d3489ac7ab28b 100644
12208 +--- a/drivers/scsi/sun3x_esp.c
12209 ++++ b/drivers/scsi/sun3x_esp.c
12210 +@@ -206,7 +206,9 @@ static int esp_sun3x_probe(struct platform_device *dev)
12211 + if (!esp->command_block)
12212 + goto fail_unmap_regs_dma;
12213 +
12214 +- host->irq = platform_get_irq(dev, 0);
12215 ++ host->irq = err = platform_get_irq(dev, 0);
12216 ++ if (err < 0)
12217 ++ goto fail_unmap_command_block;
12218 + err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED,
12219 + "SUN3X ESP", esp);
12220 + if (err < 0)
12221 +diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
12222 +index 3db0af66c71c0..24927cf485b47 100644
12223 +--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
12224 ++++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
12225 +@@ -377,7 +377,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
12226 +
12227 + irq = platform_get_irq(pdev, 0);
12228 + if (irq < 0) {
12229 +- err = -ENODEV;
12230 ++ err = irq;
12231 + goto out;
12232 + }
12233 +
12234 +diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
12235 +index dbe5325a324d5..538d7aab8db5c 100644
12236 +--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
12237 ++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
12238 +@@ -95,8 +95,10 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
12239 + return -EINTR;
12240 + }
12241 + ret = kfifo_to_user(&chan->fifo, buffer, count, &copied);
12242 ++ if (ret)
12243 ++ return ret;
12244 +
12245 +- return ret ? ret : copied;
12246 ++ return copied;
12247 + }
12248 +
12249 + static __poll_t snoop_file_poll(struct file *file,
12250 +diff --git a/drivers/soc/qcom/mdt_loader.c b/drivers/soc/qcom/mdt_loader.c
12251 +index 24cd193dec550..eba7f76f9d61a 100644
12252 +--- a/drivers/soc/qcom/mdt_loader.c
12253 ++++ b/drivers/soc/qcom/mdt_loader.c
12254 +@@ -230,6 +230,14 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
12255 + break;
12256 + }
12257 +
12258 ++ if (phdr->p_filesz > phdr->p_memsz) {
12259 ++ dev_err(dev,
12260 ++ "refusing to load segment %d with p_filesz > p_memsz\n",
12261 ++ i);
12262 ++ ret = -EINVAL;
12263 ++ break;
12264 ++ }
12265 ++
12266 + ptr = mem_region + offset;
12267 +
12268 + if (phdr->p_filesz && phdr->p_offset < fw->size) {
12269 +@@ -253,6 +261,15 @@ static int __qcom_mdt_load(struct device *dev, const struct firmware *fw,
12270 + break;
12271 + }
12272 +
12273 ++ if (seg_fw->size != phdr->p_filesz) {
12274 ++ dev_err(dev,
12275 ++ "failed to load segment %d from truncated file %s\n",
12276 ++ i, fw_name);
12277 ++ release_firmware(seg_fw);
12278 ++ ret = -EINVAL;
12279 ++ break;
12280 ++ }
12281 ++
12282 + release_firmware(seg_fw);
12283 + }
12284 +
12285 +diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
12286 +index f63135c09667f..205cc96823b70 100644
12287 +--- a/drivers/soc/qcom/pdr_interface.c
12288 ++++ b/drivers/soc/qcom/pdr_interface.c
12289 +@@ -153,7 +153,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
12290 + if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
12291 + pr_err("PDR: %s register listener failed: 0x%x\n",
12292 + pds->service_path, resp.resp.error);
12293 +- return ret;
12294 ++ return -EREMOTEIO;
12295 + }
12296 +
12297 + pds->state = resp.curr_state;
12298 +diff --git a/drivers/soc/tegra/regulators-tegra30.c b/drivers/soc/tegra/regulators-tegra30.c
12299 +index 7f21f31de09d6..0e776b20f6252 100644
12300 +--- a/drivers/soc/tegra/regulators-tegra30.c
12301 ++++ b/drivers/soc/tegra/regulators-tegra30.c
12302 +@@ -178,7 +178,7 @@ static int tegra30_voltage_update(struct tegra_regulator_coupler *tegra,
12303 + * survive the voltage drop if it's running on a higher frequency.
12304 + */
12305 + if (!cpu_min_uV_consumers)
12306 +- cpu_min_uV = cpu_uV;
12307 ++ cpu_min_uV = max(cpu_uV, cpu_min_uV);
12308 +
12309 + /*
12310 + * Bootloader shall set up voltages correctly, but if it
12311 +diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
12312 +index 1fe786855095a..3317a02bcc170 100644
12313 +--- a/drivers/soundwire/bus.c
12314 ++++ b/drivers/soundwire/bus.c
12315 +@@ -703,7 +703,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
12316 + struct sdw_slave *slave, *_s;
12317 + struct sdw_slave_id id;
12318 + struct sdw_msg msg;
12319 +- bool found = false;
12320 ++ bool found;
12321 + int count = 0, ret;
12322 + u64 addr;
12323 +
12324 +@@ -735,6 +735,7 @@ static int sdw_program_device_num(struct sdw_bus *bus)
12325 +
12326 + sdw_extract_slave_id(bus, addr, &id);
12327 +
12328 ++ found = false;
12329 + /* Now compare with entries */
12330 + list_for_each_entry_safe(slave, _s, &bus->slaves, node) {
12331 + if (sdw_compare_devid(slave, id) == 0) {
12332 +diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
12333 +index 1099b5d1262be..a418c3c7001c0 100644
12334 +--- a/drivers/soundwire/stream.c
12335 ++++ b/drivers/soundwire/stream.c
12336 +@@ -1375,8 +1375,16 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
12337 + }
12338 +
12339 + ret = sdw_config_stream(&slave->dev, stream, stream_config, true);
12340 +- if (ret)
12341 ++ if (ret) {
12342 ++ /*
12343 ++ * sdw_release_master_stream will release s_rt in slave_rt_list in
12344 ++ * stream_error case, but s_rt is only added to slave_rt_list
12345 ++ * when sdw_config_stream is successful, so free s_rt explicitly
12346 ++ * when sdw_config_stream is failed.
12347 ++ */
12348 ++ kfree(s_rt);
12349 + goto stream_error;
12350 ++ }
12351 +
12352 + list_add_tail(&s_rt->m_rt_node, &m_rt->slave_rt_list);
12353 +
12354 +diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
12355 +index a2886ee44e4cb..5d98611dd999d 100644
12356 +--- a/drivers/spi/spi-fsl-lpspi.c
12357 ++++ b/drivers/spi/spi-fsl-lpspi.c
12358 +@@ -200,7 +200,7 @@ static int lpspi_prepare_xfer_hardware(struct spi_controller *controller)
12359 + spi_controller_get_devdata(controller);
12360 + int ret;
12361 +
12362 +- ret = pm_runtime_get_sync(fsl_lpspi->dev);
12363 ++ ret = pm_runtime_resume_and_get(fsl_lpspi->dev);
12364 + if (ret < 0) {
12365 + dev_err(fsl_lpspi->dev, "failed to enable clock\n");
12366 + return ret;
12367 +diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
12368 +index e4a8d203f9408..d0e5aa18b7bad 100644
12369 +--- a/drivers/spi/spi-fsl-spi.c
12370 ++++ b/drivers/spi/spi-fsl-spi.c
12371 +@@ -707,6 +707,11 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
12372 + struct resource mem;
12373 + int irq, type;
12374 + int ret;
12375 ++ bool spisel_boot = false;
12376 ++#if IS_ENABLED(CONFIG_FSL_SOC)
12377 ++ struct mpc8xxx_spi_probe_info *pinfo = NULL;
12378 ++#endif
12379 ++
12380 +
12381 + ret = of_mpc8xxx_spi_probe(ofdev);
12382 + if (ret)
12383 +@@ -715,9 +720,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
12384 + type = fsl_spi_get_type(&ofdev->dev);
12385 + if (type == TYPE_FSL) {
12386 + struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
12387 +- bool spisel_boot = false;
12388 + #if IS_ENABLED(CONFIG_FSL_SOC)
12389 +- struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
12390 ++ pinfo = to_of_pinfo(pdata);
12391 +
12392 + spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
12393 + if (spisel_boot) {
12394 +@@ -746,15 +750,24 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
12395 +
12396 + ret = of_address_to_resource(np, 0, &mem);
12397 + if (ret)
12398 +- return ret;
12399 ++ goto unmap_out;
12400 +
12401 + irq = platform_get_irq(ofdev, 0);
12402 +- if (irq < 0)
12403 +- return irq;
12404 ++ if (irq < 0) {
12405 ++ ret = irq;
12406 ++ goto unmap_out;
12407 ++ }
12408 +
12409 + master = fsl_spi_probe(dev, &mem, irq);
12410 +
12411 + return PTR_ERR_OR_ZERO(master);
12412 ++
12413 ++unmap_out:
12414 ++#if IS_ENABLED(CONFIG_FSL_SOC)
12415 ++ if (spisel_boot)
12416 ++ iounmap(pinfo->immr_spi_cs);
12417 ++#endif
12418 ++ return ret;
12419 + }
12420 +
12421 + static int of_fsl_spi_remove(struct platform_device *ofdev)
12422 +diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
12423 +index 75a8a9428ff89..0aab37cd64e74 100644
12424 +--- a/drivers/spi/spi-rockchip.c
12425 ++++ b/drivers/spi/spi-rockchip.c
12426 +@@ -474,7 +474,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
12427 + return 1;
12428 + }
12429 +
12430 +-static void rockchip_spi_config(struct rockchip_spi *rs,
12431 ++static int rockchip_spi_config(struct rockchip_spi *rs,
12432 + struct spi_device *spi, struct spi_transfer *xfer,
12433 + bool use_dma, bool slave_mode)
12434 + {
12435 +@@ -519,7 +519,9 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
12436 + * ctlr->bits_per_word_mask, so this shouldn't
12437 + * happen
12438 + */
12439 +- unreachable();
12440 ++ dev_err(rs->dev, "unknown bits per word: %d\n",
12441 ++ xfer->bits_per_word);
12442 ++ return -EINVAL;
12443 + }
12444 +
12445 + if (use_dma) {
12446 +@@ -552,6 +554,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
12447 + */
12448 + writel_relaxed(2 * DIV_ROUND_UP(rs->freq, 2 * xfer->speed_hz),
12449 + rs->regs + ROCKCHIP_SPI_BAUDR);
12450 ++
12451 ++ return 0;
12452 + }
12453 +
12454 + static size_t rockchip_spi_max_transfer_size(struct spi_device *spi)
12455 +@@ -575,6 +579,7 @@ static int rockchip_spi_transfer_one(
12456 + struct spi_transfer *xfer)
12457 + {
12458 + struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
12459 ++ int ret;
12460 + bool use_dma;
12461 +
12462 + WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
12463 +@@ -594,7 +599,9 @@ static int rockchip_spi_transfer_one(
12464 +
12465 + use_dma = ctlr->can_dma ? ctlr->can_dma(ctlr, spi, xfer) : false;
12466 +
12467 +- rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
12468 ++ ret = rockchip_spi_config(rs, spi, xfer, use_dma, ctlr->slave);
12469 ++ if (ret)
12470 ++ return ret;
12471 +
12472 + if (use_dma)
12473 + return rockchip_spi_prepare_dma(rs, ctlr, xfer);
12474 +diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
12475 +index 53c4311cc6ab5..0318f02d62123 100644
12476 +--- a/drivers/spi/spi-stm32.c
12477 ++++ b/drivers/spi/spi-stm32.c
12478 +@@ -1830,7 +1830,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
12479 + struct resource *res;
12480 + int ret;
12481 +
12482 +- master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
12483 ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi));
12484 + if (!master) {
12485 + dev_err(&pdev->dev, "spi master allocation failed\n");
12486 + return -ENOMEM;
12487 +@@ -1848,18 +1848,16 @@ static int stm32_spi_probe(struct platform_device *pdev)
12488 +
12489 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
12490 + spi->base = devm_ioremap_resource(&pdev->dev, res);
12491 +- if (IS_ERR(spi->base)) {
12492 +- ret = PTR_ERR(spi->base);
12493 +- goto err_master_put;
12494 +- }
12495 ++ if (IS_ERR(spi->base))
12496 ++ return PTR_ERR(spi->base);
12497 +
12498 + spi->phys_addr = (dma_addr_t)res->start;
12499 +
12500 + spi->irq = platform_get_irq(pdev, 0);
12501 +- if (spi->irq <= 0) {
12502 +- ret = dev_err_probe(&pdev->dev, spi->irq, "failed to get irq\n");
12503 +- goto err_master_put;
12504 +- }
12505 ++ if (spi->irq <= 0)
12506 ++ return dev_err_probe(&pdev->dev, spi->irq,
12507 ++ "failed to get irq\n");
12508 ++
12509 + ret = devm_request_threaded_irq(&pdev->dev, spi->irq,
12510 + spi->cfg->irq_handler_event,
12511 + spi->cfg->irq_handler_thread,
12512 +@@ -1867,20 +1865,20 @@ static int stm32_spi_probe(struct platform_device *pdev)
12513 + if (ret) {
12514 + dev_err(&pdev->dev, "irq%d request failed: %d\n", spi->irq,
12515 + ret);
12516 +- goto err_master_put;
12517 ++ return ret;
12518 + }
12519 +
12520 + spi->clk = devm_clk_get(&pdev->dev, NULL);
12521 + if (IS_ERR(spi->clk)) {
12522 + ret = PTR_ERR(spi->clk);
12523 + dev_err(&pdev->dev, "clk get failed: %d\n", ret);
12524 +- goto err_master_put;
12525 ++ return ret;
12526 + }
12527 +
12528 + ret = clk_prepare_enable(spi->clk);
12529 + if (ret) {
12530 + dev_err(&pdev->dev, "clk enable failed: %d\n", ret);
12531 +- goto err_master_put;
12532 ++ return ret;
12533 + }
12534 + spi->clk_rate = clk_get_rate(spi->clk);
12535 + if (!spi->clk_rate) {
12536 +@@ -1950,7 +1948,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
12537 + pm_runtime_set_active(&pdev->dev);
12538 + pm_runtime_enable(&pdev->dev);
12539 +
12540 +- ret = devm_spi_register_master(&pdev->dev, master);
12541 ++ ret = spi_register_master(master);
12542 + if (ret) {
12543 + dev_err(&pdev->dev, "spi master registration failed: %d\n",
12544 + ret);
12545 +@@ -1976,8 +1974,6 @@ err_dma_release:
12546 + dma_release_channel(spi->dma_rx);
12547 + err_clk_disable:
12548 + clk_disable_unprepare(spi->clk);
12549 +-err_master_put:
12550 +- spi_master_put(master);
12551 +
12552 + return ret;
12553 + }
12554 +@@ -1987,6 +1983,7 @@ static int stm32_spi_remove(struct platform_device *pdev)
12555 + struct spi_master *master = platform_get_drvdata(pdev);
12556 + struct stm32_spi *spi = spi_master_get_devdata(master);
12557 +
12558 ++ spi_unregister_master(master);
12559 + spi->cfg->disable(spi);
12560 +
12561 + if (master->dma_tx)
12562 +diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
12563 +index c8fa6ee18ae77..1dd2af9cc2374 100644
12564 +--- a/drivers/spi/spi-zynqmp-gqspi.c
12565 ++++ b/drivers/spi/spi-zynqmp-gqspi.c
12566 +@@ -157,6 +157,7 @@ enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};
12567 + * @data_completion: completion structure
12568 + */
12569 + struct zynqmp_qspi {
12570 ++ struct spi_controller *ctlr;
12571 + void __iomem *regs;
12572 + struct clk *refclk;
12573 + struct clk *pclk;
12574 +@@ -173,6 +174,7 @@ struct zynqmp_qspi {
12575 + u32 genfifoentry;
12576 + enum mode_type mode;
12577 + struct completion data_completion;
12578 ++ struct mutex op_lock;
12579 + };
12580 +
12581 + /**
12582 +@@ -486,24 +488,10 @@ static int zynqmp_qspi_setup_op(struct spi_device *qspi)
12583 + {
12584 + struct spi_controller *ctlr = qspi->master;
12585 + struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
12586 +- struct device *dev = &ctlr->dev;
12587 +- int ret;
12588 +
12589 + if (ctlr->busy)
12590 + return -EBUSY;
12591 +
12592 +- ret = clk_enable(xqspi->refclk);
12593 +- if (ret) {
12594 +- dev_err(dev, "Cannot enable device clock.\n");
12595 +- return ret;
12596 +- }
12597 +-
12598 +- ret = clk_enable(xqspi->pclk);
12599 +- if (ret) {
12600 +- dev_err(dev, "Cannot enable APB clock.\n");
12601 +- clk_disable(xqspi->refclk);
12602 +- return ret;
12603 +- }
12604 + zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
12605 +
12606 + return 0;
12607 +@@ -520,7 +508,7 @@ static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
12608 + {
12609 + u32 count = 0, intermediate;
12610 +
12611 +- while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
12612 ++ while ((xqspi->bytes_to_transfer > 0) && (count < size) && (xqspi->txbuf)) {
12613 + memcpy(&intermediate, xqspi->txbuf, 4);
12614 + zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
12615 +
12616 +@@ -579,7 +567,7 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
12617 + genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
12618 + genfifoentry |= GQSPI_GENFIFO_TX;
12619 + transfer_len = xqspi->bytes_to_transfer;
12620 +- } else {
12621 ++ } else if (xqspi->rxbuf) {
12622 + genfifoentry &= ~GQSPI_GENFIFO_TX;
12623 + genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
12624 + genfifoentry |= GQSPI_GENFIFO_RX;
12625 +@@ -587,6 +575,11 @@ static void zynqmp_qspi_fillgenfifo(struct zynqmp_qspi *xqspi, u8 nbits,
12626 + transfer_len = xqspi->dma_rx_bytes;
12627 + else
12628 + transfer_len = xqspi->bytes_to_receive;
12629 ++ } else {
12630 ++ /* Sending dummy circles here */
12631 ++ genfifoentry &= ~(GQSPI_GENFIFO_TX | GQSPI_GENFIFO_RX);
12632 ++ genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
12633 ++ transfer_len = xqspi->bytes_to_transfer;
12634 + }
12635 + genfifoentry |= zynqmp_qspi_selectspimode(xqspi, nbits);
12636 + xqspi->genfifoentry = genfifoentry;
12637 +@@ -738,7 +731,7 @@ static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
12638 + * zynqmp_qspi_setuprxdma - This function sets up the RX DMA operation
12639 + * @xqspi: xqspi is a pointer to the GQSPI instance.
12640 + */
12641 +-static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
12642 ++static int zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
12643 + {
12644 + u32 rx_bytes, rx_rem, config_reg;
12645 + dma_addr_t addr;
12646 +@@ -752,7 +745,7 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
12647 + zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
12648 + xqspi->mode = GQSPI_MODE_IO;
12649 + xqspi->dma_rx_bytes = 0;
12650 +- return;
12651 ++ return 0;
12652 + }
12653 +
12654 + rx_rem = xqspi->bytes_to_receive % 4;
12655 +@@ -760,8 +753,10 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
12656 +
12657 + addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
12658 + rx_bytes, DMA_FROM_DEVICE);
12659 +- if (dma_mapping_error(xqspi->dev, addr))
12660 ++ if (dma_mapping_error(xqspi->dev, addr)) {
12661 + dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
12662 ++ return -ENOMEM;
12663 ++ }
12664 +
12665 + xqspi->dma_rx_bytes = rx_bytes;
12666 + xqspi->dma_addr = addr;
12667 +@@ -782,6 +777,8 @@ static void zynqmp_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
12668 +
12669 + /* Write the number of bytes to transfer */
12670 + zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
12671 ++
12672 ++ return 0;
12673 + }
12674 +
12675 + /**
12676 +@@ -818,11 +815,17 @@ static void zynqmp_qspi_write_op(struct zynqmp_qspi *xqspi, u8 tx_nbits,
12677 + * @genfifoentry: genfifoentry is pointer to the variable in which
12678 + * GENFIFO mask is returned to calling function
12679 + */
12680 +-static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
12681 ++static int zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
12682 + u32 genfifoentry)
12683 + {
12684 ++ int ret;
12685 ++
12686 ++ ret = zynqmp_qspi_setuprxdma(xqspi);
12687 ++ if (ret)
12688 ++ return ret;
12689 + zynqmp_qspi_fillgenfifo(xqspi, rx_nbits, genfifoentry);
12690 +- zynqmp_qspi_setuprxdma(xqspi);
12691 ++
12692 ++ return 0;
12693 + }
12694 +
12695 + /**
12696 +@@ -835,10 +838,13 @@ static void zynqmp_qspi_read_op(struct zynqmp_qspi *xqspi, u8 rx_nbits,
12697 + */
12698 + static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
12699 + {
12700 +- struct spi_controller *ctlr = dev_get_drvdata(dev);
12701 +- struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
12702 ++ struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
12703 ++ struct spi_controller *ctlr = xqspi->ctlr;
12704 ++ int ret;
12705 +
12706 +- spi_controller_suspend(ctlr);
12707 ++ ret = spi_controller_suspend(ctlr);
12708 ++ if (ret)
12709 ++ return ret;
12710 +
12711 + zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
12712 +
12713 +@@ -856,27 +862,13 @@ static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
12714 + */
12715 + static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
12716 + {
12717 +- struct spi_controller *ctlr = dev_get_drvdata(dev);
12718 +- struct zynqmp_qspi *xqspi = spi_controller_get_devdata(ctlr);
12719 +- int ret = 0;
12720 ++ struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
12721 ++ struct spi_controller *ctlr = xqspi->ctlr;
12722 +
12723 +- ret = clk_enable(xqspi->pclk);
12724 +- if (ret) {
12725 +- dev_err(dev, "Cannot enable APB clock.\n");
12726 +- return ret;
12727 +- }
12728 +-
12729 +- ret = clk_enable(xqspi->refclk);
12730 +- if (ret) {
12731 +- dev_err(dev, "Cannot enable device clock.\n");
12732 +- clk_disable(xqspi->pclk);
12733 +- return ret;
12734 +- }
12735 ++ zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
12736 +
12737 + spi_controller_resume(ctlr);
12738 +
12739 +- clk_disable(xqspi->refclk);
12740 +- clk_disable(xqspi->pclk);
12741 + return 0;
12742 + }
12743 +
12744 +@@ -890,10 +882,10 @@ static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
12745 + */
12746 + static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
12747 + {
12748 +- struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
12749 ++ struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
12750 +
12751 +- clk_disable(xqspi->refclk);
12752 +- clk_disable(xqspi->pclk);
12753 ++ clk_disable_unprepare(xqspi->refclk);
12754 ++ clk_disable_unprepare(xqspi->pclk);
12755 +
12756 + return 0;
12757 + }
12758 +@@ -908,19 +900,19 @@ static int __maybe_unused zynqmp_runtime_suspend(struct device *dev)
12759 + */
12760 + static int __maybe_unused zynqmp_runtime_resume(struct device *dev)
12761 + {
12762 +- struct zynqmp_qspi *xqspi = (struct zynqmp_qspi *)dev_get_drvdata(dev);
12763 ++ struct zynqmp_qspi *xqspi = dev_get_drvdata(dev);
12764 + int ret;
12765 +
12766 +- ret = clk_enable(xqspi->pclk);
12767 ++ ret = clk_prepare_enable(xqspi->pclk);
12768 + if (ret) {
12769 + dev_err(dev, "Cannot enable APB clock.\n");
12770 + return ret;
12771 + }
12772 +
12773 +- ret = clk_enable(xqspi->refclk);
12774 ++ ret = clk_prepare_enable(xqspi->refclk);
12775 + if (ret) {
12776 + dev_err(dev, "Cannot enable device clock.\n");
12777 +- clk_disable(xqspi->pclk);
12778 ++ clk_disable_unprepare(xqspi->pclk);
12779 + return ret;
12780 + }
12781 +
12782 +@@ -944,25 +936,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
12783 + struct zynqmp_qspi *xqspi = spi_controller_get_devdata
12784 + (mem->spi->master);
12785 + int err = 0, i;
12786 +- u8 *tmpbuf;
12787 + u32 genfifoentry = 0;
12788 ++ u16 opcode = op->cmd.opcode;
12789 ++ u64 opaddr;
12790 +
12791 + dev_dbg(xqspi->dev, "cmd:%#x mode:%d.%d.%d.%d\n",
12792 + op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
12793 + op->dummy.buswidth, op->data.buswidth);
12794 +
12795 ++ mutex_lock(&xqspi->op_lock);
12796 + zynqmp_qspi_config_op(xqspi, mem->spi);
12797 + zynqmp_qspi_chipselect(mem->spi, false);
12798 + genfifoentry |= xqspi->genfifocs;
12799 + genfifoentry |= xqspi->genfifobus;
12800 +
12801 + if (op->cmd.opcode) {
12802 +- tmpbuf = kzalloc(op->cmd.nbytes, GFP_KERNEL | GFP_DMA);
12803 +- if (!tmpbuf)
12804 +- return -ENOMEM;
12805 +- tmpbuf[0] = op->cmd.opcode;
12806 + reinit_completion(&xqspi->data_completion);
12807 +- xqspi->txbuf = tmpbuf;
12808 ++ xqspi->txbuf = &opcode;
12809 + xqspi->rxbuf = NULL;
12810 + xqspi->bytes_to_transfer = op->cmd.nbytes;
12811 + xqspi->bytes_to_receive = 0;
12812 +@@ -973,16 +963,15 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
12813 + zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
12814 + GQSPI_IER_GENFIFOEMPTY_MASK |
12815 + GQSPI_IER_TXNOT_FULL_MASK);
12816 +- if (!wait_for_completion_interruptible_timeout
12817 ++ if (!wait_for_completion_timeout
12818 + (&xqspi->data_completion, msecs_to_jiffies(1000))) {
12819 + err = -ETIMEDOUT;
12820 +- kfree(tmpbuf);
12821 + goto return_err;
12822 + }
12823 +- kfree(tmpbuf);
12824 + }
12825 +
12826 + if (op->addr.nbytes) {
12827 ++ xqspi->txbuf = &opaddr;
12828 + for (i = 0; i < op->addr.nbytes; i++) {
12829 + *(((u8 *)xqspi->txbuf) + i) = op->addr.val >>
12830 + (8 * (op->addr.nbytes - i - 1));
12831 +@@ -1001,7 +990,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
12832 + GQSPI_IER_TXEMPTY_MASK |
12833 + GQSPI_IER_GENFIFOEMPTY_MASK |
12834 + GQSPI_IER_TXNOT_FULL_MASK);
12835 +- if (!wait_for_completion_interruptible_timeout
12836 ++ if (!wait_for_completion_timeout
12837 + (&xqspi->data_completion, msecs_to_jiffies(1000))) {
12838 + err = -ETIMEDOUT;
12839 + goto return_err;
12840 +@@ -1009,32 +998,23 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
12841 + }
12842 +
12843 + if (op->dummy.nbytes) {
12844 +- tmpbuf = kzalloc(op->dummy.nbytes, GFP_KERNEL | GFP_DMA);
12845 +- if (!tmpbuf)
12846 +- return -ENOMEM;
12847 +- memset(tmpbuf, 0xff, op->dummy.nbytes);
12848 +- reinit_completion(&xqspi->data_completion);
12849 +- xqspi->txbuf = tmpbuf;
12850 ++ xqspi->txbuf = NULL;
12851 + xqspi->rxbuf = NULL;
12852 +- xqspi->bytes_to_transfer = op->dummy.nbytes;
12853 ++ /*
12854 ++ * xqspi->bytes_to_transfer here represents the dummy circles
12855 ++ * which need to be sent.
12856 ++ */
12857 ++ xqspi->bytes_to_transfer = op->dummy.nbytes * 8 / op->dummy.buswidth;
12858 + xqspi->bytes_to_receive = 0;
12859 +- zynqmp_qspi_write_op(xqspi, op->dummy.buswidth,
12860 ++ /*
12861 ++ * Using op->data.buswidth instead of op->dummy.buswidth here because
12862 ++ * we need to use it to configure the correct SPI mode.
12863 ++ */
12864 ++ zynqmp_qspi_write_op(xqspi, op->data.buswidth,
12865 + genfifoentry);
12866 + zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
12867 + zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
12868 + GQSPI_CFG_START_GEN_FIFO_MASK);
12869 +- zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
12870 +- GQSPI_IER_TXEMPTY_MASK |
12871 +- GQSPI_IER_GENFIFOEMPTY_MASK |
12872 +- GQSPI_IER_TXNOT_FULL_MASK);
12873 +- if (!wait_for_completion_interruptible_timeout
12874 +- (&xqspi->data_completion, msecs_to_jiffies(1000))) {
12875 +- err = -ETIMEDOUT;
12876 +- kfree(tmpbuf);
12877 +- goto return_err;
12878 +- }
12879 +-
12880 +- kfree(tmpbuf);
12881 + }
12882 +
12883 + if (op->data.nbytes) {
12884 +@@ -1059,8 +1039,11 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
12885 + xqspi->rxbuf = (u8 *)op->data.buf.in;
12886 + xqspi->bytes_to_receive = op->data.nbytes;
12887 + xqspi->bytes_to_transfer = 0;
12888 +- zynqmp_qspi_read_op(xqspi, op->data.buswidth,
12889 ++ err = zynqmp_qspi_read_op(xqspi, op->data.buswidth,
12890 + genfifoentry);
12891 ++ if (err)
12892 ++ goto return_err;
12893 ++
12894 + zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
12895 + zynqmp_gqspi_read
12896 + (xqspi, GQSPI_CONFIG_OFST) |
12897 +@@ -1076,7 +1059,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
12898 + GQSPI_IER_RXEMPTY_MASK);
12899 + }
12900 + }
12901 +- if (!wait_for_completion_interruptible_timeout
12902 ++ if (!wait_for_completion_timeout
12903 + (&xqspi->data_completion, msecs_to_jiffies(1000)))
12904 + err = -ETIMEDOUT;
12905 + }
12906 +@@ -1084,6 +1067,7 @@ static int zynqmp_qspi_exec_op(struct spi_mem *mem,
12907 + return_err:
12908 +
12909 + zynqmp_qspi_chipselect(mem->spi, true);
12910 ++ mutex_unlock(&xqspi->op_lock);
12911 +
12912 + return err;
12913 + }
12914 +@@ -1120,6 +1104,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
12915 +
12916 + xqspi = spi_controller_get_devdata(ctlr);
12917 + xqspi->dev = dev;
12918 ++ xqspi->ctlr = ctlr;
12919 + platform_set_drvdata(pdev, xqspi);
12920 +
12921 + xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
12922 +@@ -1135,13 +1120,11 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
12923 + goto remove_master;
12924 + }
12925 +
12926 +- init_completion(&xqspi->data_completion);
12927 +-
12928 + xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
12929 + if (IS_ERR(xqspi->refclk)) {
12930 + dev_err(dev, "ref_clk clock not found.\n");
12931 + ret = PTR_ERR(xqspi->refclk);
12932 +- goto clk_dis_pclk;
12933 ++ goto remove_master;
12934 + }
12935 +
12936 + ret = clk_prepare_enable(xqspi->pclk);
12937 +@@ -1156,6 +1139,10 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
12938 + goto clk_dis_pclk;
12939 + }
12940 +
12941 ++ init_completion(&xqspi->data_completion);
12942 ++
12943 ++ mutex_init(&xqspi->op_lock);
12944 ++
12945 + pm_runtime_use_autosuspend(&pdev->dev);
12946 + pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
12947 + pm_runtime_set_active(&pdev->dev);
12948 +@@ -1178,6 +1165,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
12949 + goto clk_dis_all;
12950 + }
12951 +
12952 ++ dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
12953 + ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
12954 + ctlr->num_chipselect = GQSPI_DEFAULT_NUM_CS;
12955 + ctlr->mem_ops = &zynqmp_qspi_mem_ops;
12956 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
12957 +index 1eee8b3c1b381..419de3d404814 100644
12958 +--- a/drivers/spi/spi.c
12959 ++++ b/drivers/spi/spi.c
12960 +@@ -2480,6 +2480,7 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
12961 +
12962 + ctlr = __spi_alloc_controller(dev, size, slave);
12963 + if (ctlr) {
12964 ++ ctlr->devm_allocated = true;
12965 + *ptr = ctlr;
12966 + devres_add(dev, ptr);
12967 + } else {
12968 +@@ -2826,11 +2827,6 @@ int devm_spi_register_controller(struct device *dev,
12969 + }
12970 + EXPORT_SYMBOL_GPL(devm_spi_register_controller);
12971 +
12972 +-static int devm_spi_match_controller(struct device *dev, void *res, void *ctlr)
12973 +-{
12974 +- return *(struct spi_controller **)res == ctlr;
12975 +-}
12976 +-
12977 + static int __unregister(struct device *dev, void *null)
12978 + {
12979 + spi_unregister_device(to_spi_device(dev));
12980 +@@ -2877,8 +2873,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
12981 + /* Release the last reference on the controller if its driver
12982 + * has not yet been converted to devm_spi_alloc_master/slave().
12983 + */
12984 +- if (!devres_find(ctlr->dev.parent, devm_spi_release_controller,
12985 +- devm_spi_match_controller, ctlr))
12986 ++ if (!ctlr->devm_allocated)
12987 + put_device(&ctlr->dev);
12988 +
12989 + /* free bus id */
12990 +diff --git a/drivers/staging/comedi/drivers/tests/ni_routes_test.c b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
12991 +index eaefaf596a376..02606e39625af 100644
12992 +--- a/drivers/staging/comedi/drivers/tests/ni_routes_test.c
12993 ++++ b/drivers/staging/comedi/drivers/tests/ni_routes_test.c
12994 +@@ -217,7 +217,8 @@ void test_ni_assign_device_routes(void)
12995 + const u8 *table, *oldtable;
12996 +
12997 + init_pci_6070e();
12998 +- ni_assign_device_routes(ni_eseries, pci_6070e, &private.routing_tables);
12999 ++ ni_assign_device_routes(ni_eseries, pci_6070e, NULL,
13000 ++ &private.routing_tables);
13001 + devroutes = private.routing_tables.valid_routes;
13002 + table = private.routing_tables.route_values;
13003 +
13004 +@@ -253,7 +254,8 @@ void test_ni_assign_device_routes(void)
13005 + olddevroutes = devroutes;
13006 + oldtable = table;
13007 + init_pci_6220();
13008 +- ni_assign_device_routes(ni_mseries, pci_6220, &private.routing_tables);
13009 ++ ni_assign_device_routes(ni_mseries, pci_6220, NULL,
13010 ++ &private.routing_tables);
13011 + devroutes = private.routing_tables.valid_routes;
13012 + table = private.routing_tables.route_values;
13013 +
13014 +diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
13015 +index c368082aae1aa..0f4655d7d520a 100644
13016 +--- a/drivers/staging/fwserial/fwserial.c
13017 ++++ b/drivers/staging/fwserial/fwserial.c
13018 +@@ -1218,13 +1218,12 @@ static int get_serial_info(struct tty_struct *tty,
13019 + struct fwtty_port *port = tty->driver_data;
13020 +
13021 + mutex_lock(&port->port.mutex);
13022 +- ss->type = PORT_UNKNOWN;
13023 +- ss->line = port->port.tty->index;
13024 +- ss->flags = port->port.flags;
13025 +- ss->xmit_fifo_size = FWTTY_PORT_TXFIFO_LEN;
13026 ++ ss->line = port->index;
13027 + ss->baud_base = 400000000;
13028 +- ss->close_delay = port->port.close_delay;
13029 ++ ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
13030 ++ ss->closing_wait = 3000;
13031 + mutex_unlock(&port->port.mutex);
13032 ++
13033 + return 0;
13034 + }
13035 +
13036 +@@ -1232,20 +1231,20 @@ static int set_serial_info(struct tty_struct *tty,
13037 + struct serial_struct *ss)
13038 + {
13039 + struct fwtty_port *port = tty->driver_data;
13040 ++ unsigned int cdelay;
13041 +
13042 +- if (ss->irq != 0 || ss->port != 0 || ss->custom_divisor != 0 ||
13043 +- ss->baud_base != 400000000)
13044 +- return -EPERM;
13045 ++ cdelay = msecs_to_jiffies(ss->close_delay * 10);
13046 +
13047 + mutex_lock(&port->port.mutex);
13048 + if (!capable(CAP_SYS_ADMIN)) {
13049 +- if (((ss->flags & ~ASYNC_USR_MASK) !=
13050 ++ if (cdelay != port->port.close_delay ||
13051 ++ ((ss->flags & ~ASYNC_USR_MASK) !=
13052 + (port->port.flags & ~ASYNC_USR_MASK))) {
13053 + mutex_unlock(&port->port.mutex);
13054 + return -EPERM;
13055 + }
13056 + }
13057 +- port->port.close_delay = ss->close_delay * HZ / 100;
13058 ++ port->port.close_delay = cdelay;
13059 + mutex_unlock(&port->port.mutex);
13060 +
13061 + return 0;
13062 +diff --git a/drivers/staging/greybus/uart.c b/drivers/staging/greybus/uart.c
13063 +index 607378bfebb7e..a520f7f213db0 100644
13064 +--- a/drivers/staging/greybus/uart.c
13065 ++++ b/drivers/staging/greybus/uart.c
13066 +@@ -614,10 +614,12 @@ static int get_serial_info(struct tty_struct *tty,
13067 + ss->line = gb_tty->minor;
13068 + ss->xmit_fifo_size = 16;
13069 + ss->baud_base = 9600;
13070 +- ss->close_delay = gb_tty->port.close_delay / 10;
13071 ++ ss->close_delay = jiffies_to_msecs(gb_tty->port.close_delay) / 10;
13072 + ss->closing_wait =
13073 + gb_tty->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
13074 +- ASYNC_CLOSING_WAIT_NONE : gb_tty->port.closing_wait / 10;
13075 ++ ASYNC_CLOSING_WAIT_NONE :
13076 ++ jiffies_to_msecs(gb_tty->port.closing_wait) / 10;
13077 ++
13078 + return 0;
13079 + }
13080 +
13081 +@@ -629,17 +631,16 @@ static int set_serial_info(struct tty_struct *tty,
13082 + unsigned int close_delay;
13083 + int retval = 0;
13084 +
13085 +- close_delay = ss->close_delay * 10;
13086 ++ close_delay = msecs_to_jiffies(ss->close_delay * 10);
13087 + closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
13088 +- ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
13089 ++ ASYNC_CLOSING_WAIT_NONE :
13090 ++ msecs_to_jiffies(ss->closing_wait * 10);
13091 +
13092 + mutex_lock(&gb_tty->port.mutex);
13093 + if (!capable(CAP_SYS_ADMIN)) {
13094 + if ((close_delay != gb_tty->port.close_delay) ||
13095 + (closing_wait != gb_tty->port.closing_wait))
13096 + retval = -EPERM;
13097 +- else
13098 +- retval = -EOPNOTSUPP;
13099 + } else {
13100 + gb_tty->port.close_delay = close_delay;
13101 + gb_tty->port.closing_wait = closing_wait;
13102 +diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
13103 +index 7ca7378b18592..0ab67b2aec671 100644
13104 +--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
13105 ++++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
13106 +@@ -843,8 +843,10 @@ static int lm3554_probe(struct i2c_client *client)
13107 + return -ENOMEM;
13108 +
13109 + flash->pdata = lm3554_platform_data_func(client);
13110 +- if (IS_ERR(flash->pdata))
13111 +- return PTR_ERR(flash->pdata);
13112 ++ if (IS_ERR(flash->pdata)) {
13113 ++ err = PTR_ERR(flash->pdata);
13114 ++ goto fail1;
13115 ++ }
13116 +
13117 + v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
13118 + flash->sd.internal_ops = &lm3554_internal_ops;
13119 +@@ -856,7 +858,7 @@ static int lm3554_probe(struct i2c_client *client)
13120 + ARRAY_SIZE(lm3554_controls));
13121 + if (ret) {
13122 + dev_err(&client->dev, "error initialize a ctrl_handler.\n");
13123 +- goto fail2;
13124 ++ goto fail3;
13125 + }
13126 +
13127 + for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
13128 +@@ -865,14 +867,14 @@ static int lm3554_probe(struct i2c_client *client)
13129 +
13130 + if (flash->ctrl_handler.error) {
13131 + dev_err(&client->dev, "ctrl_handler error.\n");
13132 +- goto fail2;
13133 ++ goto fail3;
13134 + }
13135 +
13136 + flash->sd.ctrl_handler = &flash->ctrl_handler;
13137 + err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
13138 + if (err) {
13139 + dev_err(&client->dev, "error initialize a media entity.\n");
13140 +- goto fail1;
13141 ++ goto fail2;
13142 + }
13143 +
13144 + flash->sd.entity.function = MEDIA_ENT_F_FLASH;
13145 +@@ -884,14 +886,15 @@ static int lm3554_probe(struct i2c_client *client)
13146 + err = lm3554_gpio_init(client);
13147 + if (err) {
13148 + dev_err(&client->dev, "gpio request/direction_output fail");
13149 +- goto fail2;
13150 ++ goto fail3;
13151 + }
13152 + return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
13153 +-fail2:
13154 ++fail3:
13155 + media_entity_cleanup(&flash->sd.entity);
13156 + v4l2_ctrl_handler_free(&flash->ctrl_handler);
13157 +-fail1:
13158 ++fail2:
13159 + v4l2_device_unregister_subdev(&flash->sd);
13160 ++fail1:
13161 + kfree(flash);
13162 +
13163 + return err;
13164 +diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
13165 +index 2ae50decfc8bd..9da82855552de 100644
13166 +--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
13167 ++++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
13168 +@@ -948,10 +948,8 @@ int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd,
13169 + dev_dbg(isp->dev, "allocating %d dis buffers\n", count);
13170 + while (count--) {
13171 + dis_buf = kzalloc(sizeof(struct atomisp_dis_buf), GFP_KERNEL);
13172 +- if (!dis_buf) {
13173 +- kfree(s3a_buf);
13174 ++ if (!dis_buf)
13175 + goto error;
13176 +- }
13177 + if (atomisp_css_allocate_stat_buffers(
13178 + asd, stream_id, NULL, dis_buf, NULL)) {
13179 + kfree(dis_buf);
13180 +diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
13181 +index f13af2329f486..0168f9839c905 100644
13182 +--- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
13183 ++++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c
13184 +@@ -857,16 +857,17 @@ static void free_private_pages(struct hmm_buffer_object *bo,
13185 + kfree(bo->page_obj);
13186 + }
13187 +
13188 +-static void free_user_pages(struct hmm_buffer_object *bo)
13189 ++static void free_user_pages(struct hmm_buffer_object *bo,
13190 ++ unsigned int page_nr)
13191 + {
13192 + int i;
13193 +
13194 + hmm_mem_stat.usr_size -= bo->pgnr;
13195 +
13196 + if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) {
13197 +- unpin_user_pages(bo->pages, bo->pgnr);
13198 ++ unpin_user_pages(bo->pages, page_nr);
13199 + } else {
13200 +- for (i = 0; i < bo->pgnr; i++)
13201 ++ for (i = 0; i < page_nr; i++)
13202 + put_page(bo->pages[i]);
13203 + }
13204 + kfree(bo->pages);
13205 +@@ -942,6 +943,8 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
13206 + dev_err(atomisp_dev,
13207 + "get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
13208 + bo->pgnr, page_nr);
13209 ++ if (page_nr < 0)
13210 ++ page_nr = 0;
13211 + goto out_of_mem;
13212 + }
13213 +
13214 +@@ -954,7 +957,7 @@ static int alloc_user_pages(struct hmm_buffer_object *bo,
13215 +
13216 + out_of_mem:
13217 +
13218 +- free_user_pages(bo);
13219 ++ free_user_pages(bo, page_nr);
13220 +
13221 + return -ENOMEM;
13222 + }
13223 +@@ -1037,7 +1040,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo)
13224 + if (bo->type == HMM_BO_PRIVATE)
13225 + free_private_pages(bo, &dynamic_pool, &reserved_pool);
13226 + else if (bo->type == HMM_BO_USER)
13227 +- free_user_pages(bo);
13228 ++ free_user_pages(bo, bo->pgnr);
13229 + else
13230 + dev_err(atomisp_dev, "invalid buffer type.\n");
13231 + mutex_unlock(&bo->mutex);
13232 +diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
13233 +index e06ea7ea1e502..3dac35f682388 100644
13234 +--- a/drivers/staging/media/omap4iss/iss.c
13235 ++++ b/drivers/staging/media/omap4iss/iss.c
13236 +@@ -1236,8 +1236,10 @@ static int iss_probe(struct platform_device *pdev)
13237 + if (ret < 0)
13238 + goto error;
13239 +
13240 +- if (!omap4iss_get(iss))
13241 ++ if (!omap4iss_get(iss)) {
13242 ++ ret = -EINVAL;
13243 + goto error;
13244 ++ }
13245 +
13246 + ret = iss_reset(iss);
13247 + if (ret < 0)
13248 +diff --git a/drivers/staging/media/rkisp1/rkisp1-resizer.c b/drivers/staging/media/rkisp1/rkisp1-resizer.c
13249 +index 1687d82e6c68d..4dcc342ac2b27 100644
13250 +--- a/drivers/staging/media/rkisp1/rkisp1-resizer.c
13251 ++++ b/drivers/staging/media/rkisp1/rkisp1-resizer.c
13252 +@@ -520,14 +520,15 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
13253 + struct v4l2_mbus_framefmt *format,
13254 + unsigned int which)
13255 + {
13256 +- const struct rkisp1_isp_mbus_info *mbus_info;
13257 +- struct v4l2_mbus_framefmt *src_fmt;
13258 ++ const struct rkisp1_isp_mbus_info *sink_mbus_info;
13259 ++ struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
13260 +
13261 ++ sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SINK, which);
13262 + src_fmt = rkisp1_rsz_get_pad_fmt(rsz, cfg, RKISP1_RSZ_PAD_SRC, which);
13263 +- mbus_info = rkisp1_isp_mbus_info_get(src_fmt->code);
13264 ++ sink_mbus_info = rkisp1_isp_mbus_info_get(sink_fmt->code);
13265 +
13266 + /* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
13267 +- if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
13268 ++ if (sink_mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV &&
13269 + rkisp1_rsz_get_yuv_mbus_info(format->code))
13270 + src_fmt->code = format->code;
13271 +
13272 +diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
13273 +index 66b152f18d17a..426387cf16ac7 100644
13274 +--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
13275 ++++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
13276 +@@ -443,16 +443,17 @@
13277 + #define VE_DEC_H265_STATUS_STCD_BUSY BIT(21)
13278 + #define VE_DEC_H265_STATUS_WB_BUSY BIT(20)
13279 + #define VE_DEC_H265_STATUS_BS_DMA_BUSY BIT(19)
13280 +-#define VE_DEC_H265_STATUS_IQIT_BUSY BIT(18)
13281 ++#define VE_DEC_H265_STATUS_IT_BUSY BIT(18)
13282 + #define VE_DEC_H265_STATUS_INTER_BUSY BIT(17)
13283 + #define VE_DEC_H265_STATUS_MORE_DATA BIT(16)
13284 +-#define VE_DEC_H265_STATUS_VLD_BUSY BIT(14)
13285 +-#define VE_DEC_H265_STATUS_DEBLOCKING_BUSY BIT(13)
13286 +-#define VE_DEC_H265_STATUS_DEBLOCKING_DRAM_BUSY BIT(12)
13287 +-#define VE_DEC_H265_STATUS_INTRA_BUSY BIT(11)
13288 +-#define VE_DEC_H265_STATUS_SAO_BUSY BIT(10)
13289 +-#define VE_DEC_H265_STATUS_MVP_BUSY BIT(9)
13290 +-#define VE_DEC_H265_STATUS_SWDEC_BUSY BIT(8)
13291 ++#define VE_DEC_H265_STATUS_DBLK_BUSY BIT(15)
13292 ++#define VE_DEC_H265_STATUS_IREC_BUSY BIT(14)
13293 ++#define VE_DEC_H265_STATUS_INTRA_BUSY BIT(13)
13294 ++#define VE_DEC_H265_STATUS_MCRI_BUSY BIT(12)
13295 ++#define VE_DEC_H265_STATUS_IQIT_BUSY BIT(11)
13296 ++#define VE_DEC_H265_STATUS_MVP_BUSY BIT(10)
13297 ++#define VE_DEC_H265_STATUS_IS_BUSY BIT(9)
13298 ++#define VE_DEC_H265_STATUS_VLD_BUSY BIT(8)
13299 + #define VE_DEC_H265_STATUS_OVER_TIME BIT(3)
13300 + #define VE_DEC_H265_STATUS_VLD_DATA_REQ BIT(2)
13301 + #define VE_DEC_H265_STATUS_ERROR BIT(1)
13302 +diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
13303 +index 27dc181c4c9b6..03d31e52b3999 100644
13304 +--- a/drivers/staging/rtl8192u/r8192U_core.c
13305 ++++ b/drivers/staging/rtl8192u/r8192U_core.c
13306 +@@ -3208,7 +3208,7 @@ static void rtl819x_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
13307 + u32 *TotalRxDataNum)
13308 + {
13309 + u16 SlotIndex;
13310 +- u8 i;
13311 ++ u16 i;
13312 +
13313 + *TotalRxBcnNum = 0;
13314 + *TotalRxDataNum = 0;
13315 +diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
13316 +index 13f63c01c5894..f60db967bf7b5 100644
13317 +--- a/drivers/tty/amiserial.c
13318 ++++ b/drivers/tty/amiserial.c
13319 +@@ -970,6 +970,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
13320 + if (!serial_isroot()) {
13321 + if ((ss->baud_base != state->baud_base) ||
13322 + (ss->close_delay != port->close_delay) ||
13323 ++ (ss->closing_wait != port->closing_wait) ||
13324 + (ss->xmit_fifo_size != state->xmit_fifo_size) ||
13325 + ((ss->flags & ~ASYNC_USR_MASK) !=
13326 + (port->flags & ~ASYNC_USR_MASK))) {
13327 +diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
13328 +index 9f13f7d49dd78..f9f14104bd2c0 100644
13329 +--- a/drivers/tty/moxa.c
13330 ++++ b/drivers/tty/moxa.c
13331 +@@ -2040,7 +2040,7 @@ static int moxa_get_serial_info(struct tty_struct *tty,
13332 + ss->line = info->port.tty->index,
13333 + ss->flags = info->port.flags,
13334 + ss->baud_base = 921600,
13335 +- ss->close_delay = info->port.close_delay;
13336 ++ ss->close_delay = jiffies_to_msecs(info->port.close_delay) / 10;
13337 + mutex_unlock(&info->port.mutex);
13338 + return 0;
13339 + }
13340 +@@ -2050,6 +2050,7 @@ static int moxa_set_serial_info(struct tty_struct *tty,
13341 + struct serial_struct *ss)
13342 + {
13343 + struct moxa_port *info = tty->driver_data;
13344 ++ unsigned int close_delay;
13345 +
13346 + if (tty->index == MAX_PORTS)
13347 + return -EINVAL;
13348 +@@ -2061,19 +2062,24 @@ static int moxa_set_serial_info(struct tty_struct *tty,
13349 + ss->baud_base != 921600)
13350 + return -EPERM;
13351 +
13352 ++ close_delay = msecs_to_jiffies(ss->close_delay * 10);
13353 ++
13354 + mutex_lock(&info->port.mutex);
13355 + if (!capable(CAP_SYS_ADMIN)) {
13356 +- if (((ss->flags & ~ASYNC_USR_MASK) !=
13357 ++ if (close_delay != info->port.close_delay ||
13358 ++ ss->type != info->type ||
13359 ++ ((ss->flags & ~ASYNC_USR_MASK) !=
13360 + (info->port.flags & ~ASYNC_USR_MASK))) {
13361 + mutex_unlock(&info->port.mutex);
13362 + return -EPERM;
13363 + }
13364 +- }
13365 +- info->port.close_delay = ss->close_delay * HZ / 100;
13366 ++ } else {
13367 ++ info->port.close_delay = close_delay;
13368 +
13369 +- MoxaSetFifo(info, ss->type == PORT_16550A);
13370 ++ MoxaSetFifo(info, ss->type == PORT_16550A);
13371 +
13372 +- info->type = ss->type;
13373 ++ info->type = ss->type;
13374 ++ }
13375 + mutex_unlock(&info->port.mutex);
13376 + return 0;
13377 + }
13378 +diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
13379 +index 76b94d0ff5865..84e8158088cd2 100644
13380 +--- a/drivers/tty/serial/omap-serial.c
13381 ++++ b/drivers/tty/serial/omap-serial.c
13382 +@@ -159,6 +159,8 @@ struct uart_omap_port {
13383 + u32 calc_latency;
13384 + struct work_struct qos_work;
13385 + bool is_suspending;
13386 ++
13387 ++ unsigned int rs485_tx_filter_count;
13388 + };
13389 +
13390 + #define to_uart_omap_port(p) ((container_of((p), struct uart_omap_port, port)))
13391 +@@ -302,7 +304,8 @@ static void serial_omap_stop_tx(struct uart_port *port)
13392 + serial_out(up, UART_OMAP_SCR, up->scr);
13393 + res = (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) ?
13394 + 1 : 0;
13395 +- if (gpiod_get_value(up->rts_gpiod) != res) {
13396 ++ if (up->rts_gpiod &&
13397 ++ gpiod_get_value(up->rts_gpiod) != res) {
13398 + if (port->rs485.delay_rts_after_send > 0)
13399 + mdelay(
13400 + port->rs485.delay_rts_after_send);
13401 +@@ -328,19 +331,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
13402 + serial_out(up, UART_IER, up->ier);
13403 + }
13404 +
13405 +- if ((port->rs485.flags & SER_RS485_ENABLED) &&
13406 +- !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
13407 +- /*
13408 +- * Empty the RX FIFO, we are not interested in anything
13409 +- * received during the half-duplex transmission.
13410 +- */
13411 +- serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_RCVR);
13412 +- /* Re-enable RX interrupts */
13413 +- up->ier |= UART_IER_RLSI | UART_IER_RDI;
13414 +- up->port.read_status_mask |= UART_LSR_DR;
13415 +- serial_out(up, UART_IER, up->ier);
13416 +- }
13417 +-
13418 + pm_runtime_mark_last_busy(up->dev);
13419 + pm_runtime_put_autosuspend(up->dev);
13420 + }
13421 +@@ -366,6 +356,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
13422 + serial_out(up, UART_TX, up->port.x_char);
13423 + up->port.icount.tx++;
13424 + up->port.x_char = 0;
13425 ++ if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
13426 ++ !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
13427 ++ up->rs485_tx_filter_count++;
13428 ++
13429 + return;
13430 + }
13431 + if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
13432 +@@ -377,6 +371,10 @@ static void transmit_chars(struct uart_omap_port *up, unsigned int lsr)
13433 + serial_out(up, UART_TX, xmit->buf[xmit->tail]);
13434 + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
13435 + up->port.icount.tx++;
13436 ++ if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
13437 ++ !(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
13438 ++ up->rs485_tx_filter_count++;
13439 ++
13440 + if (uart_circ_empty(xmit))
13441 + break;
13442 + } while (--count > 0);
13443 +@@ -411,7 +409,7 @@ static void serial_omap_start_tx(struct uart_port *port)
13444 +
13445 + /* if rts not already enabled */
13446 + res = (port->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
13447 +- if (gpiod_get_value(up->rts_gpiod) != res) {
13448 ++ if (up->rts_gpiod && gpiod_get_value(up->rts_gpiod) != res) {
13449 + gpiod_set_value(up->rts_gpiod, res);
13450 + if (port->rs485.delay_rts_before_send > 0)
13451 + mdelay(port->rs485.delay_rts_before_send);
13452 +@@ -420,7 +418,7 @@ static void serial_omap_start_tx(struct uart_port *port)
13453 +
13454 + if ((port->rs485.flags & SER_RS485_ENABLED) &&
13455 + !(port->rs485.flags & SER_RS485_RX_DURING_TX))
13456 +- serial_omap_stop_rx(port);
13457 ++ up->rs485_tx_filter_count = 0;
13458 +
13459 + serial_omap_enable_ier_thri(up);
13460 + pm_runtime_mark_last_busy(up->dev);
13461 +@@ -491,8 +489,13 @@ static void serial_omap_rlsi(struct uart_omap_port *up, unsigned int lsr)
13462 + * Read one data character out to avoid stalling the receiver according
13463 + * to the table 23-246 of the omap4 TRM.
13464 + */
13465 +- if (likely(lsr & UART_LSR_DR))
13466 ++ if (likely(lsr & UART_LSR_DR)) {
13467 + serial_in(up, UART_RX);
13468 ++ if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
13469 ++ !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
13470 ++ up->rs485_tx_filter_count)
13471 ++ up->rs485_tx_filter_count--;
13472 ++ }
13473 +
13474 + up->port.icount.rx++;
13475 + flag = TTY_NORMAL;
13476 +@@ -543,6 +546,13 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
13477 + return;
13478 +
13479 + ch = serial_in(up, UART_RX);
13480 ++ if ((up->port.rs485.flags & SER_RS485_ENABLED) &&
13481 ++ !(up->port.rs485.flags & SER_RS485_RX_DURING_TX) &&
13482 ++ up->rs485_tx_filter_count) {
13483 ++ up->rs485_tx_filter_count--;
13484 ++ return;
13485 ++ }
13486 ++
13487 + flag = TTY_NORMAL;
13488 + up->port.icount.rx++;
13489 +
13490 +@@ -1407,18 +1417,13 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
13491 + /* store new config */
13492 + port->rs485 = *rs485;
13493 +
13494 +- /*
13495 +- * Just as a precaution, only allow rs485
13496 +- * to be enabled if the gpio pin is valid
13497 +- */
13498 + if (up->rts_gpiod) {
13499 + /* enable / disable rts */
13500 + val = (port->rs485.flags & SER_RS485_ENABLED) ?
13501 + SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
13502 + val = (port->rs485.flags & val) ? 1 : 0;
13503 + gpiod_set_value(up->rts_gpiod, val);
13504 +- } else
13505 +- port->rs485.flags &= ~SER_RS485_ENABLED;
13506 ++ }
13507 +
13508 + /* Enable interrupts */
13509 + up->ier = mode;
13510 +diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
13511 +index f86ec2d2635b7..9adb8362578c5 100644
13512 +--- a/drivers/tty/serial/sc16is7xx.c
13513 ++++ b/drivers/tty/serial/sc16is7xx.c
13514 +@@ -1196,7 +1196,7 @@ static int sc16is7xx_probe(struct device *dev,
13515 + ret = regmap_read(regmap,
13516 + SC16IS7XX_LSR_REG << SC16IS7XX_REG_SHIFT, &val);
13517 + if (ret < 0)
13518 +- return ret;
13519 ++ return -EPROBE_DEFER;
13520 +
13521 + /* Alloc port structure */
13522 + s = devm_kzalloc(dev, struct_size(s, p, devtype->nr_uart), GFP_KERNEL);
13523 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
13524 +index 828f9ad1be49c..c6cbaccc19b0d 100644
13525 +--- a/drivers/tty/serial/serial_core.c
13526 ++++ b/drivers/tty/serial/serial_core.c
13527 +@@ -1306,7 +1306,7 @@ static int uart_set_rs485_config(struct uart_port *port,
13528 + unsigned long flags;
13529 +
13530 + if (!port->rs485_config)
13531 +- return -ENOIOCTLCMD;
13532 ++ return -ENOTTY;
13533 +
13534 + if (copy_from_user(&rs485, rs485_user, sizeof(*rs485_user)))
13535 + return -EFAULT;
13536 +@@ -1330,7 +1330,7 @@ static int uart_get_iso7816_config(struct uart_port *port,
13537 + struct serial_iso7816 aux;
13538 +
13539 + if (!port->iso7816_config)
13540 +- return -ENOIOCTLCMD;
13541 ++ return -ENOTTY;
13542 +
13543 + spin_lock_irqsave(&port->lock, flags);
13544 + aux = port->iso7816;
13545 +@@ -1350,7 +1350,7 @@ static int uart_set_iso7816_config(struct uart_port *port,
13546 + unsigned long flags;
13547 +
13548 + if (!port->iso7816_config)
13549 +- return -ENOIOCTLCMD;
13550 ++ return -ENOTTY;
13551 +
13552 + if (copy_from_user(&iso7816, iso7816_user, sizeof(*iso7816_user)))
13553 + return -EFAULT;
13554 +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
13555 +index 6248304a001f4..2cf9fc915510c 100644
13556 +--- a/drivers/tty/serial/stm32-usart.c
13557 ++++ b/drivers/tty/serial/stm32-usart.c
13558 +@@ -34,15 +34,15 @@
13559 + #include "serial_mctrl_gpio.h"
13560 + #include "stm32-usart.h"
13561 +
13562 +-static void stm32_stop_tx(struct uart_port *port);
13563 +-static void stm32_transmit_chars(struct uart_port *port);
13564 ++static void stm32_usart_stop_tx(struct uart_port *port);
13565 ++static void stm32_usart_transmit_chars(struct uart_port *port);
13566 +
13567 + static inline struct stm32_port *to_stm32_port(struct uart_port *port)
13568 + {
13569 + return container_of(port, struct stm32_port, port);
13570 + }
13571 +
13572 +-static void stm32_set_bits(struct uart_port *port, u32 reg, u32 bits)
13573 ++static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
13574 + {
13575 + u32 val;
13576 +
13577 +@@ -51,7 +51,7 @@ static void stm32_set_bits(struct uart_port *port, u32 reg, u32 bits)
13578 + writel_relaxed(val, port->membase + reg);
13579 + }
13580 +
13581 +-static void stm32_clr_bits(struct uart_port *port, u32 reg, u32 bits)
13582 ++static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
13583 + {
13584 + u32 val;
13585 +
13586 +@@ -60,8 +60,8 @@ static void stm32_clr_bits(struct uart_port *port, u32 reg, u32 bits)
13587 + writel_relaxed(val, port->membase + reg);
13588 + }
13589 +
13590 +-static void stm32_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
13591 +- u32 delay_DDE, u32 baud)
13592 ++static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
13593 ++ u32 delay_DDE, u32 baud)
13594 + {
13595 + u32 rs485_deat_dedt;
13596 + u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
13597 +@@ -95,16 +95,16 @@ static void stm32_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
13598 + *cr1 |= rs485_deat_dedt;
13599 + }
13600 +
13601 +-static int stm32_config_rs485(struct uart_port *port,
13602 +- struct serial_rs485 *rs485conf)
13603 ++static int stm32_usart_config_rs485(struct uart_port *port,
13604 ++ struct serial_rs485 *rs485conf)
13605 + {
13606 + struct stm32_port *stm32_port = to_stm32_port(port);
13607 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13608 +- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
13609 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13610 ++ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
13611 + u32 usartdiv, baud, cr1, cr3;
13612 + bool over8;
13613 +
13614 +- stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
13615 ++ stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
13616 +
13617 + port->rs485 = *rs485conf;
13618 +
13619 +@@ -122,9 +122,10 @@ static int stm32_config_rs485(struct uart_port *port,
13620 + << USART_BRR_04_R_SHIFT;
13621 +
13622 + baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
13623 +- stm32_config_reg_rs485(&cr1, &cr3,
13624 +- rs485conf->delay_rts_before_send,
13625 +- rs485conf->delay_rts_after_send, baud);
13626 ++ stm32_usart_config_reg_rs485(&cr1, &cr3,
13627 ++ rs485conf->delay_rts_before_send,
13628 ++ rs485conf->delay_rts_after_send,
13629 ++ baud);
13630 +
13631 + if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
13632 + cr3 &= ~USART_CR3_DEP;
13633 +@@ -137,18 +138,19 @@ static int stm32_config_rs485(struct uart_port *port,
13634 + writel_relaxed(cr3, port->membase + ofs->cr3);
13635 + writel_relaxed(cr1, port->membase + ofs->cr1);
13636 + } else {
13637 +- stm32_clr_bits(port, ofs->cr3, USART_CR3_DEM | USART_CR3_DEP);
13638 +- stm32_clr_bits(port, ofs->cr1,
13639 +- USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
13640 ++ stm32_usart_clr_bits(port, ofs->cr3,
13641 ++ USART_CR3_DEM | USART_CR3_DEP);
13642 ++ stm32_usart_clr_bits(port, ofs->cr1,
13643 ++ USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
13644 + }
13645 +
13646 +- stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
13647 ++ stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
13648 +
13649 + return 0;
13650 + }
13651 +
13652 +-static int stm32_init_rs485(struct uart_port *port,
13653 +- struct platform_device *pdev)
13654 ++static int stm32_usart_init_rs485(struct uart_port *port,
13655 ++ struct platform_device *pdev)
13656 + {
13657 + struct serial_rs485 *rs485conf = &port->rs485;
13658 +
13659 +@@ -162,11 +164,11 @@ static int stm32_init_rs485(struct uart_port *port,
13660 + return uart_get_rs485_mode(port);
13661 + }
13662 +
13663 +-static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
13664 +- bool threaded)
13665 ++static int stm32_usart_pending_rx(struct uart_port *port, u32 *sr,
13666 ++ int *last_res, bool threaded)
13667 + {
13668 + struct stm32_port *stm32_port = to_stm32_port(port);
13669 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13670 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13671 + enum dma_status status;
13672 + struct dma_tx_state state;
13673 +
13674 +@@ -176,8 +178,7 @@ static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
13675 + status = dmaengine_tx_status(stm32_port->rx_ch,
13676 + stm32_port->rx_ch->cookie,
13677 + &state);
13678 +- if ((status == DMA_IN_PROGRESS) &&
13679 +- (*last_res != state.residue))
13680 ++ if (status == DMA_IN_PROGRESS && (*last_res != state.residue))
13681 + return 1;
13682 + else
13683 + return 0;
13684 +@@ -187,11 +188,11 @@ static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
13685 + return 0;
13686 + }
13687 +
13688 +-static unsigned long stm32_get_char(struct uart_port *port, u32 *sr,
13689 +- int *last_res)
13690 ++static unsigned long stm32_usart_get_char(struct uart_port *port, u32 *sr,
13691 ++ int *last_res)
13692 + {
13693 + struct stm32_port *stm32_port = to_stm32_port(port);
13694 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13695 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13696 + unsigned long c;
13697 +
13698 + if (stm32_port->rx_ch) {
13699 +@@ -207,19 +208,22 @@ static unsigned long stm32_get_char(struct uart_port *port, u32 *sr,
13700 + return c;
13701 + }
13702 +
13703 +-static void stm32_receive_chars(struct uart_port *port, bool threaded)
13704 ++static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
13705 + {
13706 + struct tty_port *tport = &port->state->port;
13707 + struct stm32_port *stm32_port = to_stm32_port(port);
13708 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13709 +- unsigned long c;
13710 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13711 ++ unsigned long c, flags;
13712 + u32 sr;
13713 + char flag;
13714 +
13715 +- if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
13716 +- pm_wakeup_event(tport->tty->dev, 0);
13717 ++ if (threaded)
13718 ++ spin_lock_irqsave(&port->lock, flags);
13719 ++ else
13720 ++ spin_lock(&port->lock);
13721 +
13722 +- while (stm32_pending_rx(port, &sr, &stm32_port->last_res, threaded)) {
13723 ++ while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
13724 ++ threaded)) {
13725 + sr |= USART_SR_DUMMY_RX;
13726 + flag = TTY_NORMAL;
13727 +
13728 +@@ -238,7 +242,7 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
13729 + writel_relaxed(sr & USART_SR_ERR_MASK,
13730 + port->membase + ofs->icr);
13731 +
13732 +- c = stm32_get_char(port, &sr, &stm32_port->last_res);
13733 ++ c = stm32_usart_get_char(port, &sr, &stm32_port->last_res);
13734 + port->icount.rx++;
13735 + if (sr & USART_SR_ERR_MASK) {
13736 + if (sr & USART_SR_ORE) {
13737 +@@ -273,58 +277,65 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
13738 + uart_insert_char(port, sr, USART_SR_ORE, c, flag);
13739 + }
13740 +
13741 +- spin_unlock(&port->lock);
13742 ++ if (threaded)
13743 ++ spin_unlock_irqrestore(&port->lock, flags);
13744 ++ else
13745 ++ spin_unlock(&port->lock);
13746 ++
13747 + tty_flip_buffer_push(tport);
13748 +- spin_lock(&port->lock);
13749 + }
13750 +
13751 +-static void stm32_tx_dma_complete(void *arg)
13752 ++static void stm32_usart_tx_dma_complete(void *arg)
13753 + {
13754 + struct uart_port *port = arg;
13755 + struct stm32_port *stm32port = to_stm32_port(port);
13756 +- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
13757 ++ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
13758 ++ unsigned long flags;
13759 +
13760 +- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
13761 ++ dmaengine_terminate_async(stm32port->tx_ch);
13762 ++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
13763 + stm32port->tx_dma_busy = false;
13764 +
13765 + /* Let's see if we have pending data to send */
13766 +- stm32_transmit_chars(port);
13767 ++ spin_lock_irqsave(&port->lock, flags);
13768 ++ stm32_usart_transmit_chars(port);
13769 ++ spin_unlock_irqrestore(&port->lock, flags);
13770 + }
13771 +
13772 +-static void stm32_tx_interrupt_enable(struct uart_port *port)
13773 ++static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
13774 + {
13775 + struct stm32_port *stm32_port = to_stm32_port(port);
13776 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13777 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13778 +
13779 + /*
13780 + * Enables TX FIFO threashold irq when FIFO is enabled,
13781 + * or TX empty irq when FIFO is disabled
13782 + */
13783 + if (stm32_port->fifoen)
13784 +- stm32_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
13785 ++ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
13786 + else
13787 +- stm32_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
13788 ++ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
13789 + }
13790 +
13791 +-static void stm32_tx_interrupt_disable(struct uart_port *port)
13792 ++static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
13793 + {
13794 + struct stm32_port *stm32_port = to_stm32_port(port);
13795 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13796 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13797 +
13798 + if (stm32_port->fifoen)
13799 +- stm32_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
13800 ++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
13801 + else
13802 +- stm32_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
13803 ++ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
13804 + }
13805 +
13806 +-static void stm32_transmit_chars_pio(struct uart_port *port)
13807 ++static void stm32_usart_transmit_chars_pio(struct uart_port *port)
13808 + {
13809 + struct stm32_port *stm32_port = to_stm32_port(port);
13810 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13811 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13812 + struct circ_buf *xmit = &port->state->xmit;
13813 +
13814 + if (stm32_port->tx_dma_busy) {
13815 +- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
13816 ++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
13817 + stm32_port->tx_dma_busy = false;
13818 + }
13819 +
13820 +@@ -339,15 +350,15 @@ static void stm32_transmit_chars_pio(struct uart_port *port)
13821 +
13822 + /* rely on TXE irq (mask or unmask) for sending remaining data */
13823 + if (uart_circ_empty(xmit))
13824 +- stm32_tx_interrupt_disable(port);
13825 ++ stm32_usart_tx_interrupt_disable(port);
13826 + else
13827 +- stm32_tx_interrupt_enable(port);
13828 ++ stm32_usart_tx_interrupt_enable(port);
13829 + }
13830 +
13831 +-static void stm32_transmit_chars_dma(struct uart_port *port)
13832 ++static void stm32_usart_transmit_chars_dma(struct uart_port *port)
13833 + {
13834 + struct stm32_port *stm32port = to_stm32_port(port);
13835 +- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
13836 ++ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
13837 + struct circ_buf *xmit = &port->state->xmit;
13838 + struct dma_async_tx_descriptor *desc = NULL;
13839 + unsigned int count, i;
13840 +@@ -386,7 +397,7 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
13841 + if (!desc)
13842 + goto fallback_err;
13843 +
13844 +- desc->callback = stm32_tx_dma_complete;
13845 ++ desc->callback = stm32_usart_tx_dma_complete;
13846 + desc->callback_param = port;
13847 +
13848 + /* Push current DMA TX transaction in the pending queue */
13849 +@@ -399,7 +410,7 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
13850 + /* Issue pending DMA TX requests */
13851 + dma_async_issue_pending(stm32port->tx_ch);
13852 +
13853 +- stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
13854 ++ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
13855 +
13856 + xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
13857 + port->icount.tx += count;
13858 +@@ -407,74 +418,79 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
13859 +
13860 + fallback_err:
13861 + for (i = count; i > 0; i--)
13862 +- stm32_transmit_chars_pio(port);
13863 ++ stm32_usart_transmit_chars_pio(port);
13864 + }
13865 +
13866 +-static void stm32_transmit_chars(struct uart_port *port)
13867 ++static void stm32_usart_transmit_chars(struct uart_port *port)
13868 + {
13869 + struct stm32_port *stm32_port = to_stm32_port(port);
13870 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13871 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13872 + struct circ_buf *xmit = &port->state->xmit;
13873 +
13874 + if (port->x_char) {
13875 + if (stm32_port->tx_dma_busy)
13876 +- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
13877 ++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
13878 + writel_relaxed(port->x_char, port->membase + ofs->tdr);
13879 + port->x_char = 0;
13880 + port->icount.tx++;
13881 + if (stm32_port->tx_dma_busy)
13882 +- stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
13883 ++ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
13884 + return;
13885 + }
13886 +
13887 + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
13888 +- stm32_tx_interrupt_disable(port);
13889 ++ stm32_usart_tx_interrupt_disable(port);
13890 + return;
13891 + }
13892 +
13893 + if (ofs->icr == UNDEF_REG)
13894 +- stm32_clr_bits(port, ofs->isr, USART_SR_TC);
13895 ++ stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
13896 + else
13897 + writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
13898 +
13899 + if (stm32_port->tx_ch)
13900 +- stm32_transmit_chars_dma(port);
13901 ++ stm32_usart_transmit_chars_dma(port);
13902 + else
13903 +- stm32_transmit_chars_pio(port);
13904 ++ stm32_usart_transmit_chars_pio(port);
13905 +
13906 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
13907 + uart_write_wakeup(port);
13908 +
13909 + if (uart_circ_empty(xmit))
13910 +- stm32_tx_interrupt_disable(port);
13911 ++ stm32_usart_tx_interrupt_disable(port);
13912 + }
13913 +
13914 +-static irqreturn_t stm32_interrupt(int irq, void *ptr)
13915 ++static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
13916 + {
13917 + struct uart_port *port = ptr;
13918 ++ struct tty_port *tport = &port->state->port;
13919 + struct stm32_port *stm32_port = to_stm32_port(port);
13920 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13921 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13922 + u32 sr;
13923 +
13924 +- spin_lock(&port->lock);
13925 +-
13926 + sr = readl_relaxed(port->membase + ofs->isr);
13927 +
13928 + if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
13929 + writel_relaxed(USART_ICR_RTOCF,
13930 + port->membase + ofs->icr);
13931 +
13932 +- if ((sr & USART_SR_WUF) && (ofs->icr != UNDEF_REG))
13933 ++ if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
13934 ++ /* Clear wake up flag and disable wake up interrupt */
13935 + writel_relaxed(USART_ICR_WUCF,
13936 + port->membase + ofs->icr);
13937 ++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
13938 ++ if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
13939 ++ pm_wakeup_event(tport->tty->dev, 0);
13940 ++ }
13941 +
13942 + if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
13943 +- stm32_receive_chars(port, false);
13944 +-
13945 +- if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch))
13946 +- stm32_transmit_chars(port);
13947 ++ stm32_usart_receive_chars(port, false);
13948 +
13949 +- spin_unlock(&port->lock);
13950 ++ if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
13951 ++ spin_lock(&port->lock);
13952 ++ stm32_usart_transmit_chars(port);
13953 ++ spin_unlock(&port->lock);
13954 ++ }
13955 +
13956 + if (stm32_port->rx_ch)
13957 + return IRQ_WAKE_THREAD;
13958 +@@ -482,43 +498,42 @@ static irqreturn_t stm32_interrupt(int irq, void *ptr)
13959 + return IRQ_HANDLED;
13960 + }
13961 +
13962 +-static irqreturn_t stm32_threaded_interrupt(int irq, void *ptr)
13963 ++static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
13964 + {
13965 + struct uart_port *port = ptr;
13966 + struct stm32_port *stm32_port = to_stm32_port(port);
13967 +
13968 +- spin_lock(&port->lock);
13969 +-
13970 + if (stm32_port->rx_ch)
13971 +- stm32_receive_chars(port, true);
13972 +-
13973 +- spin_unlock(&port->lock);
13974 ++ stm32_usart_receive_chars(port, true);
13975 +
13976 + return IRQ_HANDLED;
13977 + }
13978 +
13979 +-static unsigned int stm32_tx_empty(struct uart_port *port)
13980 ++static unsigned int stm32_usart_tx_empty(struct uart_port *port)
13981 + {
13982 + struct stm32_port *stm32_port = to_stm32_port(port);
13983 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13984 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13985 +
13986 +- return readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE;
13987 ++ if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
13988 ++ return TIOCSER_TEMT;
13989 ++
13990 ++ return 0;
13991 + }
13992 +
13993 +-static void stm32_set_mctrl(struct uart_port *port, unsigned int mctrl)
13994 ++static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
13995 + {
13996 + struct stm32_port *stm32_port = to_stm32_port(port);
13997 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13998 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
13999 +
14000 + if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
14001 +- stm32_set_bits(port, ofs->cr3, USART_CR3_RTSE);
14002 ++ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
14003 + else
14004 +- stm32_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
14005 ++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);
14006 +
14007 + mctrl_gpio_set(stm32_port->gpios, mctrl);
14008 + }
14009 +
14010 +-static unsigned int stm32_get_mctrl(struct uart_port *port)
14011 ++static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
14012 + {
14013 + struct stm32_port *stm32_port = to_stm32_port(port);
14014 + unsigned int ret;
14015 +@@ -529,23 +544,23 @@ static unsigned int stm32_get_mctrl(struct uart_port *port)
14016 + return mctrl_gpio_get(stm32_port->gpios, &ret);
14017 + }
14018 +
14019 +-static void stm32_enable_ms(struct uart_port *port)
14020 ++static void stm32_usart_enable_ms(struct uart_port *port)
14021 + {
14022 + mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
14023 + }
14024 +
14025 +-static void stm32_disable_ms(struct uart_port *port)
14026 ++static void stm32_usart_disable_ms(struct uart_port *port)
14027 + {
14028 + mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
14029 + }
14030 +
14031 + /* Transmit stop */
14032 +-static void stm32_stop_tx(struct uart_port *port)
14033 ++static void stm32_usart_stop_tx(struct uart_port *port)
14034 + {
14035 + struct stm32_port *stm32_port = to_stm32_port(port);
14036 + struct serial_rs485 *rs485conf = &port->rs485;
14037 +
14038 +- stm32_tx_interrupt_disable(port);
14039 ++ stm32_usart_tx_interrupt_disable(port);
14040 +
14041 + if (rs485conf->flags & SER_RS485_ENABLED) {
14042 + if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
14043 +@@ -559,7 +574,7 @@ static void stm32_stop_tx(struct uart_port *port)
14044 + }
14045 +
14046 + /* There are probably characters waiting to be transmitted. */
14047 +-static void stm32_start_tx(struct uart_port *port)
14048 ++static void stm32_usart_start_tx(struct uart_port *port)
14049 + {
14050 + struct stm32_port *stm32_port = to_stm32_port(port);
14051 + struct serial_rs485 *rs485conf = &port->rs485;
14052 +@@ -578,102 +593,91 @@ static void stm32_start_tx(struct uart_port *port)
14053 + }
14054 + }
14055 +
14056 +- stm32_transmit_chars(port);
14057 ++ stm32_usart_transmit_chars(port);
14058 + }
14059 +
14060 + /* Throttle the remote when input buffer is about to overflow. */
14061 +-static void stm32_throttle(struct uart_port *port)
14062 ++static void stm32_usart_throttle(struct uart_port *port)
14063 + {
14064 + struct stm32_port *stm32_port = to_stm32_port(port);
14065 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14066 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14067 + unsigned long flags;
14068 +
14069 + spin_lock_irqsave(&port->lock, flags);
14070 +- stm32_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
14071 ++ stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
14072 + if (stm32_port->cr3_irq)
14073 +- stm32_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
14074 ++ stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
14075 +
14076 + spin_unlock_irqrestore(&port->lock, flags);
14077 + }
14078 +
14079 + /* Unthrottle the remote, the input buffer can now accept data. */
14080 +-static void stm32_unthrottle(struct uart_port *port)
14081 ++static void stm32_usart_unthrottle(struct uart_port *port)
14082 + {
14083 + struct stm32_port *stm32_port = to_stm32_port(port);
14084 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14085 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14086 + unsigned long flags;
14087 +
14088 + spin_lock_irqsave(&port->lock, flags);
14089 +- stm32_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
14090 ++ stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
14091 + if (stm32_port->cr3_irq)
14092 +- stm32_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
14093 ++ stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
14094 +
14095 + spin_unlock_irqrestore(&port->lock, flags);
14096 + }
14097 +
14098 + /* Receive stop */
14099 +-static void stm32_stop_rx(struct uart_port *port)
14100 ++static void stm32_usart_stop_rx(struct uart_port *port)
14101 + {
14102 + struct stm32_port *stm32_port = to_stm32_port(port);
14103 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14104 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14105 +
14106 +- stm32_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
14107 ++ stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
14108 + if (stm32_port->cr3_irq)
14109 +- stm32_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
14110 +-
14111 ++ stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
14112 + }
14113 +
14114 + /* Handle breaks - ignored by us */
14115 +-static void stm32_break_ctl(struct uart_port *port, int break_state)
14116 ++static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
14117 + {
14118 + }
14119 +
14120 +-static int stm32_startup(struct uart_port *port)
14121 ++static int stm32_usart_startup(struct uart_port *port)
14122 + {
14123 + struct stm32_port *stm32_port = to_stm32_port(port);
14124 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14125 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14126 ++ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
14127 + const char *name = to_platform_device(port->dev)->name;
14128 + u32 val;
14129 + int ret;
14130 +
14131 +- ret = request_threaded_irq(port->irq, stm32_interrupt,
14132 +- stm32_threaded_interrupt,
14133 ++ ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
14134 ++ stm32_usart_threaded_interrupt,
14135 + IRQF_NO_SUSPEND, name, port);
14136 + if (ret)
14137 + return ret;
14138 +
14139 + /* RX FIFO Flush */
14140 + if (ofs->rqr != UNDEF_REG)
14141 +- stm32_set_bits(port, ofs->rqr, USART_RQR_RXFRQ);
14142 ++ writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);
14143 +
14144 +- /* Tx and RX FIFO configuration */
14145 +- if (stm32_port->fifoen) {
14146 +- val = readl_relaxed(port->membase + ofs->cr3);
14147 +- val &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
14148 +- val |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
14149 +- val |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
14150 +- writel_relaxed(val, port->membase + ofs->cr3);
14151 +- }
14152 +-
14153 +- /* RX FIFO enabling */
14154 +- val = stm32_port->cr1_irq | USART_CR1_RE;
14155 +- if (stm32_port->fifoen)
14156 +- val |= USART_CR1_FIFOEN;
14157 +- stm32_set_bits(port, ofs->cr1, val);
14158 ++ /* RX enabling */
14159 ++ val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
14160 ++ stm32_usart_set_bits(port, ofs->cr1, val);
14161 +
14162 + return 0;
14163 + }
14164 +
14165 +-static void stm32_shutdown(struct uart_port *port)
14166 ++static void stm32_usart_shutdown(struct uart_port *port)
14167 + {
14168 + struct stm32_port *stm32_port = to_stm32_port(port);
14169 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14170 +- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
14171 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14172 ++ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
14173 + u32 val, isr;
14174 + int ret;
14175 +
14176 + /* Disable modem control interrupts */
14177 +- stm32_disable_ms(port);
14178 ++ stm32_usart_disable_ms(port);
14179 +
14180 + val = USART_CR1_TXEIE | USART_CR1_TE;
14181 + val |= stm32_port->cr1_irq | USART_CR1_RE;
14182 +@@ -688,12 +692,17 @@ static void stm32_shutdown(struct uart_port *port)
14183 + if (ret)
14184 + dev_err(port->dev, "transmission complete not set\n");
14185 +
14186 +- stm32_clr_bits(port, ofs->cr1, val);
14187 ++ /* flush RX & TX FIFO */
14188 ++ if (ofs->rqr != UNDEF_REG)
14189 ++ writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
14190 ++ port->membase + ofs->rqr);
14191 ++
14192 ++ stm32_usart_clr_bits(port, ofs->cr1, val);
14193 +
14194 + free_irq(port->irq, port);
14195 + }
14196 +
14197 +-static unsigned int stm32_get_databits(struct ktermios *termios)
14198 ++static unsigned int stm32_usart_get_databits(struct ktermios *termios)
14199 + {
14200 + unsigned int bits;
14201 +
14202 +@@ -723,18 +732,20 @@ static unsigned int stm32_get_databits(struct ktermios *termios)
14203 + return bits;
14204 + }
14205 +
14206 +-static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
14207 +- struct ktermios *old)
14208 ++static void stm32_usart_set_termios(struct uart_port *port,
14209 ++ struct ktermios *termios,
14210 ++ struct ktermios *old)
14211 + {
14212 + struct stm32_port *stm32_port = to_stm32_port(port);
14213 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14214 +- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
14215 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14216 ++ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
14217 + struct serial_rs485 *rs485conf = &port->rs485;
14218 + unsigned int baud, bits;
14219 + u32 usartdiv, mantissa, fraction, oversampling;
14220 + tcflag_t cflag = termios->c_cflag;
14221 +- u32 cr1, cr2, cr3;
14222 ++ u32 cr1, cr2, cr3, isr;
14223 + unsigned long flags;
14224 ++ int ret;
14225 +
14226 + if (!stm32_port->hw_flow_control)
14227 + cflag &= ~CRTSCTS;
14228 +@@ -743,26 +754,41 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
14229 +
14230 + spin_lock_irqsave(&port->lock, flags);
14231 +
14232 ++ ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
14233 ++ isr,
14234 ++ (isr & USART_SR_TC),
14235 ++ 10, 100000);
14236 ++
14237 ++ /* Send the TC error message only when ISR_TC is not set. */
14238 ++ if (ret)
14239 ++ dev_err(port->dev, "Transmission is not complete\n");
14240 ++
14241 + /* Stop serial port and reset value */
14242 + writel_relaxed(0, port->membase + ofs->cr1);
14243 +
14244 + /* flush RX & TX FIFO */
14245 + if (ofs->rqr != UNDEF_REG)
14246 +- stm32_set_bits(port, ofs->rqr,
14247 +- USART_RQR_TXFRQ | USART_RQR_RXFRQ);
14248 ++ writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
14249 ++ port->membase + ofs->rqr);
14250 +
14251 + cr1 = USART_CR1_TE | USART_CR1_RE;
14252 + if (stm32_port->fifoen)
14253 + cr1 |= USART_CR1_FIFOEN;
14254 + cr2 = 0;
14255 ++
14256 ++ /* Tx and RX FIFO configuration */
14257 + cr3 = readl_relaxed(port->membase + ofs->cr3);
14258 +- cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTCFG_MASK | USART_CR3_RXFTIE
14259 +- | USART_CR3_TXFTCFG_MASK;
14260 ++ cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
14261 ++ if (stm32_port->fifoen) {
14262 ++ cr3 &= ~(USART_CR3_TXFTCFG_MASK | USART_CR3_RXFTCFG_MASK);
14263 ++ cr3 |= USART_CR3_TXFTCFG_HALF << USART_CR3_TXFTCFG_SHIFT;
14264 ++ cr3 |= USART_CR3_RXFTCFG_HALF << USART_CR3_RXFTCFG_SHIFT;
14265 ++ }
14266 +
14267 + if (cflag & CSTOPB)
14268 + cr2 |= USART_CR2_STOP_2B;
14269 +
14270 +- bits = stm32_get_databits(termios);
14271 ++ bits = stm32_usart_get_databits(termios);
14272 + stm32_port->rdr_mask = (BIT(bits) - 1);
14273 +
14274 + if (cflag & PARENB) {
14275 +@@ -813,12 +839,6 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
14276 + cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
14277 + }
14278 +
14279 +- /* Handle modem control interrupts */
14280 +- if (UART_ENABLE_MS(port, termios->c_cflag))
14281 +- stm32_enable_ms(port);
14282 +- else
14283 +- stm32_disable_ms(port);
14284 +-
14285 + usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
14286 +
14287 + /*
14288 +@@ -830,11 +850,11 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
14289 + if (usartdiv < 16) {
14290 + oversampling = 8;
14291 + cr1 |= USART_CR1_OVER8;
14292 +- stm32_set_bits(port, ofs->cr1, USART_CR1_OVER8);
14293 ++ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
14294 + } else {
14295 + oversampling = 16;
14296 + cr1 &= ~USART_CR1_OVER8;
14297 +- stm32_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
14298 ++ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
14299 + }
14300 +
14301 + mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
14302 +@@ -871,9 +891,10 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
14303 + cr3 |= USART_CR3_DMAR;
14304 +
14305 + if (rs485conf->flags & SER_RS485_ENABLED) {
14306 +- stm32_config_reg_rs485(&cr1, &cr3,
14307 +- rs485conf->delay_rts_before_send,
14308 +- rs485conf->delay_rts_after_send, baud);
14309 ++ stm32_usart_config_reg_rs485(&cr1, &cr3,
14310 ++ rs485conf->delay_rts_before_send,
14311 ++ rs485conf->delay_rts_after_send,
14312 ++ baud);
14313 + if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
14314 + cr3 &= ~USART_CR3_DEP;
14315 + rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
14316 +@@ -887,48 +908,60 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
14317 + cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
14318 + }
14319 +
14320 ++ /* Configure wake up from low power on start bit detection */
14321 ++ if (stm32_port->wakeirq > 0) {
14322 ++ cr3 &= ~USART_CR3_WUS_MASK;
14323 ++ cr3 |= USART_CR3_WUS_START_BIT;
14324 ++ }
14325 ++
14326 + writel_relaxed(cr3, port->membase + ofs->cr3);
14327 + writel_relaxed(cr2, port->membase + ofs->cr2);
14328 + writel_relaxed(cr1, port->membase + ofs->cr1);
14329 +
14330 +- stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
14331 ++ stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
14332 + spin_unlock_irqrestore(&port->lock, flags);
14333 ++
14334 ++ /* Handle modem control interrupts */
14335 ++ if (UART_ENABLE_MS(port, termios->c_cflag))
14336 ++ stm32_usart_enable_ms(port);
14337 ++ else
14338 ++ stm32_usart_disable_ms(port);
14339 + }
14340 +
14341 +-static const char *stm32_type(struct uart_port *port)
14342 ++static const char *stm32_usart_type(struct uart_port *port)
14343 + {
14344 + return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
14345 + }
14346 +
14347 +-static void stm32_release_port(struct uart_port *port)
14348 ++static void stm32_usart_release_port(struct uart_port *port)
14349 + {
14350 + }
14351 +
14352 +-static int stm32_request_port(struct uart_port *port)
14353 ++static int stm32_usart_request_port(struct uart_port *port)
14354 + {
14355 + return 0;
14356 + }
14357 +
14358 +-static void stm32_config_port(struct uart_port *port, int flags)
14359 ++static void stm32_usart_config_port(struct uart_port *port, int flags)
14360 + {
14361 + if (flags & UART_CONFIG_TYPE)
14362 + port->type = PORT_STM32;
14363 + }
14364 +
14365 + static int
14366 +-stm32_verify_port(struct uart_port *port, struct serial_struct *ser)
14367 ++stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
14368 + {
14369 + /* No user changeable parameters */
14370 + return -EINVAL;
14371 + }
14372 +
14373 +-static void stm32_pm(struct uart_port *port, unsigned int state,
14374 +- unsigned int oldstate)
14375 ++static void stm32_usart_pm(struct uart_port *port, unsigned int state,
14376 ++ unsigned int oldstate)
14377 + {
14378 + struct stm32_port *stm32port = container_of(port,
14379 + struct stm32_port, port);
14380 +- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
14381 +- struct stm32_usart_config *cfg = &stm32port->info->cfg;
14382 ++ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
14383 ++ const struct stm32_usart_config *cfg = &stm32port->info->cfg;
14384 + unsigned long flags = 0;
14385 +
14386 + switch (state) {
14387 +@@ -937,7 +970,7 @@ static void stm32_pm(struct uart_port *port, unsigned int state,
14388 + break;
14389 + case UART_PM_STATE_OFF:
14390 + spin_lock_irqsave(&port->lock, flags);
14391 +- stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
14392 ++ stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
14393 + spin_unlock_irqrestore(&port->lock, flags);
14394 + pm_runtime_put_sync(port->dev);
14395 + break;
14396 +@@ -945,49 +978,48 @@ static void stm32_pm(struct uart_port *port, unsigned int state,
14397 + }
14398 +
14399 + static const struct uart_ops stm32_uart_ops = {
14400 +- .tx_empty = stm32_tx_empty,
14401 +- .set_mctrl = stm32_set_mctrl,
14402 +- .get_mctrl = stm32_get_mctrl,
14403 +- .stop_tx = stm32_stop_tx,
14404 +- .start_tx = stm32_start_tx,
14405 +- .throttle = stm32_throttle,
14406 +- .unthrottle = stm32_unthrottle,
14407 +- .stop_rx = stm32_stop_rx,
14408 +- .enable_ms = stm32_enable_ms,
14409 +- .break_ctl = stm32_break_ctl,
14410 +- .startup = stm32_startup,
14411 +- .shutdown = stm32_shutdown,
14412 +- .set_termios = stm32_set_termios,
14413 +- .pm = stm32_pm,
14414 +- .type = stm32_type,
14415 +- .release_port = stm32_release_port,
14416 +- .request_port = stm32_request_port,
14417 +- .config_port = stm32_config_port,
14418 +- .verify_port = stm32_verify_port,
14419 ++ .tx_empty = stm32_usart_tx_empty,
14420 ++ .set_mctrl = stm32_usart_set_mctrl,
14421 ++ .get_mctrl = stm32_usart_get_mctrl,
14422 ++ .stop_tx = stm32_usart_stop_tx,
14423 ++ .start_tx = stm32_usart_start_tx,
14424 ++ .throttle = stm32_usart_throttle,
14425 ++ .unthrottle = stm32_usart_unthrottle,
14426 ++ .stop_rx = stm32_usart_stop_rx,
14427 ++ .enable_ms = stm32_usart_enable_ms,
14428 ++ .break_ctl = stm32_usart_break_ctl,
14429 ++ .startup = stm32_usart_startup,
14430 ++ .shutdown = stm32_usart_shutdown,
14431 ++ .set_termios = stm32_usart_set_termios,
14432 ++ .pm = stm32_usart_pm,
14433 ++ .type = stm32_usart_type,
14434 ++ .release_port = stm32_usart_release_port,
14435 ++ .request_port = stm32_usart_request_port,
14436 ++ .config_port = stm32_usart_config_port,
14437 ++ .verify_port = stm32_usart_verify_port,
14438 + };
14439 +
14440 +-static int stm32_init_port(struct stm32_port *stm32port,
14441 +- struct platform_device *pdev)
14442 ++static int stm32_usart_init_port(struct stm32_port *stm32port,
14443 ++ struct platform_device *pdev)
14444 + {
14445 + struct uart_port *port = &stm32port->port;
14446 + struct resource *res;
14447 + int ret;
14448 +
14449 ++ ret = platform_get_irq(pdev, 0);
14450 ++ if (ret <= 0)
14451 ++ return ret ? : -ENODEV;
14452 ++
14453 + port->iotype = UPIO_MEM;
14454 + port->flags = UPF_BOOT_AUTOCONF;
14455 + port->ops = &stm32_uart_ops;
14456 + port->dev = &pdev->dev;
14457 + port->fifosize = stm32port->info->cfg.fifosize;
14458 + port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
14459 +-
14460 +- ret = platform_get_irq(pdev, 0);
14461 +- if (ret <= 0)
14462 +- return ret ? : -ENODEV;
14463 + port->irq = ret;
14464 ++ port->rs485_config = stm32_usart_config_rs485;
14465 +
14466 +- port->rs485_config = stm32_config_rs485;
14467 +-
14468 +- ret = stm32_init_rs485(port, pdev);
14469 ++ ret = stm32_usart_init_rs485(port, pdev);
14470 + if (ret)
14471 + return ret;
14472 +
14473 +@@ -1046,7 +1078,7 @@ err_clk:
14474 + return ret;
14475 + }
14476 +
14477 +-static struct stm32_port *stm32_of_get_stm32_port(struct platform_device *pdev)
14478 ++static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
14479 + {
14480 + struct device_node *np = pdev->dev.of_node;
14481 + int id;
14482 +@@ -1084,10 +1116,10 @@ static const struct of_device_id stm32_match[] = {
14483 + MODULE_DEVICE_TABLE(of, stm32_match);
14484 + #endif
14485 +
14486 +-static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
14487 +- struct platform_device *pdev)
14488 ++static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
14489 ++ struct platform_device *pdev)
14490 + {
14491 +- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
14492 ++ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
14493 + struct uart_port *port = &stm32port->port;
14494 + struct device *dev = &pdev->dev;
14495 + struct dma_slave_config config;
14496 +@@ -1101,8 +1133,8 @@ static int stm32_of_dma_rx_probe(struct stm32_port *stm32port,
14497 + return -ENODEV;
14498 + }
14499 + stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
14500 +- &stm32port->rx_dma_buf,
14501 +- GFP_KERNEL);
14502 ++ &stm32port->rx_dma_buf,
14503 ++ GFP_KERNEL);
14504 + if (!stm32port->rx_buf) {
14505 + ret = -ENOMEM;
14506 + goto alloc_err;
14507 +@@ -1159,10 +1191,10 @@ alloc_err:
14508 + return ret;
14509 + }
14510 +
14511 +-static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
14512 +- struct platform_device *pdev)
14513 ++static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
14514 ++ struct platform_device *pdev)
14515 + {
14516 +- struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
14517 ++ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
14518 + struct uart_port *port = &stm32port->port;
14519 + struct device *dev = &pdev->dev;
14520 + struct dma_slave_config config;
14521 +@@ -1177,8 +1209,8 @@ static int stm32_of_dma_tx_probe(struct stm32_port *stm32port,
14522 + return -ENODEV;
14523 + }
14524 + stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
14525 +- &stm32port->tx_dma_buf,
14526 +- GFP_KERNEL);
14527 ++ &stm32port->tx_dma_buf,
14528 ++ GFP_KERNEL);
14529 + if (!stm32port->tx_buf) {
14530 + ret = -ENOMEM;
14531 + goto alloc_err;
14532 +@@ -1210,23 +1242,20 @@ alloc_err:
14533 + return ret;
14534 + }
14535 +
14536 +-static int stm32_serial_probe(struct platform_device *pdev)
14537 ++static int stm32_usart_serial_probe(struct platform_device *pdev)
14538 + {
14539 +- const struct of_device_id *match;
14540 + struct stm32_port *stm32port;
14541 + int ret;
14542 +
14543 +- stm32port = stm32_of_get_stm32_port(pdev);
14544 ++ stm32port = stm32_usart_of_get_port(pdev);
14545 + if (!stm32port)
14546 + return -ENODEV;
14547 +
14548 +- match = of_match_device(stm32_match, &pdev->dev);
14549 +- if (match && match->data)
14550 +- stm32port->info = (struct stm32_usart_info *)match->data;
14551 +- else
14552 ++ stm32port->info = of_device_get_match_data(&pdev->dev);
14553 ++ if (!stm32port->info)
14554 + return -EINVAL;
14555 +
14556 +- ret = stm32_init_port(stm32port, pdev);
14557 ++ ret = stm32_usart_init_port(stm32port, pdev);
14558 + if (ret)
14559 + return ret;
14560 +
14561 +@@ -1243,15 +1272,11 @@ static int stm32_serial_probe(struct platform_device *pdev)
14562 + device_set_wakeup_enable(&pdev->dev, false);
14563 + }
14564 +
14565 +- ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
14566 +- if (ret)
14567 +- goto err_wirq;
14568 +-
14569 +- ret = stm32_of_dma_rx_probe(stm32port, pdev);
14570 ++ ret = stm32_usart_of_dma_rx_probe(stm32port, pdev);
14571 + if (ret)
14572 + dev_info(&pdev->dev, "interrupt mode used for rx (no dma)\n");
14573 +
14574 +- ret = stm32_of_dma_tx_probe(stm32port, pdev);
14575 ++ ret = stm32_usart_of_dma_tx_probe(stm32port, pdev);
14576 + if (ret)
14577 + dev_info(&pdev->dev, "interrupt mode used for tx (no dma)\n");
14578 +
14579 +@@ -1260,11 +1285,40 @@ static int stm32_serial_probe(struct platform_device *pdev)
14580 + pm_runtime_get_noresume(&pdev->dev);
14581 + pm_runtime_set_active(&pdev->dev);
14582 + pm_runtime_enable(&pdev->dev);
14583 ++
14584 ++ ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
14585 ++ if (ret)
14586 ++ goto err_port;
14587 ++
14588 + pm_runtime_put_sync(&pdev->dev);
14589 +
14590 + return 0;
14591 +
14592 +-err_wirq:
14593 ++err_port:
14594 ++ pm_runtime_disable(&pdev->dev);
14595 ++ pm_runtime_set_suspended(&pdev->dev);
14596 ++ pm_runtime_put_noidle(&pdev->dev);
14597 ++
14598 ++ if (stm32port->rx_ch) {
14599 ++ dmaengine_terminate_async(stm32port->rx_ch);
14600 ++ dma_release_channel(stm32port->rx_ch);
14601 ++ }
14602 ++
14603 ++ if (stm32port->rx_dma_buf)
14604 ++ dma_free_coherent(&pdev->dev,
14605 ++ RX_BUF_L, stm32port->rx_buf,
14606 ++ stm32port->rx_dma_buf);
14607 ++
14608 ++ if (stm32port->tx_ch) {
14609 ++ dmaengine_terminate_async(stm32port->tx_ch);
14610 ++ dma_release_channel(stm32port->tx_ch);
14611 ++ }
14612 ++
14613 ++ if (stm32port->tx_dma_buf)
14614 ++ dma_free_coherent(&pdev->dev,
14615 ++ TX_BUF_L, stm32port->tx_buf,
14616 ++ stm32port->tx_dma_buf);
14617 ++
14618 + if (stm32port->wakeirq > 0)
14619 + dev_pm_clear_wake_irq(&pdev->dev);
14620 +
14621 +@@ -1278,29 +1332,40 @@ err_uninit:
14622 + return ret;
14623 + }
14624 +
14625 +-static int stm32_serial_remove(struct platform_device *pdev)
14626 ++static int stm32_usart_serial_remove(struct platform_device *pdev)
14627 + {
14628 + struct uart_port *port = platform_get_drvdata(pdev);
14629 + struct stm32_port *stm32_port = to_stm32_port(port);
14630 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14631 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14632 + int err;
14633 +
14634 + pm_runtime_get_sync(&pdev->dev);
14635 ++ err = uart_remove_one_port(&stm32_usart_driver, port);
14636 ++ if (err)
14637 ++ return(err);
14638 +
14639 +- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
14640 ++ pm_runtime_disable(&pdev->dev);
14641 ++ pm_runtime_set_suspended(&pdev->dev);
14642 ++ pm_runtime_put_noidle(&pdev->dev);
14643 +
14644 +- if (stm32_port->rx_ch)
14645 ++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
14646 ++
14647 ++ if (stm32_port->rx_ch) {
14648 ++ dmaengine_terminate_async(stm32_port->rx_ch);
14649 + dma_release_channel(stm32_port->rx_ch);
14650 ++ }
14651 +
14652 + if (stm32_port->rx_dma_buf)
14653 + dma_free_coherent(&pdev->dev,
14654 + RX_BUF_L, stm32_port->rx_buf,
14655 + stm32_port->rx_dma_buf);
14656 +
14657 +- stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
14658 ++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
14659 +
14660 +- if (stm32_port->tx_ch)
14661 ++ if (stm32_port->tx_ch) {
14662 ++ dmaengine_terminate_async(stm32_port->tx_ch);
14663 + dma_release_channel(stm32_port->tx_ch);
14664 ++ }
14665 +
14666 + if (stm32_port->tx_dma_buf)
14667 + dma_free_coherent(&pdev->dev,
14668 +@@ -1314,20 +1379,14 @@ static int stm32_serial_remove(struct platform_device *pdev)
14669 +
14670 + clk_disable_unprepare(stm32_port->clk);
14671 +
14672 +- err = uart_remove_one_port(&stm32_usart_driver, port);
14673 +-
14674 +- pm_runtime_disable(&pdev->dev);
14675 +- pm_runtime_put_noidle(&pdev->dev);
14676 +-
14677 +- return err;
14678 ++ return 0;
14679 + }
14680 +
14681 +-
14682 + #ifdef CONFIG_SERIAL_STM32_CONSOLE
14683 +-static void stm32_console_putchar(struct uart_port *port, int ch)
14684 ++static void stm32_usart_console_putchar(struct uart_port *port, int ch)
14685 + {
14686 + struct stm32_port *stm32_port = to_stm32_port(port);
14687 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14688 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14689 +
14690 + while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
14691 + cpu_relax();
14692 +@@ -1335,12 +1394,13 @@ static void stm32_console_putchar(struct uart_port *port, int ch)
14693 + writel_relaxed(ch, port->membase + ofs->tdr);
14694 + }
14695 +
14696 +-static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
14697 ++static void stm32_usart_console_write(struct console *co, const char *s,
14698 ++ unsigned int cnt)
14699 + {
14700 + struct uart_port *port = &stm32_ports[co->index].port;
14701 + struct stm32_port *stm32_port = to_stm32_port(port);
14702 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14703 +- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
14704 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14705 ++ const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
14706 + unsigned long flags;
14707 + u32 old_cr1, new_cr1;
14708 + int locked = 1;
14709 +@@ -1359,7 +1419,7 @@ static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
14710 + new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
14711 + writel_relaxed(new_cr1, port->membase + ofs->cr1);
14712 +
14713 +- uart_console_write(port, s, cnt, stm32_console_putchar);
14714 ++ uart_console_write(port, s, cnt, stm32_usart_console_putchar);
14715 +
14716 + /* Restore interrupt state */
14717 + writel_relaxed(old_cr1, port->membase + ofs->cr1);
14718 +@@ -1369,7 +1429,7 @@ static void stm32_console_write(struct console *co, const char *s, unsigned cnt)
14719 + local_irq_restore(flags);
14720 + }
14721 +
14722 +-static int stm32_console_setup(struct console *co, char *options)
14723 ++static int stm32_usart_console_setup(struct console *co, char *options)
14724 + {
14725 + struct stm32_port *stm32port;
14726 + int baud = 9600;
14727 +@@ -1388,7 +1448,7 @@ static int stm32_console_setup(struct console *co, char *options)
14728 + * this to be called during the uart port registration when the
14729 + * driver gets probed and the port should be mapped at that point.
14730 + */
14731 +- if (stm32port->port.mapbase == 0 || stm32port->port.membase == NULL)
14732 ++ if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
14733 + return -ENXIO;
14734 +
14735 + if (options)
14736 +@@ -1400,8 +1460,8 @@ static int stm32_console_setup(struct console *co, char *options)
14737 + static struct console stm32_console = {
14738 + .name = STM32_SERIAL_NAME,
14739 + .device = uart_console_device,
14740 +- .write = stm32_console_write,
14741 +- .setup = stm32_console_setup,
14742 ++ .write = stm32_usart_console_write,
14743 ++ .setup = stm32_usart_console_setup,
14744 + .flags = CON_PRINTBUFFER,
14745 + .index = -1,
14746 + .data = &stm32_usart_driver,
14747 +@@ -1422,41 +1482,38 @@ static struct uart_driver stm32_usart_driver = {
14748 + .cons = STM32_SERIAL_CONSOLE,
14749 + };
14750 +
14751 +-static void __maybe_unused stm32_serial_enable_wakeup(struct uart_port *port,
14752 +- bool enable)
14753 ++static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
14754 ++ bool enable)
14755 + {
14756 + struct stm32_port *stm32_port = to_stm32_port(port);
14757 +- struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14758 +- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
14759 +- u32 val;
14760 ++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
14761 +
14762 + if (stm32_port->wakeirq <= 0)
14763 + return;
14764 +
14765 ++ /*
14766 ++ * Enable low-power wake-up and wake-up irq if argument is set to
14767 ++ * "enable", disable low-power wake-up and wake-up irq otherwise
14768 ++ */
14769 + if (enable) {
14770 +- stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
14771 +- stm32_set_bits(port, ofs->cr1, USART_CR1_UESM);
14772 +- val = readl_relaxed(port->membase + ofs->cr3);
14773 +- val &= ~USART_CR3_WUS_MASK;
14774 +- /* Enable Wake up interrupt from low power on start bit */
14775 +- val |= USART_CR3_WUS_START_BIT | USART_CR3_WUFIE;
14776 +- writel_relaxed(val, port->membase + ofs->cr3);
14777 +- stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
14778 ++ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
14779 ++ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
14780 + } else {
14781 +- stm32_clr_bits(port, ofs->cr1, USART_CR1_UESM);
14782 ++ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
14783 ++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
14784 + }
14785 + }
14786 +
14787 +-static int __maybe_unused stm32_serial_suspend(struct device *dev)
14788 ++static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
14789 + {
14790 + struct uart_port *port = dev_get_drvdata(dev);
14791 +
14792 + uart_suspend_port(&stm32_usart_driver, port);
14793 +
14794 + if (device_may_wakeup(dev))
14795 +- stm32_serial_enable_wakeup(port, true);
14796 ++ stm32_usart_serial_en_wakeup(port, true);
14797 + else
14798 +- stm32_serial_enable_wakeup(port, false);
14799 ++ stm32_usart_serial_en_wakeup(port, false);
14800 +
14801 + /*
14802 + * When "no_console_suspend" is enabled, keep the pinctrl default state
14803 +@@ -1474,19 +1531,19 @@ static int __maybe_unused stm32_serial_suspend(struct device *dev)
14804 + return 0;
14805 + }
14806 +
14807 +-static int __maybe_unused stm32_serial_resume(struct device *dev)
14808 ++static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
14809 + {
14810 + struct uart_port *port = dev_get_drvdata(dev);
14811 +
14812 + pinctrl_pm_select_default_state(dev);
14813 +
14814 + if (device_may_wakeup(dev))
14815 +- stm32_serial_enable_wakeup(port, false);
14816 ++ stm32_usart_serial_en_wakeup(port, false);
14817 +
14818 + return uart_resume_port(&stm32_usart_driver, port);
14819 + }
14820 +
14821 +-static int __maybe_unused stm32_serial_runtime_suspend(struct device *dev)
14822 ++static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
14823 + {
14824 + struct uart_port *port = dev_get_drvdata(dev);
14825 + struct stm32_port *stm32port = container_of(port,
14826 +@@ -1497,7 +1554,7 @@ static int __maybe_unused stm32_serial_runtime_suspend(struct device *dev)
14827 + return 0;
14828 + }
14829 +
14830 +-static int __maybe_unused stm32_serial_runtime_resume(struct device *dev)
14831 ++static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
14832 + {
14833 + struct uart_port *port = dev_get_drvdata(dev);
14834 + struct stm32_port *stm32port = container_of(port,
14835 +@@ -1507,14 +1564,15 @@ static int __maybe_unused stm32_serial_runtime_resume(struct device *dev)
14836 + }
14837 +
14838 + static const struct dev_pm_ops stm32_serial_pm_ops = {
14839 +- SET_RUNTIME_PM_OPS(stm32_serial_runtime_suspend,
14840 +- stm32_serial_runtime_resume, NULL)
14841 +- SET_SYSTEM_SLEEP_PM_OPS(stm32_serial_suspend, stm32_serial_resume)
14842 ++ SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
14843 ++ stm32_usart_runtime_resume, NULL)
14844 ++ SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
14845 ++ stm32_usart_serial_resume)
14846 + };
14847 +
14848 + static struct platform_driver stm32_serial_driver = {
14849 +- .probe = stm32_serial_probe,
14850 +- .remove = stm32_serial_remove,
14851 ++ .probe = stm32_usart_serial_probe,
14852 ++ .remove = stm32_usart_serial_remove,
14853 + .driver = {
14854 + .name = DRIVER_NAME,
14855 + .pm = &stm32_serial_pm_ops,
14856 +@@ -1522,7 +1580,7 @@ static struct platform_driver stm32_serial_driver = {
14857 + },
14858 + };
14859 +
14860 +-static int __init usart_init(void)
14861 ++static int __init stm32_usart_init(void)
14862 + {
14863 + static char banner[] __initdata = "STM32 USART driver initialized";
14864 + int ret;
14865 +@@ -1540,14 +1598,14 @@ static int __init usart_init(void)
14866 + return ret;
14867 + }
14868 +
14869 +-static void __exit usart_exit(void)
14870 ++static void __exit stm32_usart_exit(void)
14871 + {
14872 + platform_driver_unregister(&stm32_serial_driver);
14873 + uart_unregister_driver(&stm32_usart_driver);
14874 + }
14875 +
14876 +-module_init(usart_init);
14877 +-module_exit(usart_exit);
14878 ++module_init(stm32_usart_init);
14879 ++module_exit(stm32_usart_exit);
14880 +
14881 + MODULE_ALIAS("platform:" DRIVER_NAME);
14882 + MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
14883 +diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
14884 +index d4c916e78d403..94b568aa46bbd 100644
14885 +--- a/drivers/tty/serial/stm32-usart.h
14886 ++++ b/drivers/tty/serial/stm32-usart.h
14887 +@@ -127,9 +127,6 @@ struct stm32_usart_info stm32h7_info = {
14888 + /* Dummy bits */
14889 + #define USART_SR_DUMMY_RX BIT(16)
14890 +
14891 +-/* USART_ICR (F7) */
14892 +-#define USART_CR_TC BIT(6)
14893 +-
14894 + /* USART_DR */
14895 + #define USART_DR_MASK GENMASK(8, 0)
14896 +
14897 +@@ -259,7 +256,7 @@ struct stm32_usart_info stm32h7_info = {
14898 + struct stm32_port {
14899 + struct uart_port port;
14900 + struct clk *clk;
14901 +- struct stm32_usart_info *info;
14902 ++ const struct stm32_usart_info *info;
14903 + struct dma_chan *rx_ch; /* dma rx channel */
14904 + dma_addr_t rx_dma_buf; /* dma rx buffer bus address */
14905 + unsigned char *rx_buf; /* dma rx buffer cpu address */
14906 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
14907 +index 146bd67115623..bc5314092aa4e 100644
14908 +--- a/drivers/tty/tty_io.c
14909 ++++ b/drivers/tty/tty_io.c
14910 +@@ -2492,14 +2492,14 @@ out:
14911 + * @p: pointer to result
14912 + *
14913 + * Obtain the modem status bits from the tty driver if the feature
14914 +- * is supported. Return -EINVAL if it is not available.
14915 ++ * is supported. Return -ENOTTY if it is not available.
14916 + *
14917 + * Locking: none (up to the driver)
14918 + */
14919 +
14920 + static int tty_tiocmget(struct tty_struct *tty, int __user *p)
14921 + {
14922 +- int retval = -EINVAL;
14923 ++ int retval = -ENOTTY;
14924 +
14925 + if (tty->ops->tiocmget) {
14926 + retval = tty->ops->tiocmget(tty);
14927 +@@ -2517,7 +2517,7 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
14928 + * @p: pointer to desired bits
14929 + *
14930 + * Set the modem status bits from the tty driver if the feature
14931 +- * is supported. Return -EINVAL if it is not available.
14932 ++ * is supported. Return -ENOTTY if it is not available.
14933 + *
14934 + * Locking: none (up to the driver)
14935 + */
14936 +@@ -2529,7 +2529,7 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
14937 + unsigned int set, clear, val;
14938 +
14939 + if (tty->ops->tiocmset == NULL)
14940 +- return -EINVAL;
14941 ++ return -ENOTTY;
14942 +
14943 + retval = get_user(val, p);
14944 + if (retval)
14945 +diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
14946 +index e18f318586ab4..803da2d111c8c 100644
14947 +--- a/drivers/tty/tty_ioctl.c
14948 ++++ b/drivers/tty/tty_ioctl.c
14949 +@@ -443,51 +443,6 @@ static int get_termio(struct tty_struct *tty, struct termio __user *termio)
14950 + return 0;
14951 + }
14952 +
14953 +-
14954 +-#ifdef TCGETX
14955 +-
14956 +-/**
14957 +- * set_termiox - set termiox fields if possible
14958 +- * @tty: terminal
14959 +- * @arg: termiox structure from user
14960 +- * @opt: option flags for ioctl type
14961 +- *
14962 +- * Implement the device calling points for the SYS5 termiox ioctl
14963 +- * interface in Linux
14964 +- */
14965 +-
14966 +-static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
14967 +-{
14968 +- struct termiox tnew;
14969 +- struct tty_ldisc *ld;
14970 +-
14971 +- if (tty->termiox == NULL)
14972 +- return -EINVAL;
14973 +- if (copy_from_user(&tnew, arg, sizeof(struct termiox)))
14974 +- return -EFAULT;
14975 +-
14976 +- ld = tty_ldisc_ref(tty);
14977 +- if (ld != NULL) {
14978 +- if ((opt & TERMIOS_FLUSH) && ld->ops->flush_buffer)
14979 +- ld->ops->flush_buffer(tty);
14980 +- tty_ldisc_deref(ld);
14981 +- }
14982 +- if (opt & TERMIOS_WAIT) {
14983 +- tty_wait_until_sent(tty, 0);
14984 +- if (signal_pending(current))
14985 +- return -ERESTARTSYS;
14986 +- }
14987 +-
14988 +- down_write(&tty->termios_rwsem);
14989 +- if (tty->ops->set_termiox)
14990 +- tty->ops->set_termiox(tty, &tnew);
14991 +- up_write(&tty->termios_rwsem);
14992 +- return 0;
14993 +-}
14994 +-
14995 +-#endif
14996 +-
14997 +-
14998 + #ifdef TIOCGETP
14999 + /*
15000 + * These are deprecated, but there is limited support..
15001 +@@ -815,24 +770,12 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
15002 + return ret;
15003 + #endif
15004 + #ifdef TCGETX
15005 +- case TCGETX: {
15006 +- struct termiox ktermx;
15007 +- if (real_tty->termiox == NULL)
15008 +- return -EINVAL;
15009 +- down_read(&real_tty->termios_rwsem);
15010 +- memcpy(&ktermx, real_tty->termiox, sizeof(struct termiox));
15011 +- up_read(&real_tty->termios_rwsem);
15012 +- if (copy_to_user(p, &ktermx, sizeof(struct termiox)))
15013 +- ret = -EFAULT;
15014 +- return ret;
15015 +- }
15016 ++ case TCGETX:
15017 + case TCSETX:
15018 +- return set_termiox(real_tty, p, 0);
15019 + case TCSETXW:
15020 +- return set_termiox(real_tty, p, TERMIOS_WAIT);
15021 + case TCSETXF:
15022 +- return set_termiox(real_tty, p, TERMIOS_FLUSH);
15023 +-#endif
15024 ++ return -ENOTTY;
15025 ++#endif
15026 + case TIOCGSOFTCAR:
15027 + copy_termios(real_tty, &kterm);
15028 + ret = put_user((kterm.c_cflag & CLOCAL) ? 1 : 0,
15029 +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
15030 +index bc035ba6e0105..6fbabf56dbb76 100644
15031 +--- a/drivers/usb/class/cdc-acm.c
15032 ++++ b/drivers/usb/class/cdc-acm.c
15033 +@@ -929,8 +929,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_struct *ss)
15034 + {
15035 + struct acm *acm = tty->driver_data;
15036 +
15037 +- ss->xmit_fifo_size = acm->writesize;
15038 +- ss->baud_base = le32_to_cpu(acm->line.dwDTERate);
15039 ++ ss->line = acm->minor;
15040 + ss->close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
15041 + ss->closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
15042 + ASYNC_CLOSING_WAIT_NONE :
15043 +@@ -942,7 +941,6 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
15044 + {
15045 + struct acm *acm = tty->driver_data;
15046 + unsigned int closing_wait, close_delay;
15047 +- unsigned int old_closing_wait, old_close_delay;
15048 + int retval = 0;
15049 +
15050 + close_delay = msecs_to_jiffies(ss->close_delay * 10);
15051 +@@ -950,20 +948,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
15052 + ASYNC_CLOSING_WAIT_NONE :
15053 + msecs_to_jiffies(ss->closing_wait * 10);
15054 +
15055 +- /* we must redo the rounding here, so that the values match */
15056 +- old_close_delay = jiffies_to_msecs(acm->port.close_delay) / 10;
15057 +- old_closing_wait = acm->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
15058 +- ASYNC_CLOSING_WAIT_NONE :
15059 +- jiffies_to_msecs(acm->port.closing_wait) / 10;
15060 +-
15061 + mutex_lock(&acm->port.mutex);
15062 +
15063 + if (!capable(CAP_SYS_ADMIN)) {
15064 +- if ((ss->close_delay != old_close_delay) ||
15065 +- (ss->closing_wait != old_closing_wait))
15066 ++ if ((close_delay != acm->port.close_delay) ||
15067 ++ (closing_wait != acm->port.closing_wait))
15068 + retval = -EPERM;
15069 +- else
15070 +- retval = -EOPNOTSUPP;
15071 + } else {
15072 + acm->port.close_delay = close_delay;
15073 + acm->port.closing_wait = closing_wait;
15074 +diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
15075 +index 800c8b6c55ff1..510fd0572feb1 100644
15076 +--- a/drivers/usb/dwc2/core_intr.c
15077 ++++ b/drivers/usb/dwc2/core_intr.c
15078 +@@ -660,6 +660,71 @@ static u32 dwc2_read_common_intr(struct dwc2_hsotg *hsotg)
15079 + return 0;
15080 + }
15081 +
15082 ++/**
15083 ++ * dwc_handle_gpwrdn_disc_det() - Handles the gpwrdn disconnect detect.
15084 ++ * Exits hibernation without restoring registers.
15085 ++ *
15086 ++ * @hsotg: Programming view of DWC_otg controller
15087 ++ * @gpwrdn: GPWRDN register
15088 ++ */
15089 ++static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg,
15090 ++ u32 gpwrdn)
15091 ++{
15092 ++ u32 gpwrdn_tmp;
15093 ++
15094 ++ /* Switch-on voltage to the core */
15095 ++ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15096 ++ gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
15097 ++ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15098 ++ udelay(5);
15099 ++
15100 ++ /* Reset core */
15101 ++ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15102 ++ gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
15103 ++ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15104 ++ udelay(5);
15105 ++
15106 ++ /* Disable Power Down Clamp */
15107 ++ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15108 ++ gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
15109 ++ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15110 ++ udelay(5);
15111 ++
15112 ++ /* Deassert reset core */
15113 ++ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15114 ++ gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
15115 ++ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15116 ++ udelay(5);
15117 ++
15118 ++ /* Disable PMU interrupt */
15119 ++ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15120 ++ gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
15121 ++ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15122 ++
15123 ++ /* De-assert Wakeup Logic */
15124 ++ gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15125 ++ gpwrdn_tmp &= ~GPWRDN_PMUACTV;
15126 ++ dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15127 ++
15128 ++ hsotg->hibernated = 0;
15129 ++ hsotg->bus_suspended = 0;
15130 ++
15131 ++ if (gpwrdn & GPWRDN_IDSTS) {
15132 ++ hsotg->op_state = OTG_STATE_B_PERIPHERAL;
15133 ++ dwc2_core_init(hsotg, false);
15134 ++ dwc2_enable_global_interrupts(hsotg);
15135 ++ dwc2_hsotg_core_init_disconnected(hsotg, false);
15136 ++ dwc2_hsotg_core_connect(hsotg);
15137 ++ } else {
15138 ++ hsotg->op_state = OTG_STATE_A_HOST;
15139 ++
15140 ++ /* Initialize the Core for Host mode */
15141 ++ dwc2_core_init(hsotg, false);
15142 ++ dwc2_enable_global_interrupts(hsotg);
15143 ++ dwc2_hcd_start(hsotg);
15144 ++ }
15145 ++}
15146 ++
15147 + /*
15148 + * GPWRDN interrupt handler.
15149 + *
15150 +@@ -681,64 +746,14 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
15151 +
15152 + if ((gpwrdn & GPWRDN_DISCONN_DET) &&
15153 + (gpwrdn & GPWRDN_DISCONN_DET_MSK) && !linestate) {
15154 +- u32 gpwrdn_tmp;
15155 +-
15156 + dev_dbg(hsotg->dev, "%s: GPWRDN_DISCONN_DET\n", __func__);
15157 +-
15158 +- /* Switch-on voltage to the core */
15159 +- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15160 +- gpwrdn_tmp &= ~GPWRDN_PWRDNSWTCH;
15161 +- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15162 +- udelay(10);
15163 +-
15164 +- /* Reset core */
15165 +- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15166 +- gpwrdn_tmp &= ~GPWRDN_PWRDNRSTN;
15167 +- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15168 +- udelay(10);
15169 +-
15170 +- /* Disable Power Down Clamp */
15171 +- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15172 +- gpwrdn_tmp &= ~GPWRDN_PWRDNCLMP;
15173 +- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15174 +- udelay(10);
15175 +-
15176 +- /* Deassert reset core */
15177 +- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15178 +- gpwrdn_tmp |= GPWRDN_PWRDNRSTN;
15179 +- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15180 +- udelay(10);
15181 +-
15182 +- /* Disable PMU interrupt */
15183 +- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15184 +- gpwrdn_tmp &= ~GPWRDN_PMUINTSEL;
15185 +- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15186 +-
15187 +- /* De-assert Wakeup Logic */
15188 +- gpwrdn_tmp = dwc2_readl(hsotg, GPWRDN);
15189 +- gpwrdn_tmp &= ~GPWRDN_PMUACTV;
15190 +- dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN);
15191 +-
15192 +- hsotg->hibernated = 0;
15193 +-
15194 +- if (gpwrdn & GPWRDN_IDSTS) {
15195 +- hsotg->op_state = OTG_STATE_B_PERIPHERAL;
15196 +- dwc2_core_init(hsotg, false);
15197 +- dwc2_enable_global_interrupts(hsotg);
15198 +- dwc2_hsotg_core_init_disconnected(hsotg, false);
15199 +- dwc2_hsotg_core_connect(hsotg);
15200 +- } else {
15201 +- hsotg->op_state = OTG_STATE_A_HOST;
15202 +-
15203 +- /* Initialize the Core for Host mode */
15204 +- dwc2_core_init(hsotg, false);
15205 +- dwc2_enable_global_interrupts(hsotg);
15206 +- dwc2_hcd_start(hsotg);
15207 +- }
15208 +- }
15209 +-
15210 +- if ((gpwrdn & GPWRDN_LNSTSCHG) &&
15211 +- (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
15212 ++ /*
15213 ++ * Call disconnect detect function to exit from
15214 ++ * hibernation
15215 ++ */
15216 ++ dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
15217 ++ } else if ((gpwrdn & GPWRDN_LNSTSCHG) &&
15218 ++ (gpwrdn & GPWRDN_LNSTSCHG_MSK) && linestate) {
15219 + dev_dbg(hsotg->dev, "%s: GPWRDN_LNSTSCHG\n", __func__);
15220 + if (hsotg->hw_params.hibernation &&
15221 + hsotg->hibernated) {
15222 +@@ -749,24 +764,21 @@ static void dwc2_handle_gpwrdn_intr(struct dwc2_hsotg *hsotg)
15223 + dwc2_exit_hibernation(hsotg, 1, 0, 1);
15224 + }
15225 + }
15226 +- }
15227 +- if ((gpwrdn & GPWRDN_RST_DET) && (gpwrdn & GPWRDN_RST_DET_MSK)) {
15228 ++ } else if ((gpwrdn & GPWRDN_RST_DET) &&
15229 ++ (gpwrdn & GPWRDN_RST_DET_MSK)) {
15230 + dev_dbg(hsotg->dev, "%s: GPWRDN_RST_DET\n", __func__);
15231 + if (!linestate && (gpwrdn & GPWRDN_BSESSVLD))
15232 + dwc2_exit_hibernation(hsotg, 0, 1, 0);
15233 +- }
15234 +- if ((gpwrdn & GPWRDN_STS_CHGINT) &&
15235 +- (gpwrdn & GPWRDN_STS_CHGINT_MSK) && linestate) {
15236 ++ } else if ((gpwrdn & GPWRDN_STS_CHGINT) &&
15237 ++ (gpwrdn & GPWRDN_STS_CHGINT_MSK)) {
15238 + dev_dbg(hsotg->dev, "%s: GPWRDN_STS_CHGINT\n", __func__);
15239 +- if (hsotg->hw_params.hibernation &&
15240 +- hsotg->hibernated) {
15241 +- if (gpwrdn & GPWRDN_IDSTS) {
15242 +- dwc2_exit_hibernation(hsotg, 0, 0, 0);
15243 +- call_gadget(hsotg, resume);
15244 +- } else {
15245 +- dwc2_exit_hibernation(hsotg, 1, 0, 1);
15246 +- }
15247 +- }
15248 ++ /*
15249 ++ * As GPWRDN_STS_CHGINT exit from hibernation flow is
15250 ++ * the same as in GPWRDN_DISCONN_DET flow. Call
15251 ++ * disconnect detect helper function to exit from
15252 ++ * hibernation.
15253 ++ */
15254 ++ dwc_handle_gpwrdn_disc_det(hsotg, gpwrdn);
15255 + }
15256 + }
15257 +
15258 +diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
15259 +index 1a9789ec5847f..6af1dcbc36564 100644
15260 +--- a/drivers/usb/dwc2/hcd.c
15261 ++++ b/drivers/usb/dwc2/hcd.c
15262 +@@ -5580,7 +5580,15 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
15263 + return ret;
15264 + }
15265 +
15266 +- dwc2_hcd_rem_wakeup(hsotg);
15267 ++ if (rem_wakeup) {
15268 ++ dwc2_hcd_rem_wakeup(hsotg);
15269 ++ /*
15270 ++ * Change "port_connect_status_change" flag to re-enumerate,
15271 ++ * because after exit from hibernation port connection status
15272 ++ * is not detected.
15273 ++ */
15274 ++ hsotg->flags.b.port_connect_status_change = 1;
15275 ++ }
15276 +
15277 + hsotg->hibernated = 0;
15278 + hsotg->bus_suspended = 0;
15279 +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c
15280 +index be7bb64e3594d..d11d3d14313f9 100644
15281 +--- a/drivers/usb/gadget/udc/aspeed-vhub/core.c
15282 ++++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c
15283 +@@ -36,6 +36,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
15284 + int status)
15285 + {
15286 + bool internal = req->internal;
15287 ++ struct ast_vhub *vhub = ep->vhub;
15288 +
15289 + EPVDBG(ep, "completing request @%p, status %d\n", req, status);
15290 +
15291 +@@ -46,7 +47,7 @@ void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
15292 +
15293 + if (req->req.dma) {
15294 + if (!WARN_ON(!ep->dev))
15295 +- usb_gadget_unmap_request(&ep->dev->gadget,
15296 ++ usb_gadget_unmap_request_by_dev(&vhub->pdev->dev,
15297 + &req->req, ep->epn.is_in);
15298 + req->req.dma = 0;
15299 + }
15300 +diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
15301 +index 02d8bfae58fb1..cb164c615e6fc 100644
15302 +--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
15303 ++++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
15304 +@@ -376,7 +376,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
15305 + if (ep->epn.desc_mode ||
15306 + ((((unsigned long)u_req->buf & 7) == 0) &&
15307 + (ep->epn.is_in || !(u_req->length & (u_ep->maxpacket - 1))))) {
15308 +- rc = usb_gadget_map_request(&ep->dev->gadget, u_req,
15309 ++ rc = usb_gadget_map_request_by_dev(&vhub->pdev->dev, u_req,
15310 + ep->epn.is_in);
15311 + if (rc) {
15312 + dev_warn(&vhub->pdev->dev,
15313 +diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
15314 +index d6ca50f019853..75bf446f4a666 100644
15315 +--- a/drivers/usb/gadget/udc/fotg210-udc.c
15316 ++++ b/drivers/usb/gadget/udc/fotg210-udc.c
15317 +@@ -338,15 +338,16 @@ static void fotg210_start_dma(struct fotg210_ep *ep,
15318 + } else {
15319 + buffer = req->req.buf + req->req.actual;
15320 + length = ioread32(ep->fotg210->reg +
15321 +- FOTG210_FIBCR(ep->epnum - 1));
15322 +- length &= FIBCR_BCFX;
15323 ++ FOTG210_FIBCR(ep->epnum - 1)) & FIBCR_BCFX;
15324 ++ if (length > req->req.length - req->req.actual)
15325 ++ length = req->req.length - req->req.actual;
15326 + }
15327 + } else {
15328 + buffer = req->req.buf + req->req.actual;
15329 + if (req->req.length - req->req.actual > ep->ep.maxpacket)
15330 + length = ep->ep.maxpacket;
15331 + else
15332 +- length = req->req.length;
15333 ++ length = req->req.length - req->req.actual;
15334 + }
15335 +
15336 + d = dma_map_single(dev, buffer, length,
15337 +@@ -379,8 +380,7 @@ static void fotg210_ep0_queue(struct fotg210_ep *ep,
15338 + }
15339 + if (ep->dir_in) { /* if IN */
15340 + fotg210_start_dma(ep, req);
15341 +- if ((req->req.length == req->req.actual) ||
15342 +- (req->req.actual < ep->ep.maxpacket))
15343 ++ if (req->req.length == req->req.actual)
15344 + fotg210_done(ep, req, 0);
15345 + } else { /* OUT */
15346 + u32 value = ioread32(ep->fotg210->reg + FOTG210_DMISGR0);
15347 +@@ -820,7 +820,7 @@ static void fotg210_ep0in(struct fotg210_udc *fotg210)
15348 + if (req->req.length)
15349 + fotg210_start_dma(ep, req);
15350 +
15351 +- if ((req->req.length - req->req.actual) < ep->ep.maxpacket)
15352 ++ if (req->req.actual == req->req.length)
15353 + fotg210_done(ep, req, 0);
15354 + } else {
15355 + fotg210_set_cxdone(fotg210);
15356 +@@ -849,12 +849,16 @@ static void fotg210_out_fifo_handler(struct fotg210_ep *ep)
15357 + {
15358 + struct fotg210_request *req = list_entry(ep->queue.next,
15359 + struct fotg210_request, queue);
15360 ++ int disgr1 = ioread32(ep->fotg210->reg + FOTG210_DISGR1);
15361 +
15362 + fotg210_start_dma(ep, req);
15363 +
15364 +- /* finish out transfer */
15365 ++ /* Complete the request when it's full or a short packet arrived.
15366 ++ * Like other drivers, short_not_ok isn't handled.
15367 ++ */
15368 ++
15369 + if (req->req.length == req->req.actual ||
15370 +- req->req.actual < ep->ep.maxpacket)
15371 ++ (disgr1 & DISGR1_SPK_INT(ep->epnum - 1)))
15372 + fotg210_done(ep, req, 0);
15373 + }
15374 +
15375 +@@ -1027,6 +1031,12 @@ static void fotg210_init(struct fotg210_udc *fotg210)
15376 + value &= ~DMCR_GLINT_EN;
15377 + iowrite32(value, fotg210->reg + FOTG210_DMCR);
15378 +
15379 ++ /* enable only grp2 irqs we handle */
15380 ++ iowrite32(~(DISGR2_DMA_ERROR | DISGR2_RX0BYTE_INT | DISGR2_TX0BYTE_INT
15381 ++ | DISGR2_ISO_SEQ_ABORT_INT | DISGR2_ISO_SEQ_ERR_INT
15382 ++ | DISGR2_RESM_INT | DISGR2_SUSP_INT | DISGR2_USBRST_INT),
15383 ++ fotg210->reg + FOTG210_DMISGR2);
15384 ++
15385 + /* disable all fifo interrupt */
15386 + iowrite32(~(u32)0, fotg210->reg + FOTG210_DMISGR1);
15387 +
15388 +diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c
15389 +index a3c1fc9242686..fd3656d0f760c 100644
15390 +--- a/drivers/usb/gadget/udc/pch_udc.c
15391 ++++ b/drivers/usb/gadget/udc/pch_udc.c
15392 +@@ -7,12 +7,14 @@
15393 + #include <linux/module.h>
15394 + #include <linux/pci.h>
15395 + #include <linux/delay.h>
15396 ++#include <linux/dmi.h>
15397 + #include <linux/errno.h>
15398 ++#include <linux/gpio/consumer.h>
15399 ++#include <linux/gpio/machine.h>
15400 + #include <linux/list.h>
15401 + #include <linux/interrupt.h>
15402 + #include <linux/usb/ch9.h>
15403 + #include <linux/usb/gadget.h>
15404 +-#include <linux/gpio/consumer.h>
15405 + #include <linux/irq.h>
15406 +
15407 + #define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */
15408 +@@ -596,18 +598,22 @@ static void pch_udc_reconnect(struct pch_udc_dev *dev)
15409 + static inline void pch_udc_vbus_session(struct pch_udc_dev *dev,
15410 + int is_active)
15411 + {
15412 ++ unsigned long iflags;
15413 ++
15414 ++ spin_lock_irqsave(&dev->lock, iflags);
15415 + if (is_active) {
15416 + pch_udc_reconnect(dev);
15417 + dev->vbus_session = 1;
15418 + } else {
15419 + if (dev->driver && dev->driver->disconnect) {
15420 +- spin_lock(&dev->lock);
15421 ++ spin_unlock_irqrestore(&dev->lock, iflags);
15422 + dev->driver->disconnect(&dev->gadget);
15423 +- spin_unlock(&dev->lock);
15424 ++ spin_lock_irqsave(&dev->lock, iflags);
15425 + }
15426 + pch_udc_set_disconnect(dev);
15427 + dev->vbus_session = 0;
15428 + }
15429 ++ spin_unlock_irqrestore(&dev->lock, iflags);
15430 + }
15431 +
15432 + /**
15433 +@@ -1166,20 +1172,25 @@ static int pch_udc_pcd_selfpowered(struct usb_gadget *gadget, int value)
15434 + static int pch_udc_pcd_pullup(struct usb_gadget *gadget, int is_on)
15435 + {
15436 + struct pch_udc_dev *dev;
15437 ++ unsigned long iflags;
15438 +
15439 + if (!gadget)
15440 + return -EINVAL;
15441 ++
15442 + dev = container_of(gadget, struct pch_udc_dev, gadget);
15443 ++
15444 ++ spin_lock_irqsave(&dev->lock, iflags);
15445 + if (is_on) {
15446 + pch_udc_reconnect(dev);
15447 + } else {
15448 + if (dev->driver && dev->driver->disconnect) {
15449 +- spin_lock(&dev->lock);
15450 ++ spin_unlock_irqrestore(&dev->lock, iflags);
15451 + dev->driver->disconnect(&dev->gadget);
15452 +- spin_unlock(&dev->lock);
15453 ++ spin_lock_irqsave(&dev->lock, iflags);
15454 + }
15455 + pch_udc_set_disconnect(dev);
15456 + }
15457 ++ spin_unlock_irqrestore(&dev->lock, iflags);
15458 +
15459 + return 0;
15460 + }
15461 +@@ -1350,6 +1361,43 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
15462 + return IRQ_HANDLED;
15463 + }
15464 +
15465 ++static struct gpiod_lookup_table minnowboard_udc_gpios = {
15466 ++ .dev_id = "0000:02:02.4",
15467 ++ .table = {
15468 ++ GPIO_LOOKUP("sch_gpio.33158", 12, NULL, GPIO_ACTIVE_HIGH),
15469 ++ {}
15470 ++ },
15471 ++};
15472 ++
15473 ++static const struct dmi_system_id pch_udc_gpio_dmi_table[] = {
15474 ++ {
15475 ++ .ident = "MinnowBoard",
15476 ++ .matches = {
15477 ++ DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
15478 ++ },
15479 ++ .driver_data = &minnowboard_udc_gpios,
15480 ++ },
15481 ++ { }
15482 ++};
15483 ++
15484 ++static void pch_vbus_gpio_remove_table(void *table)
15485 ++{
15486 ++ gpiod_remove_lookup_table(table);
15487 ++}
15488 ++
15489 ++static int pch_vbus_gpio_add_table(struct pch_udc_dev *dev)
15490 ++{
15491 ++ struct device *d = &dev->pdev->dev;
15492 ++ const struct dmi_system_id *dmi;
15493 ++
15494 ++ dmi = dmi_first_match(pch_udc_gpio_dmi_table);
15495 ++ if (!dmi)
15496 ++ return 0;
15497 ++
15498 ++ gpiod_add_lookup_table(dmi->driver_data);
15499 ++ return devm_add_action_or_reset(d, pch_vbus_gpio_remove_table, dmi->driver_data);
15500 ++}
15501 ++
15502 + /**
15503 + * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS.
15504 + * @dev: Reference to the driver structure
15505 +@@ -1360,6 +1408,7 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data)
15506 + */
15507 + static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
15508 + {
15509 ++ struct device *d = &dev->pdev->dev;
15510 + int err;
15511 + int irq_num = 0;
15512 + struct gpio_desc *gpiod;
15513 +@@ -1367,8 +1416,12 @@ static int pch_vbus_gpio_init(struct pch_udc_dev *dev)
15514 + dev->vbus_gpio.port = NULL;
15515 + dev->vbus_gpio.intr = 0;
15516 +
15517 ++ err = pch_vbus_gpio_add_table(dev);
15518 ++ if (err)
15519 ++ return err;
15520 ++
15521 + /* Retrieve the GPIO line from the USB gadget device */
15522 +- gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN);
15523 ++ gpiod = devm_gpiod_get_optional(d, NULL, GPIOD_IN);
15524 + if (IS_ERR(gpiod))
15525 + return PTR_ERR(gpiod);
15526 + gpiod_set_consumer_name(gpiod, "pch_vbus");
15527 +@@ -1756,7 +1809,7 @@ static struct usb_request *pch_udc_alloc_request(struct usb_ep *usbep,
15528 + }
15529 + /* prevent from using desc. - set HOST BUSY */
15530 + dma_desc->status |= PCH_UDC_BS_HST_BSY;
15531 +- dma_desc->dataptr = cpu_to_le32(DMA_ADDR_INVALID);
15532 ++ dma_desc->dataptr = lower_32_bits(DMA_ADDR_INVALID);
15533 + req->td_data = dma_desc;
15534 + req->td_data_last = dma_desc;
15535 + req->chain_len = 1;
15536 +@@ -2298,6 +2351,21 @@ static void pch_udc_svc_data_out(struct pch_udc_dev *dev, int ep_num)
15537 + pch_udc_set_dma(dev, DMA_DIR_RX);
15538 + }
15539 +
15540 ++static int pch_udc_gadget_setup(struct pch_udc_dev *dev)
15541 ++ __must_hold(&dev->lock)
15542 ++{
15543 ++ int rc;
15544 ++
15545 ++ /* In some cases we can get an interrupt before driver gets setup */
15546 ++ if (!dev->driver)
15547 ++ return -ESHUTDOWN;
15548 ++
15549 ++ spin_unlock(&dev->lock);
15550 ++ rc = dev->driver->setup(&dev->gadget, &dev->setup_data);
15551 ++ spin_lock(&dev->lock);
15552 ++ return rc;
15553 ++}
15554 ++
15555 + /**
15556 + * pch_udc_svc_control_in() - Handle Control IN endpoint interrupts
15557 + * @dev: Reference to the device structure
15558 +@@ -2369,15 +2437,12 @@ static void pch_udc_svc_control_out(struct pch_udc_dev *dev)
15559 + dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IDX].ep;
15560 + else /* OUT */
15561 + dev->gadget.ep0 = &ep->ep;
15562 +- spin_lock(&dev->lock);
15563 + /* If Mass storage Reset */
15564 + if ((dev->setup_data.bRequestType == 0x21) &&
15565 + (dev->setup_data.bRequest == 0xFF))
15566 + dev->prot_stall = 0;
15567 + /* call gadget with setup data received */
15568 +- setup_supported = dev->driver->setup(&dev->gadget,
15569 +- &dev->setup_data);
15570 +- spin_unlock(&dev->lock);
15571 ++ setup_supported = pch_udc_gadget_setup(dev);
15572 +
15573 + if (dev->setup_data.bRequestType & USB_DIR_IN) {
15574 + ep->td_data->status = (ep->td_data->status &
15575 +@@ -2625,9 +2690,7 @@ static void pch_udc_svc_intf_interrupt(struct pch_udc_dev *dev)
15576 + dev->ep[i].halted = 0;
15577 + }
15578 + dev->stall = 0;
15579 +- spin_unlock(&dev->lock);
15580 +- dev->driver->setup(&dev->gadget, &dev->setup_data);
15581 +- spin_lock(&dev->lock);
15582 ++ pch_udc_gadget_setup(dev);
15583 + }
15584 +
15585 + /**
15586 +@@ -2662,9 +2725,7 @@ static void pch_udc_svc_cfg_interrupt(struct pch_udc_dev *dev)
15587 + dev->stall = 0;
15588 +
15589 + /* call gadget zero with setup data received */
15590 +- spin_unlock(&dev->lock);
15591 +- dev->driver->setup(&dev->gadget, &dev->setup_data);
15592 +- spin_lock(&dev->lock);
15593 ++ pch_udc_gadget_setup(dev);
15594 + }
15595 +
15596 + /**
15597 +@@ -2870,14 +2931,20 @@ static void pch_udc_pcd_reinit(struct pch_udc_dev *dev)
15598 + * @dev: Reference to the driver structure
15599 + *
15600 + * Return codes:
15601 +- * 0: Success
15602 ++ * 0: Success
15603 ++ * -%ERRNO: All kind of errors when retrieving VBUS GPIO
15604 + */
15605 + static int pch_udc_pcd_init(struct pch_udc_dev *dev)
15606 + {
15607 ++ int ret;
15608 ++
15609 + pch_udc_init(dev);
15610 + pch_udc_pcd_reinit(dev);
15611 +- pch_vbus_gpio_init(dev);
15612 +- return 0;
15613 ++
15614 ++ ret = pch_vbus_gpio_init(dev);
15615 ++ if (ret)
15616 ++ pch_udc_exit(dev);
15617 ++ return ret;
15618 + }
15619 +
15620 + /**
15621 +@@ -2938,7 +3005,7 @@ static int init_dma_pools(struct pch_udc_dev *dev)
15622 + dev->dma_addr = dma_map_single(&dev->pdev->dev, ep0out_buf,
15623 + UDC_EP0OUT_BUFF_SIZE * 4,
15624 + DMA_FROM_DEVICE);
15625 +- return 0;
15626 ++ return dma_mapping_error(&dev->pdev->dev, dev->dma_addr);
15627 + }
15628 +
15629 + static int pch_udc_start(struct usb_gadget *g,
15630 +@@ -3063,6 +3130,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
15631 + if (retval)
15632 + return retval;
15633 +
15634 ++ dev->pdev = pdev;
15635 + pci_set_drvdata(pdev, dev);
15636 +
15637 + /* Determine BAR based on PCI ID */
15638 +@@ -3078,16 +3146,10 @@ static int pch_udc_probe(struct pci_dev *pdev,
15639 +
15640 + dev->base_addr = pcim_iomap_table(pdev)[bar];
15641 +
15642 +- /*
15643 +- * FIXME: add a GPIO descriptor table to pdev.dev using
15644 +- * gpiod_add_descriptor_table() from <linux/gpio/machine.h> based on
15645 +- * the PCI subsystem ID. The system-dependent GPIO is necessary for
15646 +- * VBUS operation.
15647 +- */
15648 +-
15649 + /* initialize the hardware */
15650 +- if (pch_udc_pcd_init(dev))
15651 +- return -ENODEV;
15652 ++ retval = pch_udc_pcd_init(dev);
15653 ++ if (retval)
15654 ++ return retval;
15655 +
15656 + pci_enable_msi(pdev);
15657 +
15658 +@@ -3104,7 +3166,6 @@ static int pch_udc_probe(struct pci_dev *pdev,
15659 +
15660 + /* device struct setup */
15661 + spin_lock_init(&dev->lock);
15662 +- dev->pdev = pdev;
15663 + dev->gadget.ops = &pch_udc_ops;
15664 +
15665 + retval = init_dma_pools(dev);
15666 +diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
15667 +index 896c1a016d550..65cae48834545 100644
15668 +--- a/drivers/usb/gadget/udc/r8a66597-udc.c
15669 ++++ b/drivers/usb/gadget/udc/r8a66597-udc.c
15670 +@@ -1849,6 +1849,8 @@ static int r8a66597_probe(struct platform_device *pdev)
15671 + return PTR_ERR(reg);
15672 +
15673 + ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
15674 ++ if (!ires)
15675 ++ return -EINVAL;
15676 + irq = ires->start;
15677 + irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
15678 +
15679 +diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
15680 +index 1d3ebb07ccd4d..b154b62abefa1 100644
15681 +--- a/drivers/usb/gadget/udc/s3c2410_udc.c
15682 ++++ b/drivers/usb/gadget/udc/s3c2410_udc.c
15683 +@@ -54,8 +54,6 @@ static struct clk *udc_clock;
15684 + static struct clk *usb_bus_clock;
15685 + static void __iomem *base_addr;
15686 + static int irq_usbd;
15687 +-static u64 rsrc_start;
15688 +-static u64 rsrc_len;
15689 + static struct dentry *s3c2410_udc_debugfs_root;
15690 +
15691 + static inline u32 udc_read(u32 reg)
15692 +@@ -1752,7 +1750,8 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
15693 + udc_clock = clk_get(NULL, "usb-device");
15694 + if (IS_ERR(udc_clock)) {
15695 + dev_err(dev, "failed to get udc clock source\n");
15696 +- return PTR_ERR(udc_clock);
15697 ++ retval = PTR_ERR(udc_clock);
15698 ++ goto err_usb_bus_clk;
15699 + }
15700 +
15701 + clk_prepare_enable(udc_clock);
15702 +@@ -1775,7 +1774,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
15703 + base_addr = devm_platform_ioremap_resource(pdev, 0);
15704 + if (IS_ERR(base_addr)) {
15705 + retval = PTR_ERR(base_addr);
15706 +- goto err_mem;
15707 ++ goto err_udc_clk;
15708 + }
15709 +
15710 + the_controller = udc;
15711 +@@ -1793,7 +1792,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
15712 + if (retval != 0) {
15713 + dev_err(dev, "cannot get irq %i, err %d\n", irq_usbd, retval);
15714 + retval = -EBUSY;
15715 +- goto err_map;
15716 ++ goto err_udc_clk;
15717 + }
15718 +
15719 + dev_dbg(dev, "got irq %i\n", irq_usbd);
15720 +@@ -1864,10 +1863,14 @@ err_gpio_claim:
15721 + gpio_free(udc_info->vbus_pin);
15722 + err_int:
15723 + free_irq(irq_usbd, udc);
15724 +-err_map:
15725 +- iounmap(base_addr);
15726 +-err_mem:
15727 +- release_mem_region(rsrc_start, rsrc_len);
15728 ++err_udc_clk:
15729 ++ clk_disable_unprepare(udc_clock);
15730 ++ clk_put(udc_clock);
15731 ++ udc_clock = NULL;
15732 ++err_usb_bus_clk:
15733 ++ clk_disable_unprepare(usb_bus_clock);
15734 ++ clk_put(usb_bus_clock);
15735 ++ usb_bus_clock = NULL;
15736 +
15737 + return retval;
15738 + }
15739 +@@ -1899,9 +1902,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
15740 +
15741 + free_irq(irq_usbd, udc);
15742 +
15743 +- iounmap(base_addr);
15744 +- release_mem_region(rsrc_start, rsrc_len);
15745 +-
15746 + if (!IS_ERR(udc_clock) && udc_clock != NULL) {
15747 + clk_disable_unprepare(udc_clock);
15748 + clk_put(udc_clock);
15749 +diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
15750 +index 32f1d3e90c264..99805d60a7ab3 100644
15751 +--- a/drivers/usb/gadget/udc/snps_udc_plat.c
15752 ++++ b/drivers/usb/gadget/udc/snps_udc_plat.c
15753 +@@ -114,8 +114,8 @@ static int udc_plat_probe(struct platform_device *pdev)
15754 +
15755 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
15756 + udc->virt_addr = devm_ioremap_resource(dev, res);
15757 +- if (IS_ERR(udc->regs))
15758 +- return PTR_ERR(udc->regs);
15759 ++ if (IS_ERR(udc->virt_addr))
15760 ++ return PTR_ERR(udc->virt_addr);
15761 +
15762 + /* udc csr registers base */
15763 + udc->csr = udc->virt_addr + UDC_CSR_ADDR;
15764 +diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
15765 +index b45e5bf089979..8950d1f10a7fb 100644
15766 +--- a/drivers/usb/host/xhci-mtk-sch.c
15767 ++++ b/drivers/usb/host/xhci-mtk-sch.c
15768 +@@ -378,6 +378,31 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
15769 + sch_ep->allocated = used;
15770 + }
15771 +
15772 ++static int check_fs_bus_bw(struct mu3h_sch_ep_info *sch_ep, int offset)
15773 ++{
15774 ++ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
15775 ++ u32 num_esit, tmp;
15776 ++ int base;
15777 ++ int i, j;
15778 ++
15779 ++ num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
15780 ++ for (i = 0; i < num_esit; i++) {
15781 ++ base = offset + i * sch_ep->esit;
15782 ++
15783 ++ /*
15784 ++ * Compared with hs bus, no matter what ep type,
15785 ++ * the hub will always delay one uframe to send data
15786 ++ */
15787 ++ for (j = 0; j < sch_ep->cs_count; j++) {
15788 ++ tmp = tt->fs_bus_bw[base + j] + sch_ep->bw_cost_per_microframe;
15789 ++ if (tmp > FS_PAYLOAD_MAX)
15790 ++ return -ERANGE;
15791 ++ }
15792 ++ }
15793 ++
15794 ++ return 0;
15795 ++}
15796 ++
15797 + static int check_sch_tt(struct usb_device *udev,
15798 + struct mu3h_sch_ep_info *sch_ep, u32 offset)
15799 + {
15800 +@@ -402,7 +427,7 @@ static int check_sch_tt(struct usb_device *udev,
15801 + return -ERANGE;
15802 +
15803 + for (i = 0; i < sch_ep->cs_count; i++)
15804 +- if (test_bit(offset + i, tt->split_bit_map))
15805 ++ if (test_bit(offset + i, tt->ss_bit_map))
15806 + return -ERANGE;
15807 +
15808 + } else {
15809 +@@ -432,7 +457,7 @@ static int check_sch_tt(struct usb_device *udev,
15810 + cs_count = 7; /* HW limit */
15811 +
15812 + for (i = 0; i < cs_count + 2; i++) {
15813 +- if (test_bit(offset + i, tt->split_bit_map))
15814 ++ if (test_bit(offset + i, tt->ss_bit_map))
15815 + return -ERANGE;
15816 + }
15817 +
15818 +@@ -448,24 +473,44 @@ static int check_sch_tt(struct usb_device *udev,
15819 + sch_ep->num_budget_microframes = sch_ep->esit;
15820 + }
15821 +
15822 +- return 0;
15823 ++ return check_fs_bus_bw(sch_ep, offset);
15824 + }
15825 +
15826 + static void update_sch_tt(struct usb_device *udev,
15827 +- struct mu3h_sch_ep_info *sch_ep)
15828 ++ struct mu3h_sch_ep_info *sch_ep, bool used)
15829 + {
15830 + struct mu3h_sch_tt *tt = sch_ep->sch_tt;
15831 + u32 base, num_esit;
15832 ++ int bw_updated;
15833 ++ int bits;
15834 + int i, j;
15835 +
15836 + num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
15837 ++ bits = (sch_ep->ep_type == ISOC_OUT_EP) ? sch_ep->cs_count : 1;
15838 ++
15839 ++ if (used)
15840 ++ bw_updated = sch_ep->bw_cost_per_microframe;
15841 ++ else
15842 ++ bw_updated = -sch_ep->bw_cost_per_microframe;
15843 ++
15844 + for (i = 0; i < num_esit; i++) {
15845 + base = sch_ep->offset + i * sch_ep->esit;
15846 +- for (j = 0; j < sch_ep->num_budget_microframes; j++)
15847 +- set_bit(base + j, tt->split_bit_map);
15848 ++
15849 ++ for (j = 0; j < bits; j++) {
15850 ++ if (used)
15851 ++ set_bit(base + j, tt->ss_bit_map);
15852 ++ else
15853 ++ clear_bit(base + j, tt->ss_bit_map);
15854 ++ }
15855 ++
15856 ++ for (j = 0; j < sch_ep->cs_count; j++)
15857 ++ tt->fs_bus_bw[base + j] += bw_updated;
15858 + }
15859 +
15860 +- list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
15861 ++ if (used)
15862 ++ list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
15863 ++ else
15864 ++ list_del(&sch_ep->tt_endpoint);
15865 + }
15866 +
15867 + static int check_sch_bw(struct usb_device *udev,
15868 +@@ -535,7 +580,7 @@ static int check_sch_bw(struct usb_device *udev,
15869 + if (!tt_offset_ok)
15870 + return -ERANGE;
15871 +
15872 +- update_sch_tt(udev, sch_ep);
15873 ++ update_sch_tt(udev, sch_ep, 1);
15874 + }
15875 +
15876 + /* update bus bandwidth info */
15877 +@@ -548,15 +593,16 @@ static void destroy_sch_ep(struct usb_device *udev,
15878 + struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
15879 + {
15880 + /* only release ep bw check passed by check_sch_bw() */
15881 +- if (sch_ep->allocated)
15882 ++ if (sch_ep->allocated) {
15883 + update_bus_bw(sch_bw, sch_ep, 0);
15884 ++ if (sch_ep->sch_tt)
15885 ++ update_sch_tt(udev, sch_ep, 0);
15886 ++ }
15887 +
15888 +- list_del(&sch_ep->endpoint);
15889 +-
15890 +- if (sch_ep->sch_tt) {
15891 +- list_del(&sch_ep->tt_endpoint);
15892 ++ if (sch_ep->sch_tt)
15893 + drop_tt(udev);
15894 +- }
15895 ++
15896 ++ list_del(&sch_ep->endpoint);
15897 + kfree(sch_ep);
15898 + }
15899 +
15900 +@@ -643,7 +689,7 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
15901 + */
15902 + if (usb_endpoint_xfer_int(&ep->desc)
15903 + || usb_endpoint_xfer_isoc(&ep->desc))
15904 +- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
15905 ++ ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(1));
15906 +
15907 + return 0;
15908 + }
15909 +@@ -730,10 +776,10 @@ int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
15910 + list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
15911 +
15912 + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
15913 +- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
15914 ++ ep_ctx->reserved[0] = cpu_to_le32(EP_BPKTS(sch_ep->pkts)
15915 + | EP_BCSCOUNT(sch_ep->cs_count)
15916 + | EP_BBM(sch_ep->burst_mode));
15917 +- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
15918 ++ ep_ctx->reserved[1] = cpu_to_le32(EP_BOFFSET(sch_ep->offset)
15919 + | EP_BREPEAT(sch_ep->repeat));
15920 +
15921 + xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
15922 +diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
15923 +index 080109012b9ac..2fc0568ba054e 100644
15924 +--- a/drivers/usb/host/xhci-mtk.h
15925 ++++ b/drivers/usb/host/xhci-mtk.h
15926 +@@ -20,13 +20,15 @@
15927 + #define XHCI_MTK_MAX_ESIT 64
15928 +
15929 + /**
15930 +- * @split_bit_map: used to avoid split microframes overlay
15931 ++ * @ss_bit_map: used to avoid start split microframes overlay
15932 ++ * @fs_bus_bw: array to keep track of bandwidth already used for FS
15933 + * @ep_list: Endpoints using this TT
15934 + * @usb_tt: usb TT related
15935 + * @tt_port: TT port number
15936 + */
15937 + struct mu3h_sch_tt {
15938 +- DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
15939 ++ DECLARE_BITMAP(ss_bit_map, XHCI_MTK_MAX_ESIT);
15940 ++ u32 fs_bus_bw[XHCI_MTK_MAX_ESIT];
15941 + struct list_head ep_list;
15942 + struct usb_tt *usb_tt;
15943 + int tt_port;
15944 +diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
15945 +index 97f37077b7f97..33b637d0d8d99 100644
15946 +--- a/drivers/usb/roles/class.c
15947 ++++ b/drivers/usb/roles/class.c
15948 +@@ -189,6 +189,8 @@ usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
15949 + return NULL;
15950 +
15951 + dev = class_find_device_by_fwnode(role_class, fwnode);
15952 ++ if (dev)
15953 ++ WARN_ON(!try_module_get(dev->parent->driver->owner));
15954 +
15955 + return dev ? to_role_switch(dev) : NULL;
15956 + }
15957 +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
15958 +index 73075b9351c58..622e24b06b4b7 100644
15959 +--- a/drivers/usb/serial/ti_usb_3410_5052.c
15960 ++++ b/drivers/usb/serial/ti_usb_3410_5052.c
15961 +@@ -1420,14 +1420,19 @@ static int ti_set_serial_info(struct tty_struct *tty,
15962 + struct serial_struct *ss)
15963 + {
15964 + struct usb_serial_port *port = tty->driver_data;
15965 +- struct ti_port *tport = usb_get_serial_port_data(port);
15966 ++ struct tty_port *tport = &port->port;
15967 + unsigned cwait;
15968 +
15969 + cwait = ss->closing_wait;
15970 + if (cwait != ASYNC_CLOSING_WAIT_NONE)
15971 + cwait = msecs_to_jiffies(10 * ss->closing_wait);
15972 +
15973 +- tport->tp_port->port.closing_wait = cwait;
15974 ++ if (!capable(CAP_SYS_ADMIN)) {
15975 ++ if (cwait != tport->closing_wait)
15976 ++ return -EPERM;
15977 ++ }
15978 ++
15979 ++ tport->closing_wait = cwait;
15980 +
15981 + return 0;
15982 + }
15983 +diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
15984 +index 4b9845807bee1..b2285d5a869de 100644
15985 +--- a/drivers/usb/serial/usb_wwan.c
15986 ++++ b/drivers/usb/serial/usb_wwan.c
15987 +@@ -140,10 +140,10 @@ int usb_wwan_get_serial_info(struct tty_struct *tty,
15988 + ss->line = port->minor;
15989 + ss->port = port->port_number;
15990 + ss->baud_base = tty_get_baud_rate(port->port.tty);
15991 +- ss->close_delay = port->port.close_delay / 10;
15992 ++ ss->close_delay = jiffies_to_msecs(port->port.close_delay) / 10;
15993 + ss->closing_wait = port->port.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
15994 + ASYNC_CLOSING_WAIT_NONE :
15995 +- port->port.closing_wait / 10;
15996 ++ jiffies_to_msecs(port->port.closing_wait) / 10;
15997 + return 0;
15998 + }
15999 + EXPORT_SYMBOL(usb_wwan_get_serial_info);
16000 +@@ -155,9 +155,10 @@ int usb_wwan_set_serial_info(struct tty_struct *tty,
16001 + unsigned int closing_wait, close_delay;
16002 + int retval = 0;
16003 +
16004 +- close_delay = ss->close_delay * 10;
16005 ++ close_delay = msecs_to_jiffies(ss->close_delay * 10);
16006 + closing_wait = ss->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
16007 +- ASYNC_CLOSING_WAIT_NONE : ss->closing_wait * 10;
16008 ++ ASYNC_CLOSING_WAIT_NONE :
16009 ++ msecs_to_jiffies(ss->closing_wait * 10);
16010 +
16011 + mutex_lock(&port->port.mutex);
16012 +
16013 +diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c
16014 +index d21750bbbb44d..6eaeba9b096e1 100644
16015 +--- a/drivers/usb/typec/stusb160x.c
16016 ++++ b/drivers/usb/typec/stusb160x.c
16017 +@@ -682,8 +682,8 @@ static int stusb160x_probe(struct i2c_client *client)
16018 + }
16019 +
16020 + fwnode = device_get_named_child_node(chip->dev, "connector");
16021 +- if (IS_ERR(fwnode))
16022 +- return PTR_ERR(fwnode);
16023 ++ if (!fwnode)
16024 ++ return -ENODEV;
16025 +
16026 + /*
16027 + * When both VDD and VSYS power supplies are present, the low power
16028 +diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
16029 +index f9f0af64da5f0..a06da1854c10c 100644
16030 +--- a/drivers/usb/typec/tcpm/tcpci.c
16031 ++++ b/drivers/usb/typec/tcpm/tcpci.c
16032 +@@ -20,6 +20,15 @@
16033 +
16034 + #define PD_RETRY_COUNT 3
16035 +
16036 ++#define tcpc_presenting_cc1_rd(reg) \
16037 ++ (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
16038 ++ (((reg) & (TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT)) == \
16039 ++ (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT)))
16040 ++#define tcpc_presenting_cc2_rd(reg) \
16041 ++ (!(TCPC_ROLE_CTRL_DRP & (reg)) && \
16042 ++ (((reg) & (TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT)) == \
16043 ++ (TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT)))
16044 ++
16045 + struct tcpci {
16046 + struct device *dev;
16047 +
16048 +@@ -174,19 +183,25 @@ static int tcpci_get_cc(struct tcpc_dev *tcpc,
16049 + enum typec_cc_status *cc1, enum typec_cc_status *cc2)
16050 + {
16051 + struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
16052 +- unsigned int reg;
16053 ++ unsigned int reg, role_control;
16054 + int ret;
16055 +
16056 ++ ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, &role_control);
16057 ++ if (ret < 0)
16058 ++ return ret;
16059 ++
16060 + ret = regmap_read(tcpci->regmap, TCPC_CC_STATUS, &reg);
16061 + if (ret < 0)
16062 + return ret;
16063 +
16064 + *cc1 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC1_SHIFT) &
16065 + TCPC_CC_STATUS_CC1_MASK,
16066 +- reg & TCPC_CC_STATUS_TERM);
16067 ++ reg & TCPC_CC_STATUS_TERM ||
16068 ++ tcpc_presenting_cc1_rd(role_control));
16069 + *cc2 = tcpci_to_typec_cc((reg >> TCPC_CC_STATUS_CC2_SHIFT) &
16070 + TCPC_CC_STATUS_CC2_MASK,
16071 +- reg & TCPC_CC_STATUS_TERM);
16072 ++ reg & TCPC_CC_STATUS_TERM ||
16073 ++ tcpc_presenting_cc2_rd(role_control));
16074 +
16075 + return 0;
16076 + }
16077 +diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
16078 +index 563658096b675..912dbf8ca2dac 100644
16079 +--- a/drivers/usb/typec/tcpm/tcpm.c
16080 ++++ b/drivers/usb/typec/tcpm/tcpm.c
16081 +@@ -218,12 +218,27 @@ struct pd_mode_data {
16082 + struct typec_altmode_desc altmode_desc[ALTMODE_DISCOVERY_MAX];
16083 + };
16084 +
16085 ++/*
16086 ++ * @min_volt: Actual min voltage at the local port
16087 ++ * @req_min_volt: Requested min voltage to the port partner
16088 ++ * @max_volt: Actual max voltage at the local port
16089 ++ * @req_max_volt: Requested max voltage to the port partner
16090 ++ * @max_curr: Actual max current at the local port
16091 ++ * @req_max_curr: Requested max current of the port partner
16092 ++ * @req_out_volt: Requested output voltage to the port partner
16093 ++ * @req_op_curr: Requested operating current to the port partner
16094 ++ * @supported: Parter has atleast one APDO hence supports PPS
16095 ++ * @active: PPS mode is active
16096 ++ */
16097 + struct pd_pps_data {
16098 + u32 min_volt;
16099 ++ u32 req_min_volt;
16100 + u32 max_volt;
16101 ++ u32 req_max_volt;
16102 + u32 max_curr;
16103 +- u32 out_volt;
16104 +- u32 op_curr;
16105 ++ u32 req_max_curr;
16106 ++ u32 req_out_volt;
16107 ++ u32 req_op_curr;
16108 + bool supported;
16109 + bool active;
16110 + };
16111 +@@ -326,7 +341,10 @@ struct tcpm_port {
16112 + unsigned int operating_snk_mw;
16113 + bool update_sink_caps;
16114 +
16115 +- /* Requested current / voltage */
16116 ++ /* Requested current / voltage to the port partner */
16117 ++ u32 req_current_limit;
16118 ++ u32 req_supply_voltage;
16119 ++ /* Actual current / voltage limit of the local port */
16120 + u32 current_limit;
16121 + u32 supply_voltage;
16122 +
16123 +@@ -1873,8 +1891,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
16124 + case SNK_TRANSITION_SINK:
16125 + if (port->vbus_present) {
16126 + tcpm_set_current_limit(port,
16127 +- port->current_limit,
16128 +- port->supply_voltage);
16129 ++ port->req_current_limit,
16130 ++ port->req_supply_voltage);
16131 + port->explicit_contract = true;
16132 + tcpm_set_state(port, SNK_READY, 0);
16133 + } else {
16134 +@@ -1916,8 +1934,8 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
16135 + break;
16136 + case SNK_NEGOTIATE_PPS_CAPABILITIES:
16137 + /* Revert data back from any requested PPS updates */
16138 +- port->pps_data.out_volt = port->supply_voltage;
16139 +- port->pps_data.op_curr = port->current_limit;
16140 ++ port->pps_data.req_out_volt = port->supply_voltage;
16141 ++ port->pps_data.req_op_curr = port->current_limit;
16142 + port->pps_status = (type == PD_CTRL_WAIT ?
16143 + -EAGAIN : -EOPNOTSUPP);
16144 + tcpm_set_state(port, SNK_READY, 0);
16145 +@@ -1956,8 +1974,12 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
16146 + break;
16147 + case SNK_NEGOTIATE_PPS_CAPABILITIES:
16148 + port->pps_data.active = true;
16149 +- port->supply_voltage = port->pps_data.out_volt;
16150 +- port->current_limit = port->pps_data.op_curr;
16151 ++ port->pps_data.min_volt = port->pps_data.req_min_volt;
16152 ++ port->pps_data.max_volt = port->pps_data.req_max_volt;
16153 ++ port->pps_data.max_curr = port->pps_data.req_max_curr;
16154 ++ port->req_supply_voltage = port->pps_data.req_out_volt;
16155 ++ port->req_current_limit = port->pps_data.req_op_curr;
16156 ++ power_supply_changed(port->psy);
16157 + tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
16158 + break;
16159 + case SOFT_RESET_SEND:
16160 +@@ -2474,17 +2496,16 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
16161 + src = port->source_caps[src_pdo];
16162 + snk = port->snk_pdo[snk_pdo];
16163 +
16164 +- port->pps_data.min_volt = max(pdo_pps_apdo_min_voltage(src),
16165 +- pdo_pps_apdo_min_voltage(snk));
16166 +- port->pps_data.max_volt = min(pdo_pps_apdo_max_voltage(src),
16167 +- pdo_pps_apdo_max_voltage(snk));
16168 +- port->pps_data.max_curr = min_pps_apdo_current(src, snk);
16169 +- port->pps_data.out_volt = min(port->pps_data.max_volt,
16170 +- max(port->pps_data.min_volt,
16171 +- port->pps_data.out_volt));
16172 +- port->pps_data.op_curr = min(port->pps_data.max_curr,
16173 +- port->pps_data.op_curr);
16174 +- power_supply_changed(port->psy);
16175 ++ port->pps_data.req_min_volt = max(pdo_pps_apdo_min_voltage(src),
16176 ++ pdo_pps_apdo_min_voltage(snk));
16177 ++ port->pps_data.req_max_volt = min(pdo_pps_apdo_max_voltage(src),
16178 ++ pdo_pps_apdo_max_voltage(snk));
16179 ++ port->pps_data.req_max_curr = min_pps_apdo_current(src, snk);
16180 ++ port->pps_data.req_out_volt = min(port->pps_data.max_volt,
16181 ++ max(port->pps_data.min_volt,
16182 ++ port->pps_data.req_out_volt));
16183 ++ port->pps_data.req_op_curr = min(port->pps_data.max_curr,
16184 ++ port->pps_data.req_op_curr);
16185 + }
16186 +
16187 + return src_pdo;
16188 +@@ -2564,8 +2585,8 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
16189 + flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
16190 + }
16191 +
16192 +- port->current_limit = ma;
16193 +- port->supply_voltage = mv;
16194 ++ port->req_current_limit = ma;
16195 ++ port->req_supply_voltage = mv;
16196 +
16197 + return 0;
16198 + }
16199 +@@ -2611,10 +2632,10 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
16200 + tcpm_log(port, "Invalid APDO selected!");
16201 + return -EINVAL;
16202 + }
16203 +- max_mv = port->pps_data.max_volt;
16204 +- max_ma = port->pps_data.max_curr;
16205 +- out_mv = port->pps_data.out_volt;
16206 +- op_ma = port->pps_data.op_curr;
16207 ++ max_mv = port->pps_data.req_max_volt;
16208 ++ max_ma = port->pps_data.req_max_curr;
16209 ++ out_mv = port->pps_data.req_out_volt;
16210 ++ op_ma = port->pps_data.req_op_curr;
16211 + break;
16212 + default:
16213 + tcpm_log(port, "Invalid PDO selected!");
16214 +@@ -2661,8 +2682,8 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
16215 + tcpm_log(port, "Requesting APDO %d: %u mV, %u mA",
16216 + src_pdo_index, out_mv, op_ma);
16217 +
16218 +- port->pps_data.op_curr = op_ma;
16219 +- port->pps_data.out_volt = out_mv;
16220 ++ port->pps_data.req_op_curr = op_ma;
16221 ++ port->pps_data.req_out_volt = out_mv;
16222 +
16223 + return 0;
16224 + }
16225 +@@ -2890,8 +2911,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
16226 + port->sink_cap_done = false;
16227 + if (port->tcpc->enable_frs)
16228 + port->tcpc->enable_frs(port->tcpc, false);
16229 +-
16230 +- power_supply_changed(port->psy);
16231 + }
16232 +
16233 + static void tcpm_detach(struct tcpm_port *port)
16234 +@@ -4503,7 +4522,7 @@ static int tcpm_try_role(struct typec_port *p, int role)
16235 + return ret;
16236 + }
16237 +
16238 +-static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
16239 ++static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 req_op_curr)
16240 + {
16241 + unsigned int target_mw;
16242 + int ret;
16243 +@@ -4521,22 +4540,22 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
16244 + goto port_unlock;
16245 + }
16246 +
16247 +- if (op_curr > port->pps_data.max_curr) {
16248 ++ if (req_op_curr > port->pps_data.max_curr) {
16249 + ret = -EINVAL;
16250 + goto port_unlock;
16251 + }
16252 +
16253 +- target_mw = (op_curr * port->pps_data.out_volt) / 1000;
16254 ++ target_mw = (req_op_curr * port->supply_voltage) / 1000;
16255 + if (target_mw < port->operating_snk_mw) {
16256 + ret = -EINVAL;
16257 + goto port_unlock;
16258 + }
16259 +
16260 + /* Round down operating current to align with PPS valid steps */
16261 +- op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
16262 ++ req_op_curr = req_op_curr - (req_op_curr % RDO_PROG_CURR_MA_STEP);
16263 +
16264 + reinit_completion(&port->pps_complete);
16265 +- port->pps_data.op_curr = op_curr;
16266 ++ port->pps_data.req_op_curr = req_op_curr;
16267 + port->pps_status = 0;
16268 + port->pps_pending = true;
16269 + tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
16270 +@@ -4558,7 +4577,7 @@ swap_unlock:
16271 + return ret;
16272 + }
16273 +
16274 +-static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
16275 ++static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 req_out_volt)
16276 + {
16277 + unsigned int target_mw;
16278 + int ret;
16279 +@@ -4576,23 +4595,23 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
16280 + goto port_unlock;
16281 + }
16282 +
16283 +- if (out_volt < port->pps_data.min_volt ||
16284 +- out_volt > port->pps_data.max_volt) {
16285 ++ if (req_out_volt < port->pps_data.min_volt ||
16286 ++ req_out_volt > port->pps_data.max_volt) {
16287 + ret = -EINVAL;
16288 + goto port_unlock;
16289 + }
16290 +
16291 +- target_mw = (port->pps_data.op_curr * out_volt) / 1000;
16292 ++ target_mw = (port->current_limit * req_out_volt) / 1000;
16293 + if (target_mw < port->operating_snk_mw) {
16294 + ret = -EINVAL;
16295 + goto port_unlock;
16296 + }
16297 +
16298 + /* Round down output voltage to align with PPS valid steps */
16299 +- out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
16300 ++ req_out_volt = req_out_volt - (req_out_volt % RDO_PROG_VOLT_MV_STEP);
16301 +
16302 + reinit_completion(&port->pps_complete);
16303 +- port->pps_data.out_volt = out_volt;
16304 ++ port->pps_data.req_out_volt = req_out_volt;
16305 + port->pps_status = 0;
16306 + port->pps_pending = true;
16307 + tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
16308 +@@ -4641,8 +4660,8 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
16309 +
16310 + /* Trigger PPS request or move back to standard PDO contract */
16311 + if (activate) {
16312 +- port->pps_data.out_volt = port->supply_voltage;
16313 +- port->pps_data.op_curr = port->current_limit;
16314 ++ port->pps_data.req_out_volt = port->supply_voltage;
16315 ++ port->pps_data.req_op_curr = port->current_limit;
16316 + tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
16317 + } else {
16318 + tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
16319 +diff --git a/drivers/usb/typec/tps6598x.c b/drivers/usb/typec/tps6598x.c
16320 +index d8e4594fe0090..30bfc314b743c 100644
16321 +--- a/drivers/usb/typec/tps6598x.c
16322 ++++ b/drivers/usb/typec/tps6598x.c
16323 +@@ -515,8 +515,8 @@ static int tps6598x_probe(struct i2c_client *client)
16324 + return ret;
16325 +
16326 + fwnode = device_get_named_child_node(&client->dev, "connector");
16327 +- if (IS_ERR(fwnode))
16328 +- return PTR_ERR(fwnode);
16329 ++ if (!fwnode)
16330 ++ return -ENODEV;
16331 +
16332 + tps->role_sw = fwnode_usb_role_switch_get(fwnode);
16333 + if (IS_ERR(tps->role_sw)) {
16334 +diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
16335 +index f7633ee655a17..d1cf6b51bf85d 100644
16336 +--- a/drivers/usb/usbip/vudc_sysfs.c
16337 ++++ b/drivers/usb/usbip/vudc_sysfs.c
16338 +@@ -156,12 +156,14 @@ static ssize_t usbip_sockfd_store(struct device *dev,
16339 + tcp_rx = kthread_create(&v_rx_loop, &udc->ud, "vudc_rx");
16340 + if (IS_ERR(tcp_rx)) {
16341 + sockfd_put(socket);
16342 ++ mutex_unlock(&udc->ud.sysfs_lock);
16343 + return -EINVAL;
16344 + }
16345 + tcp_tx = kthread_create(&v_tx_loop, &udc->ud, "vudc_tx");
16346 + if (IS_ERR(tcp_tx)) {
16347 + kthread_stop(tcp_rx);
16348 + sockfd_put(socket);
16349 ++ mutex_unlock(&udc->ud.sysfs_lock);
16350 + return -EINVAL;
16351 + }
16352 +
16353 +diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
16354 +index f27e25112c403..8722f5effacd4 100644
16355 +--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
16356 ++++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
16357 +@@ -568,23 +568,39 @@ static int vfio_fsl_mc_init_device(struct vfio_fsl_mc_device *vdev)
16358 + dev_err(&mc_dev->dev, "VFIO_FSL_MC: Failed to setup DPRC (%d)\n", ret);
16359 + goto out_nc_unreg;
16360 + }
16361 ++ return 0;
16362 ++
16363 ++out_nc_unreg:
16364 ++ bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
16365 ++ return ret;
16366 ++}
16367 +
16368 ++static int vfio_fsl_mc_scan_container(struct fsl_mc_device *mc_dev)
16369 ++{
16370 ++ int ret;
16371 ++
16372 ++ /* non dprc devices do not scan for other devices */
16373 ++ if (!is_fsl_mc_bus_dprc(mc_dev))
16374 ++ return 0;
16375 + ret = dprc_scan_container(mc_dev, false);
16376 + if (ret) {
16377 +- dev_err(&mc_dev->dev, "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
16378 +- goto out_dprc_cleanup;
16379 ++ dev_err(&mc_dev->dev,
16380 ++ "VFIO_FSL_MC: Container scanning failed (%d)\n", ret);
16381 ++ dprc_remove_devices(mc_dev, NULL, 0);
16382 ++ return ret;
16383 + }
16384 +-
16385 + return 0;
16386 ++}
16387 ++
16388 ++static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
16389 ++{
16390 ++ struct fsl_mc_device *mc_dev = vdev->mc_dev;
16391 ++
16392 ++ if (!is_fsl_mc_bus_dprc(mc_dev))
16393 ++ return;
16394 +
16395 +-out_dprc_cleanup:
16396 +- dprc_remove_devices(mc_dev, NULL, 0);
16397 + dprc_cleanup(mc_dev);
16398 +-out_nc_unreg:
16399 + bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
16400 +- vdev->nb.notifier_call = NULL;
16401 +-
16402 +- return ret;
16403 + }
16404 +
16405 + static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
16406 +@@ -607,29 +623,39 @@ static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
16407 + }
16408 +
16409 + vdev->mc_dev = mc_dev;
16410 +-
16411 +- ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
16412 +- if (ret) {
16413 +- dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
16414 +- goto out_group_put;
16415 +- }
16416 ++ mutex_init(&vdev->igate);
16417 +
16418 + ret = vfio_fsl_mc_reflck_attach(vdev);
16419 + if (ret)
16420 +- goto out_group_dev;
16421 ++ goto out_group_put;
16422 +
16423 + ret = vfio_fsl_mc_init_device(vdev);
16424 + if (ret)
16425 + goto out_reflck;
16426 +
16427 +- mutex_init(&vdev->igate);
16428 ++ ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
16429 ++ if (ret) {
16430 ++ dev_err(dev, "VFIO_FSL_MC: Failed to add to vfio group\n");
16431 ++ goto out_device;
16432 ++ }
16433 +
16434 ++ /*
16435 ++ * This triggers recursion into vfio_fsl_mc_probe() on another device
16436 ++ * and the vfio_fsl_mc_reflck_attach() must succeed, which relies on the
16437 ++ * vfio_add_group_dev() above. It has no impact on this vdev, so it is
16438 ++ * safe to be after the vfio device is made live.
16439 ++ */
16440 ++ ret = vfio_fsl_mc_scan_container(mc_dev);
16441 ++ if (ret)
16442 ++ goto out_group_dev;
16443 + return 0;
16444 +
16445 +-out_reflck:
16446 +- vfio_fsl_mc_reflck_put(vdev->reflck);
16447 + out_group_dev:
16448 + vfio_del_group_dev(dev);
16449 ++out_device:
16450 ++ vfio_fsl_uninit_device(vdev);
16451 ++out_reflck:
16452 ++ vfio_fsl_mc_reflck_put(vdev->reflck);
16453 + out_group_put:
16454 + vfio_iommu_group_put(group, dev);
16455 + return ret;
16456 +@@ -646,16 +672,10 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
16457 +
16458 + mutex_destroy(&vdev->igate);
16459 +
16460 ++ dprc_remove_devices(mc_dev, NULL, 0);
16461 ++ vfio_fsl_uninit_device(vdev);
16462 + vfio_fsl_mc_reflck_put(vdev->reflck);
16463 +
16464 +- if (is_fsl_mc_bus_dprc(mc_dev)) {
16465 +- dprc_remove_devices(mc_dev, NULL, 0);
16466 +- dprc_cleanup(mc_dev);
16467 +- }
16468 +-
16469 +- if (vdev->nb.notifier_call)
16470 +- bus_unregister_notifier(&fsl_mc_bus_type, &vdev->nb);
16471 +-
16472 + vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
16473 +
16474 + return 0;
16475 +diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
16476 +index 917fd84c1c6f2..367ff5412a387 100644
16477 +--- a/drivers/vfio/mdev/mdev_sysfs.c
16478 ++++ b/drivers/vfio/mdev/mdev_sysfs.c
16479 +@@ -105,6 +105,7 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
16480 + return ERR_PTR(-ENOMEM);
16481 +
16482 + type->kobj.kset = parent->mdev_types_kset;
16483 ++ type->parent = parent;
16484 +
16485 + ret = kobject_init_and_add(&type->kobj, &mdev_type_ktype, NULL,
16486 + "%s-%s", dev_driver_string(parent->dev),
16487 +@@ -132,7 +133,6 @@ static struct mdev_type *add_mdev_supported_type(struct mdev_parent *parent,
16488 + }
16489 +
16490 + type->group = group;
16491 +- type->parent = parent;
16492 + return type;
16493 +
16494 + attrs_failed:
16495 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
16496 +index 465f646e33298..48b048edf1ee8 100644
16497 +--- a/drivers/vfio/pci/vfio_pci.c
16498 ++++ b/drivers/vfio/pci/vfio_pci.c
16499 +@@ -1926,6 +1926,68 @@ static int vfio_pci_bus_notifier(struct notifier_block *nb,
16500 + return 0;
16501 + }
16502 +
16503 ++static int vfio_pci_vf_init(struct vfio_pci_device *vdev)
16504 ++{
16505 ++ struct pci_dev *pdev = vdev->pdev;
16506 ++ int ret;
16507 ++
16508 ++ if (!pdev->is_physfn)
16509 ++ return 0;
16510 ++
16511 ++ vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
16512 ++ if (!vdev->vf_token)
16513 ++ return -ENOMEM;
16514 ++
16515 ++ mutex_init(&vdev->vf_token->lock);
16516 ++ uuid_gen(&vdev->vf_token->uuid);
16517 ++
16518 ++ vdev->nb.notifier_call = vfio_pci_bus_notifier;
16519 ++ ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
16520 ++ if (ret) {
16521 ++ kfree(vdev->vf_token);
16522 ++ return ret;
16523 ++ }
16524 ++ return 0;
16525 ++}
16526 ++
16527 ++static void vfio_pci_vf_uninit(struct vfio_pci_device *vdev)
16528 ++{
16529 ++ if (!vdev->vf_token)
16530 ++ return;
16531 ++
16532 ++ bus_unregister_notifier(&pci_bus_type, &vdev->nb);
16533 ++ WARN_ON(vdev->vf_token->users);
16534 ++ mutex_destroy(&vdev->vf_token->lock);
16535 ++ kfree(vdev->vf_token);
16536 ++}
16537 ++
16538 ++static int vfio_pci_vga_init(struct vfio_pci_device *vdev)
16539 ++{
16540 ++ struct pci_dev *pdev = vdev->pdev;
16541 ++ int ret;
16542 ++
16543 ++ if (!vfio_pci_is_vga(pdev))
16544 ++ return 0;
16545 ++
16546 ++ ret = vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
16547 ++ if (ret)
16548 ++ return ret;
16549 ++ vga_set_legacy_decoding(pdev, vfio_pci_set_vga_decode(vdev, false));
16550 ++ return 0;
16551 ++}
16552 ++
16553 ++static void vfio_pci_vga_uninit(struct vfio_pci_device *vdev)
16554 ++{
16555 ++ struct pci_dev *pdev = vdev->pdev;
16556 ++
16557 ++ if (!vfio_pci_is_vga(pdev))
16558 ++ return;
16559 ++ vga_client_register(pdev, NULL, NULL, NULL);
16560 ++ vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
16561 ++ VGA_RSRC_LEGACY_IO |
16562 ++ VGA_RSRC_LEGACY_MEM);
16563 ++}
16564 ++
16565 + static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
16566 + {
16567 + struct vfio_pci_device *vdev;
16568 +@@ -1972,35 +2034,15 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
16569 + INIT_LIST_HEAD(&vdev->vma_list);
16570 + init_rwsem(&vdev->memory_lock);
16571 +
16572 +- ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
16573 ++ ret = vfio_pci_reflck_attach(vdev);
16574 + if (ret)
16575 + goto out_free;
16576 +-
16577 +- ret = vfio_pci_reflck_attach(vdev);
16578 ++ ret = vfio_pci_vf_init(vdev);
16579 + if (ret)
16580 +- goto out_del_group_dev;
16581 +-
16582 +- if (pdev->is_physfn) {
16583 +- vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL);
16584 +- if (!vdev->vf_token) {
16585 +- ret = -ENOMEM;
16586 +- goto out_reflck;
16587 +- }
16588 +-
16589 +- mutex_init(&vdev->vf_token->lock);
16590 +- uuid_gen(&vdev->vf_token->uuid);
16591 +-
16592 +- vdev->nb.notifier_call = vfio_pci_bus_notifier;
16593 +- ret = bus_register_notifier(&pci_bus_type, &vdev->nb);
16594 +- if (ret)
16595 +- goto out_vf_token;
16596 +- }
16597 +-
16598 +- if (vfio_pci_is_vga(pdev)) {
16599 +- vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
16600 +- vga_set_legacy_decoding(pdev,
16601 +- vfio_pci_set_vga_decode(vdev, false));
16602 +- }
16603 ++ goto out_reflck;
16604 ++ ret = vfio_pci_vga_init(vdev);
16605 ++ if (ret)
16606 ++ goto out_vf;
16607 +
16608 + vfio_pci_probe_power_state(vdev);
16609 +
16610 +@@ -2018,15 +2060,20 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
16611 + vfio_pci_set_power_state(vdev, PCI_D3hot);
16612 + }
16613 +
16614 +- return ret;
16615 ++ ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
16616 ++ if (ret)
16617 ++ goto out_power;
16618 ++ return 0;
16619 +
16620 +-out_vf_token:
16621 +- kfree(vdev->vf_token);
16622 ++out_power:
16623 ++ if (!disable_idle_d3)
16624 ++ vfio_pci_set_power_state(vdev, PCI_D0);
16625 ++out_vf:
16626 ++ vfio_pci_vf_uninit(vdev);
16627 + out_reflck:
16628 + vfio_pci_reflck_put(vdev->reflck);
16629 +-out_del_group_dev:
16630 +- vfio_del_group_dev(&pdev->dev);
16631 + out_free:
16632 ++ kfree(vdev->pm_save);
16633 + kfree(vdev);
16634 + out_group_put:
16635 + vfio_iommu_group_put(group, &pdev->dev);
16636 +@@ -2043,33 +2090,19 @@ static void vfio_pci_remove(struct pci_dev *pdev)
16637 + if (!vdev)
16638 + return;
16639 +
16640 +- if (vdev->vf_token) {
16641 +- WARN_ON(vdev->vf_token->users);
16642 +- mutex_destroy(&vdev->vf_token->lock);
16643 +- kfree(vdev->vf_token);
16644 +- }
16645 +-
16646 +- if (vdev->nb.notifier_call)
16647 +- bus_unregister_notifier(&pci_bus_type, &vdev->nb);
16648 +-
16649 ++ vfio_pci_vf_uninit(vdev);
16650 + vfio_pci_reflck_put(vdev->reflck);
16651 ++ vfio_pci_vga_uninit(vdev);
16652 +
16653 + vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
16654 +- kfree(vdev->region);
16655 +- mutex_destroy(&vdev->ioeventfds_lock);
16656 +
16657 + if (!disable_idle_d3)
16658 + vfio_pci_set_power_state(vdev, PCI_D0);
16659 +
16660 ++ mutex_destroy(&vdev->ioeventfds_lock);
16661 ++ kfree(vdev->region);
16662 + kfree(vdev->pm_save);
16663 + kfree(vdev);
16664 +-
16665 +- if (vfio_pci_is_vga(pdev)) {
16666 +- vga_client_register(pdev, NULL, NULL, NULL);
16667 +- vga_set_legacy_decoding(pdev,
16668 +- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
16669 +- VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
16670 +- }
16671 + }
16672 +
16673 + static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
16674 +diff --git a/fs/afs/dir.c b/fs/afs/dir.c
16675 +index 9dc6f4b1c4177..628ba3fed36df 100644
16676 +--- a/fs/afs/dir.c
16677 ++++ b/fs/afs/dir.c
16678 +@@ -1337,6 +1337,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
16679 +
16680 + afs_op_set_vnode(op, 0, dvnode);
16681 + op->file[0].dv_delta = 1;
16682 ++ op->file[0].modification = true;
16683 + op->file[0].update_ctime = true;
16684 + op->dentry = dentry;
16685 + op->create.mode = S_IFDIR | mode;
16686 +@@ -1418,6 +1419,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
16687 +
16688 + afs_op_set_vnode(op, 0, dvnode);
16689 + op->file[0].dv_delta = 1;
16690 ++ op->file[0].modification = true;
16691 + op->file[0].update_ctime = true;
16692 +
16693 + op->dentry = dentry;
16694 +@@ -1554,6 +1556,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
16695 +
16696 + afs_op_set_vnode(op, 0, dvnode);
16697 + op->file[0].dv_delta = 1;
16698 ++ op->file[0].modification = true;
16699 + op->file[0].update_ctime = true;
16700 +
16701 + /* Try to make sure we have a callback promise on the victim. */
16702 +@@ -1636,6 +1639,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
16703 +
16704 + afs_op_set_vnode(op, 0, dvnode);
16705 + op->file[0].dv_delta = 1;
16706 ++ op->file[0].modification = true;
16707 + op->file[0].update_ctime = true;
16708 +
16709 + op->dentry = dentry;
16710 +@@ -1710,6 +1714,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
16711 + afs_op_set_vnode(op, 0, dvnode);
16712 + afs_op_set_vnode(op, 1, vnode);
16713 + op->file[0].dv_delta = 1;
16714 ++ op->file[0].modification = true;
16715 + op->file[0].update_ctime = true;
16716 + op->file[1].update_ctime = true;
16717 +
16718 +@@ -1905,6 +1910,8 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
16719 + afs_op_set_vnode(op, 1, new_dvnode); /* May be same as orig_dvnode */
16720 + op->file[0].dv_delta = 1;
16721 + op->file[1].dv_delta = 1;
16722 ++ op->file[0].modification = true;
16723 ++ op->file[1].modification = true;
16724 + op->file[0].update_ctime = true;
16725 + op->file[1].update_ctime = true;
16726 +
16727 +diff --git a/fs/afs/dir_silly.c b/fs/afs/dir_silly.c
16728 +index 04f75a44f2432..dae9a57d7ec0c 100644
16729 +--- a/fs/afs/dir_silly.c
16730 ++++ b/fs/afs/dir_silly.c
16731 +@@ -73,6 +73,8 @@ static int afs_do_silly_rename(struct afs_vnode *dvnode, struct afs_vnode *vnode
16732 + afs_op_set_vnode(op, 1, dvnode);
16733 + op->file[0].dv_delta = 1;
16734 + op->file[1].dv_delta = 1;
16735 ++ op->file[0].modification = true;
16736 ++ op->file[1].modification = true;
16737 + op->file[0].update_ctime = true;
16738 + op->file[1].update_ctime = true;
16739 +
16740 +@@ -201,6 +203,7 @@ static int afs_do_silly_unlink(struct afs_vnode *dvnode, struct afs_vnode *vnode
16741 + afs_op_set_vnode(op, 0, dvnode);
16742 + afs_op_set_vnode(op, 1, vnode);
16743 + op->file[0].dv_delta = 1;
16744 ++ op->file[0].modification = true;
16745 + op->file[0].update_ctime = true;
16746 + op->file[1].op_unlinked = true;
16747 + op->file[1].update_ctime = true;
16748 +diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c
16749 +index 71c58723763d2..a82515b47350e 100644
16750 +--- a/fs/afs/fs_operation.c
16751 ++++ b/fs/afs/fs_operation.c
16752 +@@ -118,6 +118,8 @@ static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *
16753 + vp->cb_break_before = afs_calc_vnode_cb_break(vnode);
16754 + if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
16755 + op->flags |= AFS_OPERATION_CUR_ONLY;
16756 ++ if (vp->modification)
16757 ++ set_bit(AFS_VNODE_MODIFYING, &vnode->flags);
16758 + }
16759 +
16760 + if (vp->fid.vnode)
16761 +@@ -223,6 +225,10 @@ int afs_put_operation(struct afs_operation *op)
16762 +
16763 + if (op->ops && op->ops->put)
16764 + op->ops->put(op);
16765 ++ if (op->file[0].modification)
16766 ++ clear_bit(AFS_VNODE_MODIFYING, &op->file[0].vnode->flags);
16767 ++ if (op->file[1].modification && op->file[1].vnode != op->file[0].vnode)
16768 ++ clear_bit(AFS_VNODE_MODIFYING, &op->file[1].vnode->flags);
16769 + if (op->file[0].put_vnode)
16770 + iput(&op->file[0].vnode->vfs_inode);
16771 + if (op->file[1].put_vnode)
16772 +diff --git a/fs/afs/inode.c b/fs/afs/inode.c
16773 +index 1d03eb1920ec0..ae3016a9fb23c 100644
16774 +--- a/fs/afs/inode.c
16775 ++++ b/fs/afs/inode.c
16776 +@@ -102,13 +102,13 @@ static int afs_inode_init_from_status(struct afs_operation *op,
16777 +
16778 + switch (status->type) {
16779 + case AFS_FTYPE_FILE:
16780 +- inode->i_mode = S_IFREG | status->mode;
16781 ++ inode->i_mode = S_IFREG | (status->mode & S_IALLUGO);
16782 + inode->i_op = &afs_file_inode_operations;
16783 + inode->i_fop = &afs_file_operations;
16784 + inode->i_mapping->a_ops = &afs_fs_aops;
16785 + break;
16786 + case AFS_FTYPE_DIR:
16787 +- inode->i_mode = S_IFDIR | status->mode;
16788 ++ inode->i_mode = S_IFDIR | (status->mode & S_IALLUGO);
16789 + inode->i_op = &afs_dir_inode_operations;
16790 + inode->i_fop = &afs_dir_file_operations;
16791 + inode->i_mapping->a_ops = &afs_dir_aops;
16792 +@@ -198,7 +198,7 @@ static void afs_apply_status(struct afs_operation *op,
16793 + if (status->mode != vnode->status.mode) {
16794 + mode = inode->i_mode;
16795 + mode &= ~S_IALLUGO;
16796 +- mode |= status->mode;
16797 ++ mode |= status->mode & S_IALLUGO;
16798 + WRITE_ONCE(inode->i_mode, mode);
16799 + }
16800 +
16801 +@@ -293,8 +293,9 @@ void afs_vnode_commit_status(struct afs_operation *op, struct afs_vnode_param *v
16802 + op->flags &= ~AFS_OPERATION_DIR_CONFLICT;
16803 + }
16804 + } else if (vp->scb.have_status) {
16805 +- if (vp->dv_before + vp->dv_delta != vp->scb.status.data_version &&
16806 +- vp->speculative)
16807 ++ if (vp->speculative &&
16808 ++ (test_bit(AFS_VNODE_MODIFYING, &vnode->flags) ||
16809 ++ vp->dv_before != vnode->status.data_version))
16810 + /* Ignore the result of a speculative bulk status fetch
16811 + * if it splits around a modification op, thereby
16812 + * appearing to regress the data version.
16813 +@@ -909,6 +910,7 @@ int afs_setattr(struct dentry *dentry, struct iattr *attr)
16814 + }
16815 + op->ctime = attr->ia_ctime;
16816 + op->file[0].update_ctime = 1;
16817 ++ op->file[0].modification = true;
16818 +
16819 + op->ops = &afs_setattr_operation;
16820 + ret = afs_do_sync_operation(op);
16821 +diff --git a/fs/afs/internal.h b/fs/afs/internal.h
16822 +index 525ef075fcd90..ffe318ad2e026 100644
16823 +--- a/fs/afs/internal.h
16824 ++++ b/fs/afs/internal.h
16825 +@@ -640,6 +640,7 @@ struct afs_vnode {
16826 + #define AFS_VNODE_PSEUDODIR 7 /* set if Vnode is a pseudo directory */
16827 + #define AFS_VNODE_NEW_CONTENT 8 /* Set if file has new content (create/trunc-0) */
16828 + #define AFS_VNODE_SILLY_DELETED 9 /* Set if file has been silly-deleted */
16829 ++#define AFS_VNODE_MODIFYING 10 /* Set if we're performing a modification op */
16830 +
16831 + struct list_head wb_keys; /* List of keys available for writeback */
16832 + struct list_head pending_locks; /* locks waiting to be granted */
16833 +@@ -756,6 +757,7 @@ struct afs_vnode_param {
16834 + bool set_size:1; /* Must update i_size */
16835 + bool op_unlinked:1; /* True if file was unlinked by op */
16836 + bool speculative:1; /* T if speculative status fetch (no vnode lock) */
16837 ++ bool modification:1; /* Set if the content gets modified */
16838 + };
16839 +
16840 + /*
16841 +diff --git a/fs/afs/write.c b/fs/afs/write.c
16842 +index c9195fc67fd8f..d37b5cfcf28f5 100644
16843 +--- a/fs/afs/write.c
16844 ++++ b/fs/afs/write.c
16845 +@@ -450,6 +450,7 @@ static int afs_store_data(struct address_space *mapping,
16846 + afs_op_set_vnode(op, 0, vnode);
16847 + op->file[0].dv_delta = 1;
16848 + op->store.mapping = mapping;
16849 ++ op->file[0].modification = true;
16850 + op->store.first = first;
16851 + op->store.last = last;
16852 + op->store.first_offset = offset;
16853 +diff --git a/fs/io_uring.c b/fs/io_uring.c
16854 +index dc1b0f6fd49b5..369ec81033d67 100644
16855 +--- a/fs/io_uring.c
16856 ++++ b/fs/io_uring.c
16857 +@@ -222,7 +222,7 @@ struct fixed_file_data {
16858 + struct io_buffer {
16859 + struct list_head list;
16860 + __u64 addr;
16861 +- __s32 len;
16862 ++ __u32 len;
16863 + __u16 bid;
16864 + };
16865 +
16866 +@@ -527,7 +527,7 @@ struct io_splice {
16867 + struct io_provide_buf {
16868 + struct file *file;
16869 + __u64 addr;
16870 +- __s32 len;
16871 ++ __u32 len;
16872 + __u32 bgid;
16873 + __u16 nbufs;
16874 + __u16 bid;
16875 +@@ -3996,7 +3996,7 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
16876 + static int io_provide_buffers_prep(struct io_kiocb *req,
16877 + const struct io_uring_sqe *sqe)
16878 + {
16879 +- unsigned long size;
16880 ++ unsigned long size, tmp_check;
16881 + struct io_provide_buf *p = &req->pbuf;
16882 + u64 tmp;
16883 +
16884 +@@ -4010,6 +4010,12 @@ static int io_provide_buffers_prep(struct io_kiocb *req,
16885 + p->addr = READ_ONCE(sqe->addr);
16886 + p->len = READ_ONCE(sqe->len);
16887 +
16888 ++ if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
16889 ++ &size))
16890 ++ return -EOVERFLOW;
16891 ++ if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
16892 ++ return -EOVERFLOW;
16893 ++
16894 + size = (unsigned long)p->len * p->nbufs;
16895 + if (!access_ok(u64_to_user_ptr(p->addr), size))
16896 + return -EFAULT;
16897 +@@ -4034,7 +4040,7 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
16898 + break;
16899 +
16900 + buf->addr = addr;
16901 +- buf->len = pbuf->len;
16902 ++ buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
16903 + buf->bid = bid;
16904 + addr += pbuf->len;
16905 + bid++;
16906 +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
16907 +index 2e68cea148e0d..00440337efc1f 100644
16908 +--- a/fs/nfsd/nfs4proc.c
16909 ++++ b/fs/nfsd/nfs4proc.c
16910 +@@ -1425,7 +1425,7 @@ static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
16911 + return status;
16912 + }
16913 +
16914 +-static int dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
16915 ++static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
16916 + {
16917 + dst->cp_src_pos = src->cp_src_pos;
16918 + dst->cp_dst_pos = src->cp_dst_pos;
16919 +@@ -1444,8 +1444,6 @@ static int dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
16920 + memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid));
16921 + memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh));
16922 + dst->ss_mnt = src->ss_mnt;
16923 +-
16924 +- return 0;
16925 + }
16926 +
16927 + static void cleanup_async_copy(struct nfsd4_copy *copy)
16928 +@@ -1537,11 +1535,9 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
16929 + if (!nfs4_init_copy_state(nn, copy))
16930 + goto out_err;
16931 + refcount_set(&async_copy->refcount, 1);
16932 +- memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
16933 +- sizeof(copy->cp_stateid));
16934 +- status = dup_copy_fields(copy, async_copy);
16935 +- if (status)
16936 +- goto out_err;
16937 ++ memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.stid,
16938 ++ sizeof(copy->cp_res.cb_stateid));
16939 ++ dup_copy_fields(copy, async_copy);
16940 + async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
16941 + async_copy, "%s", "copy thread");
16942 + if (IS_ERR(async_copy->copy_task))
16943 +diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
16944 +index 89d5d59c7d7a4..e466c58f9ec4c 100644
16945 +--- a/fs/overlayfs/copy_up.c
16946 ++++ b/fs/overlayfs/copy_up.c
16947 +@@ -928,7 +928,7 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
16948 + static int ovl_copy_up_flags(struct dentry *dentry, int flags)
16949 + {
16950 + int err = 0;
16951 +- const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
16952 ++ const struct cred *old_cred;
16953 + bool disconnected = (dentry->d_flags & DCACHE_DISCONNECTED);
16954 +
16955 + /*
16956 +@@ -939,6 +939,7 @@ static int ovl_copy_up_flags(struct dentry *dentry, int flags)
16957 + if (WARN_ON(disconnected && d_is_dir(dentry)))
16958 + return -EIO;
16959 +
16960 ++ old_cred = ovl_override_creds(dentry->d_sb);
16961 + while (!err) {
16962 + struct dentry *next;
16963 + struct dentry *parent = NULL;
16964 +diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
16965 +index 9f7af98ae2005..e43dc68bd1b54 100644
16966 +--- a/fs/overlayfs/overlayfs.h
16967 ++++ b/fs/overlayfs/overlayfs.h
16968 +@@ -308,9 +308,6 @@ int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry,
16969 + enum ovl_xattr ox, const void *value, size_t size,
16970 + int xerr);
16971 + int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry);
16972 +-void ovl_set_flag(unsigned long flag, struct inode *inode);
16973 +-void ovl_clear_flag(unsigned long flag, struct inode *inode);
16974 +-bool ovl_test_flag(unsigned long flag, struct inode *inode);
16975 + bool ovl_inuse_trylock(struct dentry *dentry);
16976 + void ovl_inuse_unlock(struct dentry *dentry);
16977 + bool ovl_is_inuse(struct dentry *dentry);
16978 +@@ -324,6 +321,21 @@ char *ovl_get_redirect_xattr(struct ovl_fs *ofs, struct dentry *dentry,
16979 + int padding);
16980 + int ovl_sync_status(struct ovl_fs *ofs);
16981 +
16982 ++static inline void ovl_set_flag(unsigned long flag, struct inode *inode)
16983 ++{
16984 ++ set_bit(flag, &OVL_I(inode)->flags);
16985 ++}
16986 ++
16987 ++static inline void ovl_clear_flag(unsigned long flag, struct inode *inode)
16988 ++{
16989 ++ clear_bit(flag, &OVL_I(inode)->flags);
16990 ++}
16991 ++
16992 ++static inline bool ovl_test_flag(unsigned long flag, struct inode *inode)
16993 ++{
16994 ++ return test_bit(flag, &OVL_I(inode)->flags);
16995 ++}
16996 ++
16997 + static inline bool ovl_is_impuredir(struct super_block *sb,
16998 + struct dentry *dentry)
16999 + {
17000 +@@ -427,6 +439,18 @@ int ovl_workdir_cleanup(struct inode *dir, struct vfsmount *mnt,
17001 + struct dentry *dentry, int level);
17002 + int ovl_indexdir_cleanup(struct ovl_fs *ofs);
17003 +
17004 ++/*
17005 ++ * Can we iterate real dir directly?
17006 ++ *
17007 ++ * Non-merge dir may contain whiteouts from a time it was a merge upper, before
17008 ++ * lower dir was removed under it and possibly before it was rotated from upper
17009 ++ * to lower layer.
17010 ++ */
17011 ++static inline bool ovl_dir_is_real(struct dentry *dir)
17012 ++{
17013 ++ return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
17014 ++}
17015 ++
17016 + /* inode.c */
17017 + int ovl_set_nlink_upper(struct dentry *dentry);
17018 + int ovl_set_nlink_lower(struct dentry *dentry);
17019 +diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
17020 +index f404a78e6b607..cc1e802570644 100644
17021 +--- a/fs/overlayfs/readdir.c
17022 ++++ b/fs/overlayfs/readdir.c
17023 +@@ -319,18 +319,6 @@ static inline int ovl_dir_read(struct path *realpath,
17024 + return err;
17025 + }
17026 +
17027 +-/*
17028 +- * Can we iterate real dir directly?
17029 +- *
17030 +- * Non-merge dir may contain whiteouts from a time it was a merge upper, before
17031 +- * lower dir was removed under it and possibly before it was rotated from upper
17032 +- * to lower layer.
17033 +- */
17034 +-static bool ovl_dir_is_real(struct dentry *dir)
17035 +-{
17036 +- return !ovl_test_flag(OVL_WHITEOUTS, d_inode(dir));
17037 +-}
17038 +-
17039 + static void ovl_dir_reset(struct file *file)
17040 + {
17041 + struct ovl_dir_file *od = file->private_data;
17042 +diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
17043 +index 6e7b8c882045c..e8b14d2c180c6 100644
17044 +--- a/fs/overlayfs/util.c
17045 ++++ b/fs/overlayfs/util.c
17046 +@@ -419,18 +419,20 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
17047 + }
17048 + }
17049 +
17050 +-static void ovl_dentry_version_inc(struct dentry *dentry, bool impurity)
17051 ++static void ovl_dir_version_inc(struct dentry *dentry, bool impurity)
17052 + {
17053 + struct inode *inode = d_inode(dentry);
17054 +
17055 + WARN_ON(!inode_is_locked(inode));
17056 ++ WARN_ON(!d_is_dir(dentry));
17057 + /*
17058 +- * Version is used by readdir code to keep cache consistent. For merge
17059 +- * dirs all changes need to be noted. For non-merge dirs, cache only
17060 +- * contains impure (ones which have been copied up and have origins)
17061 +- * entries, so only need to note changes to impure entries.
17062 ++ * Version is used by readdir code to keep cache consistent.
17063 ++ * For merge dirs (or dirs with origin) all changes need to be noted.
17064 ++ * For non-merge dirs, cache contains only impure entries (i.e. ones
17065 ++ * which have been copied up and have origins), so only need to note
17066 ++ * changes to impure entries.
17067 + */
17068 +- if (OVL_TYPE_MERGE(ovl_path_type(dentry)) || impurity)
17069 ++ if (!ovl_dir_is_real(dentry) || impurity)
17070 + OVL_I(inode)->version++;
17071 + }
17072 +
17073 +@@ -439,7 +441,7 @@ void ovl_dir_modified(struct dentry *dentry, bool impurity)
17074 + /* Copy mtime/ctime */
17075 + ovl_copyattr(d_inode(ovl_dentry_upper(dentry)), d_inode(dentry));
17076 +
17077 +- ovl_dentry_version_inc(dentry, impurity);
17078 ++ ovl_dir_version_inc(dentry, impurity);
17079 + }
17080 +
17081 + u64 ovl_dentry_version_get(struct dentry *dentry)
17082 +@@ -634,21 +636,6 @@ int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry)
17083 + return err;
17084 + }
17085 +
17086 +-void ovl_set_flag(unsigned long flag, struct inode *inode)
17087 +-{
17088 +- set_bit(flag, &OVL_I(inode)->flags);
17089 +-}
17090 +-
17091 +-void ovl_clear_flag(unsigned long flag, struct inode *inode)
17092 +-{
17093 +- clear_bit(flag, &OVL_I(inode)->flags);
17094 +-}
17095 +-
17096 +-bool ovl_test_flag(unsigned long flag, struct inode *inode)
17097 +-{
17098 +- return test_bit(flag, &OVL_I(inode)->flags);
17099 +-}
17100 +-
17101 + /**
17102 + * Caller must hold a reference to inode to prevent it from being freed while
17103 + * it is marked inuse.
17104 +diff --git a/fs/proc/array.c b/fs/proc/array.c
17105 +index 65ec2029fa802..18a4588c35be6 100644
17106 +--- a/fs/proc/array.c
17107 ++++ b/fs/proc/array.c
17108 +@@ -341,8 +341,10 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p)
17109 + seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p));
17110 + #ifdef CONFIG_SECCOMP
17111 + seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode);
17112 ++#ifdef CONFIG_SECCOMP_FILTER
17113 + seq_put_decimal_ull(m, "\nSeccomp_filters:\t",
17114 + atomic_read(&p->seccomp.filter_count));
17115 ++#endif
17116 + #endif
17117 + seq_puts(m, "\nSpeculation_Store_Bypass:\t");
17118 + switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) {
17119 +diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
17120 +index fd8e6418a0d31..96ac7e562b871 100644
17121 +--- a/fs/xfs/libxfs/xfs_attr.c
17122 ++++ b/fs/xfs/libxfs/xfs_attr.c
17123 +@@ -928,6 +928,7 @@ restart:
17124 + * Search to see if name already exists, and get back a pointer
17125 + * to where it should go.
17126 + */
17127 ++ error = 0;
17128 + retval = xfs_attr_node_hasname(args, &state);
17129 + if (retval != -ENOATTR && retval != -EEXIST)
17130 + goto out;
17131 +diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
17132 +index 064e52ca52480..196aa769f2968 100644
17133 +--- a/include/crypto/internal/poly1305.h
17134 ++++ b/include/crypto/internal/poly1305.h
17135 +@@ -18,7 +18,8 @@
17136 + * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
17137 + */
17138 +
17139 +-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
17140 ++void poly1305_core_setkey(struct poly1305_core_key *key,
17141 ++ const u8 raw_key[POLY1305_BLOCK_SIZE]);
17142 + static inline void poly1305_core_init(struct poly1305_state *state)
17143 + {
17144 + *state = (struct poly1305_state){};
17145 +diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
17146 +index f1f67fc749cf4..090692ec3bc73 100644
17147 +--- a/include/crypto/poly1305.h
17148 ++++ b/include/crypto/poly1305.h
17149 +@@ -58,8 +58,10 @@ struct poly1305_desc_ctx {
17150 + };
17151 + };
17152 +
17153 +-void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
17154 +-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
17155 ++void poly1305_init_arch(struct poly1305_desc_ctx *desc,
17156 ++ const u8 key[POLY1305_KEY_SIZE]);
17157 ++void poly1305_init_generic(struct poly1305_desc_ctx *desc,
17158 ++ const u8 key[POLY1305_KEY_SIZE]);
17159 +
17160 + static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
17161 + {
17162 +diff --git a/include/keys/trusted-type.h b/include/keys/trusted-type.h
17163 +index a94c03a61d8f9..b2ed3481c6a02 100644
17164 +--- a/include/keys/trusted-type.h
17165 ++++ b/include/keys/trusted-type.h
17166 +@@ -30,6 +30,7 @@ struct trusted_key_options {
17167 + uint16_t keytype;
17168 + uint32_t keyhandle;
17169 + unsigned char keyauth[TPM_DIGEST_SIZE];
17170 ++ uint32_t blobauth_len;
17171 + unsigned char blobauth[TPM_DIGEST_SIZE];
17172 + uint32_t pcrinfo_len;
17173 + unsigned char pcrinfo[MAX_PCRINFO_SIZE];
17174 +diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
17175 +index 41a1bab98b7e1..4930ece07fd88 100644
17176 +--- a/include/linux/firmware/xlnx-zynqmp.h
17177 ++++ b/include/linux/firmware/xlnx-zynqmp.h
17178 +@@ -354,111 +354,131 @@ int zynqmp_pm_read_pggs(u32 index, u32 *value);
17179 + int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype);
17180 + int zynqmp_pm_set_boot_health_status(u32 value);
17181 + #else
17182 +-static inline struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void)
17183 +-{
17184 +- return ERR_PTR(-ENODEV);
17185 +-}
17186 + static inline int zynqmp_pm_get_api_version(u32 *version)
17187 + {
17188 + return -ENODEV;
17189 + }
17190 ++
17191 + static inline int zynqmp_pm_get_chipid(u32 *idcode, u32 *version)
17192 + {
17193 + return -ENODEV;
17194 + }
17195 ++
17196 + static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata,
17197 + u32 *out)
17198 + {
17199 + return -ENODEV;
17200 + }
17201 ++
17202 + static inline int zynqmp_pm_clock_enable(u32 clock_id)
17203 + {
17204 + return -ENODEV;
17205 + }
17206 ++
17207 + static inline int zynqmp_pm_clock_disable(u32 clock_id)
17208 + {
17209 + return -ENODEV;
17210 + }
17211 ++
17212 + static inline int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state)
17213 + {
17214 + return -ENODEV;
17215 + }
17216 ++
17217 + static inline int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider)
17218 + {
17219 + return -ENODEV;
17220 + }
17221 ++
17222 + static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider)
17223 + {
17224 + return -ENODEV;
17225 + }
17226 ++
17227 + static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate)
17228 + {
17229 + return -ENODEV;
17230 + }
17231 ++
17232 + static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate)
17233 + {
17234 + return -ENODEV;
17235 + }
17236 ++
17237 + static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id)
17238 + {
17239 + return -ENODEV;
17240 + }
17241 ++
17242 + static inline int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id)
17243 + {
17244 + return -ENODEV;
17245 + }
17246 ++
17247 + static inline int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode)
17248 + {
17249 + return -ENODEV;
17250 + }
17251 ++
17252 + static inline int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode)
17253 + {
17254 + return -ENODEV;
17255 + }
17256 ++
17257 + static inline int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data)
17258 + {
17259 + return -ENODEV;
17260 + }
17261 ++
17262 + static inline int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data)
17263 + {
17264 + return -ENODEV;
17265 + }
17266 ++
17267 + static inline int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
17268 + {
17269 + return -ENODEV;
17270 + }
17271 ++
17272 + static inline int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
17273 + {
17274 + return -ENODEV;
17275 + }
17276 ++
17277 + static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset,
17278 + const enum zynqmp_pm_reset_action assert_flag)
17279 + {
17280 + return -ENODEV;
17281 + }
17282 ++
17283 + static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset,
17284 + u32 *status)
17285 + {
17286 + return -ENODEV;
17287 + }
17288 ++
17289 + static inline int zynqmp_pm_init_finalize(void)
17290 + {
17291 + return -ENODEV;
17292 + }
17293 ++
17294 + static inline int zynqmp_pm_set_suspend_mode(u32 mode)
17295 + {
17296 + return -ENODEV;
17297 + }
17298 ++
17299 + static inline int zynqmp_pm_request_node(const u32 node, const u32 capabilities,
17300 + const u32 qos,
17301 + const enum zynqmp_pm_request_ack ack)
17302 + {
17303 + return -ENODEV;
17304 + }
17305 ++
17306 + static inline int zynqmp_pm_release_node(const u32 node)
17307 + {
17308 + return -ENODEV;
17309 + }
17310 ++
17311 + static inline int zynqmp_pm_set_requirement(const u32 node,
17312 + const u32 capabilities,
17313 + const u32 qos,
17314 +@@ -466,39 +486,48 @@ static inline int zynqmp_pm_set_requirement(const u32 node,
17315 + {
17316 + return -ENODEV;
17317 + }
17318 ++
17319 + static inline int zynqmp_pm_aes_engine(const u64 address, u32 *out)
17320 + {
17321 + return -ENODEV;
17322 + }
17323 ++
17324 + static inline int zynqmp_pm_fpga_load(const u64 address, const u32 size,
17325 + const u32 flags)
17326 + {
17327 + return -ENODEV;
17328 + }
17329 ++
17330 + static inline int zynqmp_pm_fpga_get_status(u32 *value)
17331 + {
17332 + return -ENODEV;
17333 + }
17334 ++
17335 + static inline int zynqmp_pm_write_ggs(u32 index, u32 value)
17336 + {
17337 + return -ENODEV;
17338 + }
17339 ++
17340 + static inline int zynqmp_pm_read_ggs(u32 index, u32 *value)
17341 + {
17342 + return -ENODEV;
17343 + }
17344 ++
17345 + static inline int zynqmp_pm_write_pggs(u32 index, u32 value)
17346 + {
17347 + return -ENODEV;
17348 + }
17349 ++
17350 + static inline int zynqmp_pm_read_pggs(u32 index, u32 *value)
17351 + {
17352 + return -ENODEV;
17353 + }
17354 ++
17355 + static inline int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype)
17356 + {
17357 + return -ENODEV;
17358 + }
17359 ++
17360 + static inline int zynqmp_pm_set_boot_health_status(u32 value)
17361 + {
17362 + return -ENODEV;
17363 +diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
17364 +index 4a7e295c36401..8e144306e2622 100644
17365 +--- a/include/linux/gpio/driver.h
17366 ++++ b/include/linux/gpio/driver.h
17367 +@@ -637,8 +637,17 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gc,
17368 + bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
17369 + unsigned int offset);
17370 +
17371 ++#ifdef CONFIG_GPIOLIB_IRQCHIP
17372 + int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
17373 + struct irq_domain *domain);
17374 ++#else
17375 ++static inline int gpiochip_irqchip_add_domain(struct gpio_chip *gc,
17376 ++ struct irq_domain *domain)
17377 ++{
17378 ++ WARN_ON(1);
17379 ++ return -EINVAL;
17380 ++}
17381 ++#endif
17382 +
17383 + #ifdef CONFIG_LOCKDEP
17384 +
17385 +diff --git a/include/linux/hid.h b/include/linux/hid.h
17386 +index 58684657960bf..8578db50ad734 100644
17387 +--- a/include/linux/hid.h
17388 ++++ b/include/linux/hid.h
17389 +@@ -262,6 +262,8 @@ struct hid_item {
17390 + #define HID_CP_SELECTION 0x000c0080
17391 + #define HID_CP_MEDIASELECTION 0x000c0087
17392 + #define HID_CP_SELECTDISC 0x000c00ba
17393 ++#define HID_CP_VOLUMEUP 0x000c00e9
17394 ++#define HID_CP_VOLUMEDOWN 0x000c00ea
17395 + #define HID_CP_PLAYBACKSPEED 0x000c00f1
17396 + #define HID_CP_PROXIMITY 0x000c0109
17397 + #define HID_CP_SPEAKERSYSTEM 0x000c0160
17398 +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
17399 +index 94522685a0d94..c00ee3458a919 100644
17400 +--- a/include/linux/intel-iommu.h
17401 ++++ b/include/linux/intel-iommu.h
17402 +@@ -42,6 +42,8 @@
17403 +
17404 + #define DMA_FL_PTE_PRESENT BIT_ULL(0)
17405 + #define DMA_FL_PTE_US BIT_ULL(2)
17406 ++#define DMA_FL_PTE_ACCESS BIT_ULL(5)
17407 ++#define DMA_FL_PTE_DIRTY BIT_ULL(6)
17408 + #define DMA_FL_PTE_XD BIT_ULL(63)
17409 +
17410 + #define ADDR_WIDTH_5LEVEL (57)
17411 +@@ -367,6 +369,7 @@ enum {
17412 + /* PASID cache invalidation granu */
17413 + #define QI_PC_ALL_PASIDS 0
17414 + #define QI_PC_PASID_SEL 1
17415 ++#define QI_PC_GLOBAL 3
17416 +
17417 + #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
17418 + #define QI_EIOTLB_IH(ih) (((u64)ih) << 6)
17419 +diff --git a/include/linux/iommu.h b/include/linux/iommu.h
17420 +index f11f5072af5dc..e90c267e7f3e1 100644
17421 +--- a/include/linux/iommu.h
17422 ++++ b/include/linux/iommu.h
17423 +@@ -544,7 +544,7 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
17424 + * structure can be rewritten.
17425 + */
17426 + if (gather->pgsize != size ||
17427 +- end < gather->start || start > gather->end) {
17428 ++ end + 1 < gather->start || start > gather->end + 1) {
17429 + if (gather->pgsize)
17430 + iommu_iotlb_sync(domain, gather);
17431 + gather->pgsize = size;
17432 +diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
17433 +index 7f2e2a09ebbd9..a2278b9ff57d2 100644
17434 +--- a/include/linux/kvm_host.h
17435 ++++ b/include/linux/kvm_host.h
17436 +@@ -190,8 +190,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
17437 + int len, void *val);
17438 + int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
17439 + int len, struct kvm_io_device *dev);
17440 +-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
17441 +- struct kvm_io_device *dev);
17442 ++int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
17443 ++ struct kvm_io_device *dev);
17444 + struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
17445 + gpa_t addr);
17446 +
17447 +diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
17448 +index 77a2aada106dc..17f9cd5626c83 100644
17449 +--- a/include/linux/platform_device.h
17450 ++++ b/include/linux/platform_device.h
17451 +@@ -350,4 +350,7 @@ static inline int is_sh_early_platform_device(struct platform_device *pdev)
17452 + }
17453 + #endif /* CONFIG_SUPERH */
17454 +
17455 ++/* For now only SuperH uses it */
17456 ++void early_platform_cleanup(void);
17457 ++
17458 + #endif /* _PLATFORM_DEVICE_H_ */
17459 +diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
17460 +index b492ae00cc908..6c08a085367bf 100644
17461 +--- a/include/linux/pm_runtime.h
17462 ++++ b/include/linux/pm_runtime.h
17463 +@@ -265,7 +265,7 @@ static inline void pm_runtime_no_callbacks(struct device *dev) {}
17464 + static inline void pm_runtime_irq_safe(struct device *dev) {}
17465 + static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
17466 +
17467 +-static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
17468 ++static inline bool pm_runtime_has_no_callbacks(struct device *dev) { return false; }
17469 + static inline void pm_runtime_mark_last_busy(struct device *dev) {}
17470 + static inline void __pm_runtime_use_autosuspend(struct device *dev,
17471 + bool use) {}
17472 +diff --git a/include/linux/smp.h b/include/linux/smp.h
17473 +index 9f13966d3d92d..04f44e0aa2e0b 100644
17474 +--- a/include/linux/smp.h
17475 ++++ b/include/linux/smp.h
17476 +@@ -74,7 +74,7 @@ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
17477 + void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
17478 + void *info, bool wait, const struct cpumask *mask);
17479 +
17480 +-int smp_call_function_single_async(int cpu, call_single_data_t *csd);
17481 ++int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
17482 +
17483 + #ifdef CONFIG_SMP
17484 +
17485 +diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
17486 +index b390fdac15876..2d906b9c14992 100644
17487 +--- a/include/linux/spi/spi.h
17488 ++++ b/include/linux/spi/spi.h
17489 +@@ -511,6 +511,9 @@ struct spi_controller {
17490 +
17491 + #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
17492 +
17493 ++ /* flag indicating this is a non-devres managed controller */
17494 ++ bool devm_allocated;
17495 ++
17496 + /* flag indicating this is an SPI slave controller */
17497 + bool slave;
17498 +
17499 +diff --git a/include/linux/tty.h b/include/linux/tty.h
17500 +index bc8caac390fce..5972f43b9d5ae 100644
17501 +--- a/include/linux/tty.h
17502 ++++ b/include/linux/tty.h
17503 +@@ -303,7 +303,6 @@ struct tty_struct {
17504 + spinlock_t flow_lock;
17505 + /* Termios values are protected by the termios rwsem */
17506 + struct ktermios termios, termios_locked;
17507 +- struct termiox *termiox; /* May be NULL for unsupported */
17508 + char name[64];
17509 + struct pid *pgrp; /* Protected by ctrl lock */
17510 + /*
17511 +diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
17512 +index 358446247ccdc..2f719b471d524 100644
17513 +--- a/include/linux/tty_driver.h
17514 ++++ b/include/linux/tty_driver.h
17515 +@@ -224,19 +224,11 @@
17516 + * line). See tty_do_resize() if you need to wrap the standard method
17517 + * in your own logic - the usual case.
17518 + *
17519 +- * void (*set_termiox)(struct tty_struct *tty, struct termiox *new);
17520 +- *
17521 +- * Called when the device receives a termiox based ioctl. Passes down
17522 +- * the requested data from user space. This method will not be invoked
17523 +- * unless the tty also has a valid tty->termiox pointer.
17524 +- *
17525 +- * Optional: Called under the termios lock
17526 +- *
17527 + * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount);
17528 + *
17529 + * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
17530 + * structure to complete. This method is optional and will only be called
17531 +- * if provided (otherwise EINVAL will be returned).
17532 ++ * if provided (otherwise ENOTTY will be returned).
17533 + */
17534 +
17535 + #include <linux/export.h>
17536 +@@ -285,7 +277,6 @@ struct tty_operations {
17537 + int (*tiocmset)(struct tty_struct *tty,
17538 + unsigned int set, unsigned int clear);
17539 + int (*resize)(struct tty_struct *tty, struct winsize *ws);
17540 +- int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew);
17541 + int (*get_icount)(struct tty_struct *tty,
17542 + struct serial_icounter_struct *icount);
17543 + int (*get_serial)(struct tty_struct *tty, struct serial_struct *p);
17544 +diff --git a/include/linux/udp.h b/include/linux/udp.h
17545 +index aa84597bdc33c..ae58ff3b6b5b8 100644
17546 +--- a/include/linux/udp.h
17547 ++++ b/include/linux/udp.h
17548 +@@ -51,7 +51,9 @@ struct udp_sock {
17549 + * different encapsulation layer set
17550 + * this
17551 + */
17552 +- gro_enabled:1; /* Can accept GRO packets */
17553 ++ gro_enabled:1, /* Request GRO aggregation */
17554 ++ accept_udp_l4:1,
17555 ++ accept_udp_fraglist:1;
17556 + /*
17557 + * Following member retains the information to create a UDP header
17558 + * when the socket is uncorked.
17559 +@@ -131,8 +133,16 @@ static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
17560 +
17561 + static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
17562 + {
17563 +- return !udp_sk(sk)->gro_enabled && skb_is_gso(skb) &&
17564 +- skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4;
17565 ++ if (!skb_is_gso(skb))
17566 ++ return false;
17567 ++
17568 ++ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
17569 ++ return true;
17570 ++
17571 ++ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
17572 ++ return true;
17573 ++
17574 ++ return false;
17575 + }
17576 +
17577 + #define udp_portaddr_for_each_entry(__sk, list) \
17578 +diff --git a/include/net/addrconf.h b/include/net/addrconf.h
17579 +index 18f783dcd55fa..78ea3e332688f 100644
17580 +--- a/include/net/addrconf.h
17581 ++++ b/include/net/addrconf.h
17582 +@@ -233,7 +233,6 @@ void ipv6_mc_unmap(struct inet6_dev *idev);
17583 + void ipv6_mc_remap(struct inet6_dev *idev);
17584 + void ipv6_mc_init_dev(struct inet6_dev *idev);
17585 + void ipv6_mc_destroy_dev(struct inet6_dev *idev);
17586 +-int ipv6_mc_check_icmpv6(struct sk_buff *skb);
17587 + int ipv6_mc_check_mld(struct sk_buff *skb);
17588 + void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp);
17589 +
17590 +diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
17591 +index 9873e1c8cd163..df611c8b6b595 100644
17592 +--- a/include/net/bluetooth/hci_core.h
17593 ++++ b/include/net/bluetooth/hci_core.h
17594 +@@ -669,6 +669,7 @@ struct hci_chan {
17595 + struct sk_buff_head data_q;
17596 + unsigned int sent;
17597 + __u8 state;
17598 ++ bool amp;
17599 + };
17600 +
17601 + struct hci_conn_params {
17602 +diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
17603 +index 1d34fe154fe0b..434a6158852f3 100644
17604 +--- a/include/net/netfilter/nf_tables_offload.h
17605 ++++ b/include/net/netfilter/nf_tables_offload.h
17606 +@@ -4,11 +4,16 @@
17607 + #include <net/flow_offload.h>
17608 + #include <net/netfilter/nf_tables.h>
17609 +
17610 ++enum nft_offload_reg_flags {
17611 ++ NFT_OFFLOAD_F_NETWORK2HOST = (1 << 0),
17612 ++};
17613 ++
17614 + struct nft_offload_reg {
17615 + u32 key;
17616 + u32 len;
17617 + u32 base_offset;
17618 + u32 offset;
17619 ++ u32 flags;
17620 + struct nft_data data;
17621 + struct nft_data mask;
17622 + };
17623 +@@ -45,6 +50,7 @@ struct nft_flow_key {
17624 + struct flow_dissector_key_ports tp;
17625 + struct flow_dissector_key_ip ip;
17626 + struct flow_dissector_key_vlan vlan;
17627 ++ struct flow_dissector_key_vlan cvlan;
17628 + struct flow_dissector_key_eth_addrs eth_addrs;
17629 + struct flow_dissector_key_meta meta;
17630 + } __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
17631 +@@ -71,13 +77,17 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rul
17632 + void nft_flow_rule_destroy(struct nft_flow_rule *flow);
17633 + int nft_flow_rule_offload_commit(struct net *net);
17634 +
17635 +-#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
17636 ++#define NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, __flags) \
17637 + (__reg)->base_offset = \
17638 + offsetof(struct nft_flow_key, __base); \
17639 + (__reg)->offset = \
17640 + offsetof(struct nft_flow_key, __base.__field); \
17641 + (__reg)->len = __len; \
17642 + (__reg)->key = __key; \
17643 ++ (__reg)->flags = __flags;
17644 ++
17645 ++#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
17646 ++ NFT_OFFLOAD_MATCH_FLAGS(__key, __base, __field, __len, __reg, 0)
17647 +
17648 + #define NFT_OFFLOAD_MATCH_EXACT(__key, __base, __field, __len, __reg) \
17649 + NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
17650 +diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
17651 +index 3d884d68eb301..c07caf7b40db6 100644
17652 +--- a/include/uapi/linux/if_packet.h
17653 ++++ b/include/uapi/linux/if_packet.h
17654 +@@ -2,6 +2,7 @@
17655 + #ifndef __LINUX_IF_PACKET_H
17656 + #define __LINUX_IF_PACKET_H
17657 +
17658 ++#include <asm/byteorder.h>
17659 + #include <linux/types.h>
17660 +
17661 + struct sockaddr_pkt {
17662 +@@ -296,6 +297,17 @@ struct packet_mreq {
17663 + unsigned char mr_address[8];
17664 + };
17665 +
17666 ++struct fanout_args {
17667 ++#if defined(__LITTLE_ENDIAN_BITFIELD)
17668 ++ __u16 id;
17669 ++ __u16 type_flags;
17670 ++#else
17671 ++ __u16 type_flags;
17672 ++ __u16 id;
17673 ++#endif
17674 ++ __u32 max_num_members;
17675 ++};
17676 ++
17677 + #define PACKET_MR_MULTICAST 0
17678 + #define PACKET_MR_PROMISC 1
17679 + #define PACKET_MR_ALLMULTI 2
17680 +diff --git a/include/uapi/linux/tty_flags.h b/include/uapi/linux/tty_flags.h
17681 +index 900a32e634247..6a3ac496a56c1 100644
17682 +--- a/include/uapi/linux/tty_flags.h
17683 ++++ b/include/uapi/linux/tty_flags.h
17684 +@@ -39,7 +39,7 @@
17685 + * WARNING: These flags are no longer used and have been superceded by the
17686 + * TTY_PORT_ flags in the iflags field (and not userspace-visible)
17687 + */
17688 +-#ifndef _KERNEL_
17689 ++#ifndef __KERNEL__
17690 + #define ASYNCB_INITIALIZED 31 /* Serial port was initialized */
17691 + #define ASYNCB_SUSPENDED 30 /* Serial port is suspended */
17692 + #define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */
17693 +@@ -81,7 +81,7 @@
17694 + #define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI)
17695 + #define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
17696 +
17697 +-#ifndef _KERNEL_
17698 ++#ifndef __KERNEL__
17699 + /* These flags are no longer used (and were always masked from userspace) */
17700 + #define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED)
17701 + #define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE)
17702 +diff --git a/init/init_task.c b/init/init_task.c
17703 +index 16d14c2ebb552..5fa18ed59d33e 100644
17704 +--- a/init/init_task.c
17705 ++++ b/init/init_task.c
17706 +@@ -210,7 +210,7 @@ struct task_struct init_task
17707 + #ifdef CONFIG_SECURITY
17708 + .security = NULL,
17709 + #endif
17710 +-#ifdef CONFIG_SECCOMP
17711 ++#ifdef CONFIG_SECCOMP_FILTER
17712 + .seccomp = { .filter_count = ATOMIC_INIT(0) },
17713 + #endif
17714 + };
17715 +diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
17716 +index 31cb04a4dd2d5..add0b34f2b340 100644
17717 +--- a/kernel/bpf/ringbuf.c
17718 ++++ b/kernel/bpf/ringbuf.c
17719 +@@ -240,25 +240,20 @@ static int ringbuf_map_get_next_key(struct bpf_map *map, void *key,
17720 + return -ENOTSUPP;
17721 + }
17722 +
17723 +-static size_t bpf_ringbuf_mmap_page_cnt(const struct bpf_ringbuf *rb)
17724 +-{
17725 +- size_t data_pages = (rb->mask + 1) >> PAGE_SHIFT;
17726 +-
17727 +- /* consumer page + producer page + 2 x data pages */
17728 +- return RINGBUF_POS_PAGES + 2 * data_pages;
17729 +-}
17730 +-
17731 + static int ringbuf_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
17732 + {
17733 + struct bpf_ringbuf_map *rb_map;
17734 +- size_t mmap_sz;
17735 +
17736 + rb_map = container_of(map, struct bpf_ringbuf_map, map);
17737 +- mmap_sz = bpf_ringbuf_mmap_page_cnt(rb_map->rb) << PAGE_SHIFT;
17738 +-
17739 +- if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) > mmap_sz)
17740 +- return -EINVAL;
17741 +
17742 ++ if (vma->vm_flags & VM_WRITE) {
17743 ++ /* allow writable mapping for the consumer_pos only */
17744 ++ if (vma->vm_pgoff != 0 || vma->vm_end - vma->vm_start != PAGE_SIZE)
17745 ++ return -EPERM;
17746 ++ } else {
17747 ++ vma->vm_flags &= ~VM_MAYWRITE;
17748 ++ }
17749 ++ /* remap_vmalloc_range() checks size and offset constraints */
17750 + return remap_vmalloc_range(vma, rb_map->rb,
17751 + vma->vm_pgoff + RINGBUF_PGOFF);
17752 + }
17753 +@@ -334,6 +329,9 @@ static void *__bpf_ringbuf_reserve(struct bpf_ringbuf *rb, u64 size)
17754 + return NULL;
17755 +
17756 + len = round_up(size + BPF_RINGBUF_HDR_SZ, 8);
17757 ++ if (len > rb->mask + 1)
17758 ++ return NULL;
17759 ++
17760 + cons_pos = smp_load_acquire(&rb->consumer_pos);
17761 +
17762 + if (in_nmi()) {
17763 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
17764 +index b6656d181c9e7..69730943eaf80 100644
17765 +--- a/kernel/bpf/verifier.c
17766 ++++ b/kernel/bpf/verifier.c
17767 +@@ -1303,9 +1303,7 @@ static bool __reg64_bound_s32(s64 a)
17768 +
17769 + static bool __reg64_bound_u32(u64 a)
17770 + {
17771 +- if (a > U32_MIN && a < U32_MAX)
17772 +- return true;
17773 +- return false;
17774 ++ return a > U32_MIN && a < U32_MAX;
17775 + }
17776 +
17777 + static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
17778 +@@ -1316,10 +1314,10 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
17779 + reg->s32_min_value = (s32)reg->smin_value;
17780 + reg->s32_max_value = (s32)reg->smax_value;
17781 + }
17782 +- if (__reg64_bound_u32(reg->umin_value))
17783 ++ if (__reg64_bound_u32(reg->umin_value) && __reg64_bound_u32(reg->umax_value)) {
17784 + reg->u32_min_value = (u32)reg->umin_value;
17785 +- if (__reg64_bound_u32(reg->umax_value))
17786 + reg->u32_max_value = (u32)reg->umax_value;
17787 ++ }
17788 +
17789 + /* Intersecting with the old var_off might have improved our bounds
17790 + * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
17791 +@@ -6343,11 +6341,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
17792 + s32 smin_val = src_reg->s32_min_value;
17793 + u32 umax_val = src_reg->u32_max_value;
17794 +
17795 +- /* Assuming scalar64_min_max_and will be called so its safe
17796 +- * to skip updating register for known 32-bit case.
17797 +- */
17798 +- if (src_known && dst_known)
17799 ++ if (src_known && dst_known) {
17800 ++ __mark_reg32_known(dst_reg, var32_off.value);
17801 + return;
17802 ++ }
17803 +
17804 + /* We get our minimum from the var_off, since that's inherently
17805 + * bitwise. Our maximum is the minimum of the operands' maxima.
17806 +@@ -6367,7 +6364,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
17807 + dst_reg->s32_min_value = dst_reg->u32_min_value;
17808 + dst_reg->s32_max_value = dst_reg->u32_max_value;
17809 + }
17810 +-
17811 + }
17812 +
17813 + static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
17814 +@@ -6414,11 +6410,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
17815 + s32 smin_val = src_reg->s32_min_value;
17816 + u32 umin_val = src_reg->u32_min_value;
17817 +
17818 +- /* Assuming scalar64_min_max_or will be called so it is safe
17819 +- * to skip updating register for known case.
17820 +- */
17821 +- if (src_known && dst_known)
17822 ++ if (src_known && dst_known) {
17823 ++ __mark_reg32_known(dst_reg, var32_off.value);
17824 + return;
17825 ++ }
17826 +
17827 + /* We get our maximum from the var_off, and our minimum is the
17828 + * maximum of the operands' minima
17829 +@@ -6483,11 +6478,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
17830 + struct tnum var32_off = tnum_subreg(dst_reg->var_off);
17831 + s32 smin_val = src_reg->s32_min_value;
17832 +
17833 +- /* Assuming scalar64_min_max_xor will be called so it is safe
17834 +- * to skip updating register for known case.
17835 +- */
17836 +- if (src_known && dst_known)
17837 ++ if (src_known && dst_known) {
17838 ++ __mark_reg32_known(dst_reg, var32_off.value);
17839 + return;
17840 ++ }
17841 +
17842 + /* We get both minimum and maximum from the var32_off. */
17843 + dst_reg->u32_min_value = var32_off.value;
17844 +diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
17845 +index 8a5cc76ecac96..61e250cdd7c9c 100644
17846 +--- a/kernel/rcu/tree.c
17847 ++++ b/kernel/rcu/tree.c
17848 +@@ -1019,7 +1019,6 @@ noinstr void rcu_nmi_enter(void)
17849 + } else if (!in_nmi()) {
17850 + instrumentation_begin();
17851 + rcu_irq_enter_check_tick();
17852 +- instrumentation_end();
17853 + } else {
17854 + instrumentation_begin();
17855 + }
17856 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
17857 +index 3a150445e0cba..3c3554d9ee50b 100644
17858 +--- a/kernel/sched/core.c
17859 ++++ b/kernel/sched/core.c
17860 +@@ -321,7 +321,7 @@ void update_rq_clock(struct rq *rq)
17861 + }
17862 +
17863 + static inline void
17864 +-rq_csd_init(struct rq *rq, call_single_data_t *csd, smp_call_func_t func)
17865 ++rq_csd_init(struct rq *rq, struct __call_single_data *csd, smp_call_func_t func)
17866 + {
17867 + csd->flags = 0;
17868 + csd->func = func;
17869 +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
17870 +index 2357921580f9c..6264584b51c25 100644
17871 +--- a/kernel/sched/debug.c
17872 ++++ b/kernel/sched/debug.c
17873 +@@ -8,8 +8,6 @@
17874 + */
17875 + #include "sched.h"
17876 +
17877 +-static DEFINE_SPINLOCK(sched_debug_lock);
17878 +-
17879 + /*
17880 + * This allows printing both to /proc/sched_debug and
17881 + * to the console
17882 +@@ -470,16 +468,37 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
17883 + #endif
17884 +
17885 + #ifdef CONFIG_CGROUP_SCHED
17886 ++static DEFINE_SPINLOCK(sched_debug_lock);
17887 + static char group_path[PATH_MAX];
17888 +
17889 +-static char *task_group_path(struct task_group *tg)
17890 ++static void task_group_path(struct task_group *tg, char *path, int plen)
17891 + {
17892 +- if (autogroup_path(tg, group_path, PATH_MAX))
17893 +- return group_path;
17894 ++ if (autogroup_path(tg, path, plen))
17895 ++ return;
17896 +
17897 +- cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
17898 ++ cgroup_path(tg->css.cgroup, path, plen);
17899 ++}
17900 +
17901 +- return group_path;
17902 ++/*
17903 ++ * Only 1 SEQ_printf_task_group_path() caller can use the full length
17904 ++ * group_path[] for cgroup path. Other simultaneous callers will have
17905 ++ * to use a shorter stack buffer. A "..." suffix is appended at the end
17906 ++ * of the stack buffer so that it will show up in case the output length
17907 ++ * matches the given buffer size to indicate possible path name truncation.
17908 ++ */
17909 ++#define SEQ_printf_task_group_path(m, tg, fmt...) \
17910 ++{ \
17911 ++ if (spin_trylock(&sched_debug_lock)) { \
17912 ++ task_group_path(tg, group_path, sizeof(group_path)); \
17913 ++ SEQ_printf(m, fmt, group_path); \
17914 ++ spin_unlock(&sched_debug_lock); \
17915 ++ } else { \
17916 ++ char buf[128]; \
17917 ++ char *bufend = buf + sizeof(buf) - 3; \
17918 ++ task_group_path(tg, buf, bufend - buf); \
17919 ++ strcpy(bufend - 1, "..."); \
17920 ++ SEQ_printf(m, fmt, buf); \
17921 ++ } \
17922 + }
17923 + #endif
17924 +
17925 +@@ -506,7 +525,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
17926 + SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p));
17927 + #endif
17928 + #ifdef CONFIG_CGROUP_SCHED
17929 +- SEQ_printf(m, " %s", task_group_path(task_group(p)));
17930 ++ SEQ_printf_task_group_path(m, task_group(p), " %s")
17931 + #endif
17932 +
17933 + SEQ_printf(m, "\n");
17934 +@@ -543,7 +562,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
17935 +
17936 + #ifdef CONFIG_FAIR_GROUP_SCHED
17937 + SEQ_printf(m, "\n");
17938 +- SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
17939 ++ SEQ_printf_task_group_path(m, cfs_rq->tg, "cfs_rq[%d]:%s\n", cpu);
17940 + #else
17941 + SEQ_printf(m, "\n");
17942 + SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
17943 +@@ -614,7 +633,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
17944 + {
17945 + #ifdef CONFIG_RT_GROUP_SCHED
17946 + SEQ_printf(m, "\n");
17947 +- SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
17948 ++ SEQ_printf_task_group_path(m, rt_rq->tg, "rt_rq[%d]:%s\n", cpu);
17949 + #else
17950 + SEQ_printf(m, "\n");
17951 + SEQ_printf(m, "rt_rq[%d]:\n", cpu);
17952 +@@ -666,7 +685,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
17953 + static void print_cpu(struct seq_file *m, int cpu)
17954 + {
17955 + struct rq *rq = cpu_rq(cpu);
17956 +- unsigned long flags;
17957 +
17958 + #ifdef CONFIG_X86
17959 + {
17960 +@@ -717,13 +735,11 @@ do { \
17961 + }
17962 + #undef P
17963 +
17964 +- spin_lock_irqsave(&sched_debug_lock, flags);
17965 + print_cfs_stats(m, cpu);
17966 + print_rt_stats(m, cpu);
17967 + print_dl_stats(m, cpu);
17968 +
17969 + print_rq(m, rq, cpu);
17970 +- spin_unlock_irqrestore(&sched_debug_lock, flags);
17971 + SEQ_printf(m, "\n");
17972 + }
17973 +
17974 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
17975 +index a0239649c7419..c80d1a039d19a 100644
17976 +--- a/kernel/sched/fair.c
17977 ++++ b/kernel/sched/fair.c
17978 +@@ -7735,8 +7735,7 @@ static int detach_tasks(struct lb_env *env)
17979 + * scheduler fails to find a good waiting task to
17980 + * migrate.
17981 + */
17982 +-
17983 +- if ((load >> env->sd->nr_balance_failed) > env->imbalance)
17984 ++ if (shr_bound(load, env->sd->nr_balance_failed) > env->imbalance)
17985 + goto next;
17986 +
17987 + env->imbalance -= load;
17988 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
17989 +index fac1b121d1130..fdebfcbdfca94 100644
17990 +--- a/kernel/sched/sched.h
17991 ++++ b/kernel/sched/sched.h
17992 +@@ -205,6 +205,13 @@ static inline void update_avg(u64 *avg, u64 sample)
17993 + *avg += diff / 8;
17994 + }
17995 +
17996 ++/*
17997 ++ * Shifting a value by an exponent greater *or equal* to the size of said value
17998 ++ * is UB; cap at size-1.
17999 ++ */
18000 ++#define shr_bound(val, shift) \
18001 ++ (val >> min_t(typeof(shift), shift, BITS_PER_TYPE(typeof(val)) - 1))
18002 ++
18003 + /*
18004 + * !! For sched_setattr_nocheck() (kernel) only !!
18005 + *
18006 +diff --git a/kernel/smp.c b/kernel/smp.c
18007 +index 25240fb2df949..f73a597c8e4cf 100644
18008 +--- a/kernel/smp.c
18009 ++++ b/kernel/smp.c
18010 +@@ -110,7 +110,7 @@ static DEFINE_PER_CPU(void *, cur_csd_info);
18011 + static atomic_t csd_bug_count = ATOMIC_INIT(0);
18012 +
18013 + /* Record current CSD work for current CPU, NULL to erase. */
18014 +-static void csd_lock_record(call_single_data_t *csd)
18015 ++static void csd_lock_record(struct __call_single_data *csd)
18016 + {
18017 + if (!csd) {
18018 + smp_mb(); /* NULL cur_csd after unlock. */
18019 +@@ -125,7 +125,7 @@ static void csd_lock_record(call_single_data_t *csd)
18020 + /* Or before unlock, as the case may be. */
18021 + }
18022 +
18023 +-static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
18024 ++static __always_inline int csd_lock_wait_getcpu(struct __call_single_data *csd)
18025 + {
18026 + unsigned int csd_type;
18027 +
18028 +@@ -140,7 +140,7 @@ static __always_inline int csd_lock_wait_getcpu(call_single_data_t *csd)
18029 + * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,
18030 + * so waiting on other types gets much less information.
18031 + */
18032 +-static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 ts0, u64 *ts1, int *bug_id)
18033 ++static __always_inline bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *ts1, int *bug_id)
18034 + {
18035 + int cpu = -1;
18036 + int cpux;
18037 +@@ -204,7 +204,7 @@ static __always_inline bool csd_lock_wait_toolong(call_single_data_t *csd, u64 t
18038 + * previous function call. For multi-cpu calls its even more interesting
18039 + * as we'll have to ensure no other cpu is observing our csd.
18040 + */
18041 +-static __always_inline void csd_lock_wait(call_single_data_t *csd)
18042 ++static __always_inline void csd_lock_wait(struct __call_single_data *csd)
18043 + {
18044 + int bug_id = 0;
18045 + u64 ts0, ts1;
18046 +@@ -219,17 +219,17 @@ static __always_inline void csd_lock_wait(call_single_data_t *csd)
18047 + }
18048 +
18049 + #else
18050 +-static void csd_lock_record(call_single_data_t *csd)
18051 ++static void csd_lock_record(struct __call_single_data *csd)
18052 + {
18053 + }
18054 +
18055 +-static __always_inline void csd_lock_wait(call_single_data_t *csd)
18056 ++static __always_inline void csd_lock_wait(struct __call_single_data *csd)
18057 + {
18058 + smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
18059 + }
18060 + #endif
18061 +
18062 +-static __always_inline void csd_lock(call_single_data_t *csd)
18063 ++static __always_inline void csd_lock(struct __call_single_data *csd)
18064 + {
18065 + csd_lock_wait(csd);
18066 + csd->flags |= CSD_FLAG_LOCK;
18067 +@@ -242,7 +242,7 @@ static __always_inline void csd_lock(call_single_data_t *csd)
18068 + smp_wmb();
18069 + }
18070 +
18071 +-static __always_inline void csd_unlock(call_single_data_t *csd)
18072 ++static __always_inline void csd_unlock(struct __call_single_data *csd)
18073 + {
18074 + WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
18075 +
18076 +@@ -276,7 +276,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
18077 + * for execution on the given CPU. data must already have
18078 + * ->func, ->info, and ->flags set.
18079 + */
18080 +-static int generic_exec_single(int cpu, call_single_data_t *csd)
18081 ++static int generic_exec_single(int cpu, struct __call_single_data *csd)
18082 + {
18083 + if (cpu == smp_processor_id()) {
18084 + smp_call_func_t func = csd->func;
18085 +@@ -542,7 +542,7 @@ EXPORT_SYMBOL(smp_call_function_single);
18086 + * NOTE: Be careful, there is unfortunately no current debugging facility to
18087 + * validate the correctness of this serialization.
18088 + */
18089 +-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
18090 ++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
18091 + {
18092 + int err = 0;
18093 +
18094 +diff --git a/kernel/up.c b/kernel/up.c
18095 +index c6f323dcd45bb..4edd5493eba24 100644
18096 +--- a/kernel/up.c
18097 ++++ b/kernel/up.c
18098 +@@ -25,7 +25,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
18099 + }
18100 + EXPORT_SYMBOL(smp_call_function_single);
18101 +
18102 +-int smp_call_function_single_async(int cpu, call_single_data_t *csd)
18103 ++int smp_call_function_single_async(int cpu, struct __call_single_data *csd)
18104 + {
18105 + unsigned long flags;
18106 +
18107 +diff --git a/lib/bug.c b/lib/bug.c
18108 +index 7103440c0ee1a..4ab398a2de938 100644
18109 +--- a/lib/bug.c
18110 ++++ b/lib/bug.c
18111 +@@ -158,30 +158,27 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
18112 +
18113 + file = NULL;
18114 + line = 0;
18115 +- warning = 0;
18116 +
18117 +- if (bug) {
18118 + #ifdef CONFIG_DEBUG_BUGVERBOSE
18119 + #ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
18120 +- file = bug->file;
18121 ++ file = bug->file;
18122 + #else
18123 +- file = (const char *)bug + bug->file_disp;
18124 ++ file = (const char *)bug + bug->file_disp;
18125 + #endif
18126 +- line = bug->line;
18127 ++ line = bug->line;
18128 + #endif
18129 +- warning = (bug->flags & BUGFLAG_WARNING) != 0;
18130 +- once = (bug->flags & BUGFLAG_ONCE) != 0;
18131 +- done = (bug->flags & BUGFLAG_DONE) != 0;
18132 +-
18133 +- if (warning && once) {
18134 +- if (done)
18135 +- return BUG_TRAP_TYPE_WARN;
18136 +-
18137 +- /*
18138 +- * Since this is the only store, concurrency is not an issue.
18139 +- */
18140 +- bug->flags |= BUGFLAG_DONE;
18141 +- }
18142 ++ warning = (bug->flags & BUGFLAG_WARNING) != 0;
18143 ++ once = (bug->flags & BUGFLAG_ONCE) != 0;
18144 ++ done = (bug->flags & BUGFLAG_DONE) != 0;
18145 ++
18146 ++ if (warning && once) {
18147 ++ if (done)
18148 ++ return BUG_TRAP_TYPE_WARN;
18149 ++
18150 ++ /*
18151 ++ * Since this is the only store, concurrency is not an issue.
18152 ++ */
18153 ++ bug->flags |= BUGFLAG_DONE;
18154 + }
18155 +
18156 + /*
18157 +diff --git a/lib/crypto/poly1305-donna32.c b/lib/crypto/poly1305-donna32.c
18158 +index 3cc77d94390b2..7fb71845cc846 100644
18159 +--- a/lib/crypto/poly1305-donna32.c
18160 ++++ b/lib/crypto/poly1305-donna32.c
18161 +@@ -10,7 +10,8 @@
18162 + #include <asm/unaligned.h>
18163 + #include <crypto/internal/poly1305.h>
18164 +
18165 +-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
18166 ++void poly1305_core_setkey(struct poly1305_core_key *key,
18167 ++ const u8 raw_key[POLY1305_BLOCK_SIZE])
18168 + {
18169 + /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */
18170 + key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff;
18171 +diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c
18172 +index 6ae181bb43450..d34cf40536689 100644
18173 +--- a/lib/crypto/poly1305-donna64.c
18174 ++++ b/lib/crypto/poly1305-donna64.c
18175 +@@ -12,7 +12,8 @@
18176 +
18177 + typedef __uint128_t u128;
18178 +
18179 +-void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16])
18180 ++void poly1305_core_setkey(struct poly1305_core_key *key,
18181 ++ const u8 raw_key[POLY1305_BLOCK_SIZE])
18182 + {
18183 + u64 t0, t1;
18184 +
18185 +diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c
18186 +index 9d2d14df0fee5..26d87fc3823e8 100644
18187 +--- a/lib/crypto/poly1305.c
18188 ++++ b/lib/crypto/poly1305.c
18189 +@@ -12,7 +12,8 @@
18190 + #include <linux/module.h>
18191 + #include <asm/unaligned.h>
18192 +
18193 +-void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key)
18194 ++void poly1305_init_generic(struct poly1305_desc_ctx *desc,
18195 ++ const u8 key[POLY1305_KEY_SIZE])
18196 + {
18197 + poly1305_core_setkey(&desc->core_r, key);
18198 + desc->s[0] = get_unaligned_le32(key + 16);
18199 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
18200 +index d72d2b90474a4..8d9f5fa4c6d39 100644
18201 +--- a/mm/memcontrol.c
18202 ++++ b/mm/memcontrol.c
18203 +@@ -3162,9 +3162,17 @@ static void drain_obj_stock(struct memcg_stock_pcp *stock)
18204 + unsigned int nr_bytes = stock->nr_bytes & (PAGE_SIZE - 1);
18205 +
18206 + if (nr_pages) {
18207 ++ struct mem_cgroup *memcg;
18208 ++
18209 + rcu_read_lock();
18210 +- __memcg_kmem_uncharge(obj_cgroup_memcg(old), nr_pages);
18211 ++retry:
18212 ++ memcg = obj_cgroup_memcg(old);
18213 ++ if (unlikely(!css_tryget(&memcg->css)))
18214 ++ goto retry;
18215 + rcu_read_unlock();
18216 ++
18217 ++ __memcg_kmem_uncharge(memcg, nr_pages);
18218 ++ css_put(&memcg->css);
18219 + }
18220 +
18221 + /*
18222 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
18223 +index 570a20b425613..2d7a667f8e609 100644
18224 +--- a/mm/memory-failure.c
18225 ++++ b/mm/memory-failure.c
18226 +@@ -1293,7 +1293,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
18227 + * communicated in siginfo, see kill_proc()
18228 + */
18229 + start = (page->index << PAGE_SHIFT) & ~(size - 1);
18230 +- unmap_mapping_range(page->mapping, start, start + size, 0);
18231 ++ unmap_mapping_range(page->mapping, start, size, 0);
18232 + }
18233 + kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
18234 + rc = 0;
18235 +diff --git a/mm/slab.c b/mm/slab.c
18236 +index b1113561b98b1..b2cc2cf7d8a33 100644
18237 +--- a/mm/slab.c
18238 ++++ b/mm/slab.c
18239 +@@ -1789,8 +1789,7 @@ static int __ref setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
18240 + }
18241 +
18242 + slab_flags_t kmem_cache_flags(unsigned int object_size,
18243 +- slab_flags_t flags, const char *name,
18244 +- void (*ctor)(void *))
18245 ++ slab_flags_t flags, const char *name)
18246 + {
18247 + return flags;
18248 + }
18249 +diff --git a/mm/slab.h b/mm/slab.h
18250 +index f9977d6613d61..e258ffcfb0ef2 100644
18251 +--- a/mm/slab.h
18252 ++++ b/mm/slab.h
18253 +@@ -110,8 +110,7 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
18254 + slab_flags_t flags, void (*ctor)(void *));
18255 +
18256 + slab_flags_t kmem_cache_flags(unsigned int object_size,
18257 +- slab_flags_t flags, const char *name,
18258 +- void (*ctor)(void *));
18259 ++ slab_flags_t flags, const char *name);
18260 + #else
18261 + static inline struct kmem_cache *
18262 + __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
18263 +@@ -119,8 +118,7 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
18264 + { return NULL; }
18265 +
18266 + static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
18267 +- slab_flags_t flags, const char *name,
18268 +- void (*ctor)(void *))
18269 ++ slab_flags_t flags, const char *name)
18270 + {
18271 + return flags;
18272 + }
18273 +diff --git a/mm/slab_common.c b/mm/slab_common.c
18274 +index 8d96679668b4e..8f27ccf9f7f35 100644
18275 +--- a/mm/slab_common.c
18276 ++++ b/mm/slab_common.c
18277 +@@ -196,7 +196,7 @@ struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
18278 + size = ALIGN(size, sizeof(void *));
18279 + align = calculate_alignment(flags, align, size);
18280 + size = ALIGN(size, align);
18281 +- flags = kmem_cache_flags(size, flags, name, NULL);
18282 ++ flags = kmem_cache_flags(size, flags, name);
18283 +
18284 + if (flags & SLAB_NEVER_MERGE)
18285 + return NULL;
18286 +diff --git a/mm/slub.c b/mm/slub.c
18287 +index fbc415c340095..05a501b67cd59 100644
18288 +--- a/mm/slub.c
18289 ++++ b/mm/slub.c
18290 +@@ -1397,7 +1397,6 @@ __setup("slub_debug", setup_slub_debug);
18291 + * @object_size: the size of an object without meta data
18292 + * @flags: flags to set
18293 + * @name: name of the cache
18294 +- * @ctor: constructor function
18295 + *
18296 + * Debug option(s) are applied to @flags. In addition to the debug
18297 + * option(s), if a slab name (or multiple) is specified i.e.
18298 +@@ -1405,8 +1404,7 @@ __setup("slub_debug", setup_slub_debug);
18299 + * then only the select slabs will receive the debug option(s).
18300 + */
18301 + slab_flags_t kmem_cache_flags(unsigned int object_size,
18302 +- slab_flags_t flags, const char *name,
18303 +- void (*ctor)(void *))
18304 ++ slab_flags_t flags, const char *name)
18305 + {
18306 + char *iter;
18307 + size_t len;
18308 +@@ -1471,8 +1469,7 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
18309 + static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
18310 + struct page *page) {}
18311 + slab_flags_t kmem_cache_flags(unsigned int object_size,
18312 +- slab_flags_t flags, const char *name,
18313 +- void (*ctor)(void *))
18314 ++ slab_flags_t flags, const char *name)
18315 + {
18316 + return flags;
18317 + }
18318 +@@ -3782,7 +3779,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
18319 +
18320 + static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
18321 + {
18322 +- s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
18323 ++ s->flags = kmem_cache_flags(s->size, flags, s->name);
18324 + #ifdef CONFIG_SLAB_FREELIST_HARDENED
18325 + s->random = get_random_long();
18326 + #endif
18327 +diff --git a/mm/sparse.c b/mm/sparse.c
18328 +index 7bd23f9d6cef6..33406ea2ecc44 100644
18329 +--- a/mm/sparse.c
18330 ++++ b/mm/sparse.c
18331 +@@ -547,6 +547,7 @@ static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
18332 + pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
18333 + __func__, nid);
18334 + pnum_begin = pnum;
18335 ++ sparse_buffer_fini();
18336 + goto failed;
18337 + }
18338 + check_usemap_section_nr(nid, usage);
18339 +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
18340 +index d0c1024bf6008..1c5a0a60292d2 100644
18341 +--- a/net/bluetooth/hci_conn.c
18342 ++++ b/net/bluetooth/hci_conn.c
18343 +@@ -1789,8 +1789,6 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
18344 + {
18345 + u32 phys = 0;
18346 +
18347 +- hci_dev_lock(conn->hdev);
18348 +-
18349 + /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
18350 + * Table 6.2: Packets defined for synchronous, asynchronous, and
18351 + * CSB logical transport types.
18352 +@@ -1887,7 +1885,5 @@ u32 hci_conn_get_phy(struct hci_conn *conn)
18353 + break;
18354 + }
18355 +
18356 +- hci_dev_unlock(conn->hdev);
18357 +-
18358 + return phys;
18359 + }
18360 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
18361 +index 17a72695865b5..e0a5428497352 100644
18362 +--- a/net/bluetooth/hci_event.c
18363 ++++ b/net/bluetooth/hci_event.c
18364 +@@ -4990,6 +4990,7 @@ static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
18365 + return;
18366 +
18367 + hchan->handle = le16_to_cpu(ev->handle);
18368 ++ hchan->amp = true;
18369 +
18370 + BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
18371 +
18372 +@@ -5022,7 +5023,7 @@ static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
18373 + hci_dev_lock(hdev);
18374 +
18375 + hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
18376 +- if (!hchan)
18377 ++ if (!hchan || !hchan->amp)
18378 + goto unlock;
18379 +
18380 + amp_destroy_logical_link(hchan, ev->reason);
18381 +diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
18382 +index 610ed0817bd77..161ea93a53828 100644
18383 +--- a/net/bluetooth/hci_request.c
18384 ++++ b/net/bluetooth/hci_request.c
18385 +@@ -271,12 +271,16 @@ int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
18386 + {
18387 + int ret;
18388 +
18389 +- if (!test_bit(HCI_UP, &hdev->flags))
18390 +- return -ENETDOWN;
18391 +-
18392 + /* Serialize all requests */
18393 + hci_req_sync_lock(hdev);
18394 +- ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
18395 ++ /* check the state after obtaing the lock to protect the HCI_UP
18396 ++ * against any races from hci_dev_do_close when the controller
18397 ++ * gets removed.
18398 ++ */
18399 ++ if (test_bit(HCI_UP, &hdev->flags))
18400 ++ ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
18401 ++ else
18402 ++ ret = -ENETDOWN;
18403 + hci_req_sync_unlock(hdev);
18404 +
18405 + return ret;
18406 +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
18407 +index 54cb82a69056c..5015ece7adf7a 100644
18408 +--- a/net/bridge/br_multicast.c
18409 ++++ b/net/bridge/br_multicast.c
18410 +@@ -3070,25 +3070,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
18411 + }
18412 +
18413 + #if IS_ENABLED(CONFIG_IPV6)
18414 +-static int br_ip6_multicast_mrd_rcv(struct net_bridge *br,
18415 +- struct net_bridge_port *port,
18416 +- struct sk_buff *skb)
18417 ++static void br_ip6_multicast_mrd_rcv(struct net_bridge *br,
18418 ++ struct net_bridge_port *port,
18419 ++ struct sk_buff *skb)
18420 + {
18421 +- int ret;
18422 +-
18423 +- if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
18424 +- return -ENOMSG;
18425 +-
18426 +- ret = ipv6_mc_check_icmpv6(skb);
18427 +- if (ret < 0)
18428 +- return ret;
18429 +-
18430 + if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
18431 +- return -ENOMSG;
18432 ++ return;
18433 +
18434 + br_multicast_mark_router(br, port);
18435 +-
18436 +- return 0;
18437 + }
18438 +
18439 + static int br_multicast_ipv6_rcv(struct net_bridge *br,
18440 +@@ -3102,18 +3091,12 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
18441 +
18442 + err = ipv6_mc_check_mld(skb);
18443 +
18444 +- if (err == -ENOMSG) {
18445 ++ if (err == -ENOMSG || err == -ENODATA) {
18446 + if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
18447 + BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
18448 +-
18449 +- if (ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr)) {
18450 +- err = br_ip6_multicast_mrd_rcv(br, port, skb);
18451 +-
18452 +- if (err < 0 && err != -ENOMSG) {
18453 +- br_multicast_err_count(br, port, skb->protocol);
18454 +- return err;
18455 +- }
18456 +- }
18457 ++ if (err == -ENODATA &&
18458 ++ ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
18459 ++ br_ip6_multicast_mrd_rcv(br, port, skb);
18460 +
18461 + return 0;
18462 + } else if (err < 0) {
18463 +diff --git a/net/core/dev.c b/net/core/dev.c
18464 +index 64f4c7ec729dc..2f17a4ac82f0e 100644
18465 +--- a/net/core/dev.c
18466 ++++ b/net/core/dev.c
18467 +@@ -5857,7 +5857,7 @@ static struct list_head *gro_list_prepare(struct napi_struct *napi,
18468 + return head;
18469 + }
18470 +
18471 +-static void skb_gro_reset_offset(struct sk_buff *skb)
18472 ++static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
18473 + {
18474 + const struct skb_shared_info *pinfo = skb_shinfo(skb);
18475 + const skb_frag_t *frag0 = &pinfo->frags[0];
18476 +@@ -5868,7 +5868,7 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
18477 +
18478 + if (!skb_headlen(skb) && pinfo->nr_frags &&
18479 + !PageHighMem(skb_frag_page(frag0)) &&
18480 +- (!NET_IP_ALIGN || !(skb_frag_off(frag0) & 3))) {
18481 ++ (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
18482 + NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
18483 + NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
18484 + skb_frag_size(frag0),
18485 +@@ -6101,7 +6101,7 @@ gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
18486 + skb_mark_napi_id(skb, napi);
18487 + trace_napi_gro_receive_entry(skb);
18488 +
18489 +- skb_gro_reset_offset(skb);
18490 ++ skb_gro_reset_offset(skb, 0);
18491 +
18492 + ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
18493 + trace_napi_gro_receive_exit(ret);
18494 +@@ -6194,7 +6194,7 @@ static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
18495 + napi->skb = NULL;
18496 +
18497 + skb_reset_mac_header(skb);
18498 +- skb_gro_reset_offset(skb);
18499 ++ skb_gro_reset_offset(skb, hlen);
18500 +
18501 + if (unlikely(skb_gro_header_hard(skb, hlen))) {
18502 + eth = skb_gro_header_slow(skb, hlen, 0);
18503 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
18504 +index 50a6d935376f5..798dc85bde5b7 100644
18505 +--- a/net/ipv4/route.c
18506 ++++ b/net/ipv4/route.c
18507 +@@ -66,6 +66,7 @@
18508 + #include <linux/types.h>
18509 + #include <linux/kernel.h>
18510 + #include <linux/mm.h>
18511 ++#include <linux/memblock.h>
18512 + #include <linux/string.h>
18513 + #include <linux/socket.h>
18514 + #include <linux/sockios.h>
18515 +@@ -476,8 +477,10 @@ static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
18516 + __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
18517 + }
18518 +
18519 +-#define IP_IDENTS_SZ 2048u
18520 +-
18521 ++/* Hash tables of size 2048..262144 depending on RAM size.
18522 ++ * Each bucket uses 8 bytes.
18523 ++ */
18524 ++static u32 ip_idents_mask __read_mostly;
18525 + static atomic_t *ip_idents __read_mostly;
18526 + static u32 *ip_tstamps __read_mostly;
18527 +
18528 +@@ -487,12 +490,16 @@ static u32 *ip_tstamps __read_mostly;
18529 + */
18530 + u32 ip_idents_reserve(u32 hash, int segs)
18531 + {
18532 +- u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
18533 +- atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
18534 +- u32 old = READ_ONCE(*p_tstamp);
18535 +- u32 now = (u32)jiffies;
18536 ++ u32 bucket, old, now = (u32)jiffies;
18537 ++ atomic_t *p_id;
18538 ++ u32 *p_tstamp;
18539 + u32 delta = 0;
18540 +
18541 ++ bucket = hash & ip_idents_mask;
18542 ++ p_tstamp = ip_tstamps + bucket;
18543 ++ p_id = ip_idents + bucket;
18544 ++ old = READ_ONCE(*p_tstamp);
18545 ++
18546 + if (old != now && cmpxchg(p_tstamp, old, now) == old)
18547 + delta = prandom_u32_max(now - old);
18548 +
18549 +@@ -3544,18 +3551,25 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
18550 +
18551 + int __init ip_rt_init(void)
18552 + {
18553 ++ void *idents_hash;
18554 + int cpu;
18555 +
18556 +- ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
18557 +- GFP_KERNEL);
18558 +- if (!ip_idents)
18559 +- panic("IP: failed to allocate ip_idents\n");
18560 ++ /* For modern hosts, this will use 2 MB of memory */
18561 ++ idents_hash = alloc_large_system_hash("IP idents",
18562 ++ sizeof(*ip_idents) + sizeof(*ip_tstamps),
18563 ++ 0,
18564 ++ 16, /* one bucket per 64 KB */
18565 ++ HASH_ZERO,
18566 ++ NULL,
18567 ++ &ip_idents_mask,
18568 ++ 2048,
18569 ++ 256*1024);
18570 ++
18571 ++ ip_idents = idents_hash;
18572 +
18573 +- prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
18574 ++ prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
18575 +
18576 +- ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
18577 +- if (!ip_tstamps)
18578 +- panic("IP: failed to allocate ip_tstamps\n");
18579 ++ ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
18580 +
18581 + for_each_possible_cpu(cpu) {
18582 + struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
18583 +diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
18584 +index 563d016e74783..db5831e6c136a 100644
18585 +--- a/net/ipv4/tcp_cong.c
18586 ++++ b/net/ipv4/tcp_cong.c
18587 +@@ -230,6 +230,10 @@ int tcp_set_default_congestion_control(struct net *net, const char *name)
18588 + ret = -ENOENT;
18589 + } else if (!bpf_try_module_get(ca, ca->owner)) {
18590 + ret = -EBUSY;
18591 ++ } else if (!net_eq(net, &init_net) &&
18592 ++ !(ca->flags & TCP_CONG_NON_RESTRICTED)) {
18593 ++ /* Only init netns can set default to a restricted algorithm */
18594 ++ ret = -EPERM;
18595 + } else {
18596 + prev = xchg(&net->ipv4.tcp_congestion_control, ca);
18597 + if (prev)
18598 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
18599 +index 4a2fd286787c0..9d28b2778e8fe 100644
18600 +--- a/net/ipv4/udp.c
18601 ++++ b/net/ipv4/udp.c
18602 +@@ -2657,9 +2657,12 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
18603 +
18604 + case UDP_GRO:
18605 + lock_sock(sk);
18606 ++
18607 ++ /* when enabling GRO, accept the related GSO packet type */
18608 + if (valbool)
18609 + udp_tunnel_encap_enable(sk->sk_socket);
18610 + up->gro_enabled = valbool;
18611 ++ up->accept_udp_l4 = valbool;
18612 + release_sock(sk);
18613 + break;
18614 +
18615 +diff --git a/net/ipv6/mcast_snoop.c b/net/ipv6/mcast_snoop.c
18616 +index d3d6b6a66e5fa..04d5fcdfa6e00 100644
18617 +--- a/net/ipv6/mcast_snoop.c
18618 ++++ b/net/ipv6/mcast_snoop.c
18619 +@@ -109,7 +109,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
18620 + struct mld_msg *mld;
18621 +
18622 + if (!ipv6_mc_may_pull(skb, len))
18623 +- return -EINVAL;
18624 ++ return -ENODATA;
18625 +
18626 + mld = (struct mld_msg *)skb_transport_header(skb);
18627 +
18628 +@@ -122,7 +122,7 @@ static int ipv6_mc_check_mld_msg(struct sk_buff *skb)
18629 + case ICMPV6_MGM_QUERY:
18630 + return ipv6_mc_check_mld_query(skb);
18631 + default:
18632 +- return -ENOMSG;
18633 ++ return -ENODATA;
18634 + }
18635 + }
18636 +
18637 +@@ -131,7 +131,7 @@ static inline __sum16 ipv6_mc_validate_checksum(struct sk_buff *skb)
18638 + return skb_checksum_validate(skb, IPPROTO_ICMPV6, ip6_compute_pseudo);
18639 + }
18640 +
18641 +-int ipv6_mc_check_icmpv6(struct sk_buff *skb)
18642 ++static int ipv6_mc_check_icmpv6(struct sk_buff *skb)
18643 + {
18644 + unsigned int len = skb_transport_offset(skb) + sizeof(struct icmp6hdr);
18645 + unsigned int transport_len = ipv6_transport_len(skb);
18646 +@@ -150,7 +150,6 @@ int ipv6_mc_check_icmpv6(struct sk_buff *skb)
18647 +
18648 + return 0;
18649 + }
18650 +-EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
18651 +
18652 + /**
18653 + * ipv6_mc_check_mld - checks whether this is a sane MLD packet
18654 +@@ -161,7 +160,10 @@ EXPORT_SYMBOL(ipv6_mc_check_icmpv6);
18655 + *
18656 + * -EINVAL: A broken packet was detected, i.e. it violates some internet
18657 + * standard
18658 +- * -ENOMSG: IP header validation succeeded but it is not an MLD packet.
18659 ++ * -ENOMSG: IP header validation succeeded but it is not an ICMPv6 packet
18660 ++ * with a hop-by-hop option.
18661 ++ * -ENODATA: IP+ICMPv6 header with hop-by-hop option validation succeeded
18662 ++ * but it is not an MLD packet.
18663 + * -ENOMEM: A memory allocation failure happened.
18664 + *
18665 + * Caller needs to set the skb network header and free any returned skb if it
18666 +diff --git a/net/mac80211/main.c b/net/mac80211/main.c
18667 +index 19c093bb3876e..73893025922fd 100644
18668 +--- a/net/mac80211/main.c
18669 ++++ b/net/mac80211/main.c
18670 +@@ -1150,8 +1150,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
18671 + if (local->hw.wiphy->max_scan_ie_len)
18672 + local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;
18673 +
18674 +- WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
18675 +- local->hw.n_cipher_schemes));
18676 ++ if (WARN_ON(!ieee80211_cs_list_valid(local->hw.cipher_schemes,
18677 ++ local->hw.n_cipher_schemes))) {
18678 ++ result = -EINVAL;
18679 ++ goto fail_workqueue;
18680 ++ }
18681 +
18682 + result = ieee80211_init_cipher_suites(local);
18683 + if (result < 0)
18684 +diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
18685 +index 9ae14270c543e..2b00f7f47693b 100644
18686 +--- a/net/netfilter/nf_tables_offload.c
18687 ++++ b/net/netfilter/nf_tables_offload.c
18688 +@@ -45,6 +45,48 @@ void nft_flow_rule_set_addr_type(struct nft_flow_rule *flow,
18689 + offsetof(struct nft_flow_key, control);
18690 + }
18691 +
18692 ++struct nft_offload_ethertype {
18693 ++ __be16 value;
18694 ++ __be16 mask;
18695 ++};
18696 ++
18697 ++static void nft_flow_rule_transfer_vlan(struct nft_offload_ctx *ctx,
18698 ++ struct nft_flow_rule *flow)
18699 ++{
18700 ++ struct nft_flow_match *match = &flow->match;
18701 ++ struct nft_offload_ethertype ethertype;
18702 ++
18703 ++ if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) &&
18704 ++ match->key.basic.n_proto != htons(ETH_P_8021Q) &&
18705 ++ match->key.basic.n_proto != htons(ETH_P_8021AD))
18706 ++ return;
18707 ++
18708 ++ ethertype.value = match->key.basic.n_proto;
18709 ++ ethertype.mask = match->mask.basic.n_proto;
18710 ++
18711 ++ if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) &&
18712 ++ (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) ||
18713 ++ match->key.vlan.vlan_tpid == htons(ETH_P_8021AD))) {
18714 ++ match->key.basic.n_proto = match->key.cvlan.vlan_tpid;
18715 ++ match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid;
18716 ++ match->key.cvlan.vlan_tpid = match->key.vlan.vlan_tpid;
18717 ++ match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid;
18718 ++ match->key.vlan.vlan_tpid = ethertype.value;
18719 ++ match->mask.vlan.vlan_tpid = ethertype.mask;
18720 ++ match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] =
18721 ++ offsetof(struct nft_flow_key, cvlan);
18722 ++ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN);
18723 ++ } else {
18724 ++ match->key.basic.n_proto = match->key.vlan.vlan_tpid;
18725 ++ match->mask.basic.n_proto = match->mask.vlan.vlan_tpid;
18726 ++ match->key.vlan.vlan_tpid = ethertype.value;
18727 ++ match->mask.vlan.vlan_tpid = ethertype.mask;
18728 ++ match->dissector.offset[FLOW_DISSECTOR_KEY_VLAN] =
18729 ++ offsetof(struct nft_flow_key, vlan);
18730 ++ match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_VLAN);
18731 ++ }
18732 ++}
18733 ++
18734 + struct nft_flow_rule *nft_flow_rule_create(struct net *net,
18735 + const struct nft_rule *rule)
18736 + {
18737 +@@ -89,6 +131,8 @@ struct nft_flow_rule *nft_flow_rule_create(struct net *net,
18738 +
18739 + expr = nft_expr_next(expr);
18740 + }
18741 ++ nft_flow_rule_transfer_vlan(ctx, flow);
18742 ++
18743 + flow->proto = ctx->dep.l3num;
18744 + kfree(ctx);
18745 +
18746 +diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
18747 +index 00e563a72d3d7..1d42d06f5b64b 100644
18748 +--- a/net/netfilter/nft_cmp.c
18749 ++++ b/net/netfilter/nft_cmp.c
18750 +@@ -115,19 +115,56 @@ nla_put_failure:
18751 + return -1;
18752 + }
18753 +
18754 ++union nft_cmp_offload_data {
18755 ++ u16 val16;
18756 ++ u32 val32;
18757 ++ u64 val64;
18758 ++};
18759 ++
18760 ++static void nft_payload_n2h(union nft_cmp_offload_data *data,
18761 ++ const u8 *val, u32 len)
18762 ++{
18763 ++ switch (len) {
18764 ++ case 2:
18765 ++ data->val16 = ntohs(*((u16 *)val));
18766 ++ break;
18767 ++ case 4:
18768 ++ data->val32 = ntohl(*((u32 *)val));
18769 ++ break;
18770 ++ case 8:
18771 ++ data->val64 = be64_to_cpu(*((u64 *)val));
18772 ++ break;
18773 ++ default:
18774 ++ WARN_ON_ONCE(1);
18775 ++ break;
18776 ++ }
18777 ++}
18778 ++
18779 + static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
18780 + struct nft_flow_rule *flow,
18781 + const struct nft_cmp_expr *priv)
18782 + {
18783 + struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
18784 ++ union nft_cmp_offload_data _data, _datamask;
18785 + u8 *mask = (u8 *)&flow->match.mask;
18786 + u8 *key = (u8 *)&flow->match.key;
18787 ++ u8 *data, *datamask;
18788 +
18789 + if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
18790 + return -EOPNOTSUPP;
18791 +
18792 +- memcpy(key + reg->offset, &priv->data, reg->len);
18793 +- memcpy(mask + reg->offset, &reg->mask, reg->len);
18794 ++ if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
18795 ++ nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
18796 ++ nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
18797 ++ data = (u8 *)&_data;
18798 ++ datamask = (u8 *)&_datamask;
18799 ++ } else {
18800 ++ data = (u8 *)&priv->data;
18801 ++ datamask = (u8 *)&reg->mask;
18802 ++ }
18803 ++
18804 ++ memcpy(key + reg->offset, data, reg->len);
18805 ++ memcpy(mask + reg->offset, datamask, reg->len);
18806 +
18807 + flow->match.dissector.used_keys |= BIT(reg->key);
18808 + flow->match.dissector.offset[reg->key] = reg->base_offset;
18809 +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
18810 +index 47d4e0e216514..1ebee25de6772 100644
18811 +--- a/net/netfilter/nft_payload.c
18812 ++++ b/net/netfilter/nft_payload.c
18813 +@@ -226,8 +226,9 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
18814 + if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
18815 + return -EOPNOTSUPP;
18816 +
18817 +- NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
18818 +- vlan_tci, sizeof(__be16), reg);
18819 ++ NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
18820 ++ vlan_tci, sizeof(__be16), reg,
18821 ++ NFT_OFFLOAD_F_NETWORK2HOST);
18822 + break;
18823 + case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
18824 + if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
18825 +@@ -241,16 +242,18 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
18826 + if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
18827 + return -EOPNOTSUPP;
18828 +
18829 +- NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
18830 +- vlan_tci, sizeof(__be16), reg);
18831 ++ NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
18832 ++ vlan_tci, sizeof(__be16), reg,
18833 ++ NFT_OFFLOAD_F_NETWORK2HOST);
18834 + break;
18835 + case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
18836 + sizeof(struct vlan_hdr):
18837 + if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
18838 + return -EOPNOTSUPP;
18839 +
18840 +- NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, vlan,
18841 ++ NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
18842 + vlan_tpid, sizeof(__be16), reg);
18843 ++ nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
18844 + break;
18845 + default:
18846 + return -EOPNOTSUPP;
18847 +diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
18848 +index 5971fb6f51cc7..dc21b4141b0af 100644
18849 +--- a/net/nfc/digital_dep.c
18850 ++++ b/net/nfc/digital_dep.c
18851 +@@ -1273,6 +1273,8 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
18852 + }
18853 +
18854 + rc = nfc_tm_data_received(ddev->nfc_dev, resp);
18855 ++ if (rc)
18856 ++ resp = NULL;
18857 +
18858 + exit:
18859 + kfree_skb(ddev->chaining_skb);
18860 +diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
18861 +index a3b46f8888033..53dbe733f9981 100644
18862 +--- a/net/nfc/llcp_sock.c
18863 ++++ b/net/nfc/llcp_sock.c
18864 +@@ -109,12 +109,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
18865 + GFP_KERNEL);
18866 + if (!llcp_sock->service_name) {
18867 + nfc_llcp_local_put(llcp_sock->local);
18868 ++ llcp_sock->local = NULL;
18869 + ret = -ENOMEM;
18870 + goto put_dev;
18871 + }
18872 + llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
18873 + if (llcp_sock->ssap == LLCP_SAP_MAX) {
18874 + nfc_llcp_local_put(llcp_sock->local);
18875 ++ llcp_sock->local = NULL;
18876 + kfree(llcp_sock->service_name);
18877 + llcp_sock->service_name = NULL;
18878 + ret = -EADDRINUSE;
18879 +@@ -709,6 +711,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
18880 + llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
18881 + if (llcp_sock->ssap == LLCP_SAP_MAX) {
18882 + nfc_llcp_local_put(llcp_sock->local);
18883 ++ llcp_sock->local = NULL;
18884 + ret = -ENOMEM;
18885 + goto put_dev;
18886 + }
18887 +@@ -756,6 +759,7 @@ sock_unlink:
18888 + sock_llcp_release:
18889 + nfc_llcp_put_ssap(local, llcp_sock->ssap);
18890 + nfc_llcp_local_put(llcp_sock->local);
18891 ++ llcp_sock->local = NULL;
18892 +
18893 + put_dev:
18894 + nfc_put_device(dev);
18895 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
18896 +index a0121e7c98b14..449625c2ccc7a 100644
18897 +--- a/net/packet/af_packet.c
18898 ++++ b/net/packet/af_packet.c
18899 +@@ -1358,7 +1358,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
18900 + struct packet_sock *po, *po_next, *po_skip = NULL;
18901 + unsigned int i, j, room = ROOM_NONE;
18902 +
18903 +- po = pkt_sk(f->arr[idx]);
18904 ++ po = pkt_sk(rcu_dereference(f->arr[idx]));
18905 +
18906 + if (try_self) {
18907 + room = packet_rcv_has_room(po, skb);
18908 +@@ -1370,7 +1370,7 @@ static unsigned int fanout_demux_rollover(struct packet_fanout *f,
18909 +
18910 + i = j = min_t(int, po->rollover->sock, num - 1);
18911 + do {
18912 +- po_next = pkt_sk(f->arr[i]);
18913 ++ po_next = pkt_sk(rcu_dereference(f->arr[i]));
18914 + if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
18915 + packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
18916 + if (i != j)
18917 +@@ -1465,7 +1465,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
18918 + if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
18919 + idx = fanout_demux_rollover(f, skb, idx, true, num);
18920 +
18921 +- po = pkt_sk(f->arr[idx]);
18922 ++ po = pkt_sk(rcu_dereference(f->arr[idx]));
18923 + return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
18924 + }
18925 +
18926 +@@ -1479,7 +1479,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
18927 + struct packet_fanout *f = po->fanout;
18928 +
18929 + spin_lock(&f->lock);
18930 +- f->arr[f->num_members] = sk;
18931 ++ rcu_assign_pointer(f->arr[f->num_members], sk);
18932 + smp_wmb();
18933 + f->num_members++;
18934 + if (f->num_members == 1)
18935 +@@ -1494,11 +1494,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
18936 +
18937 + spin_lock(&f->lock);
18938 + for (i = 0; i < f->num_members; i++) {
18939 +- if (f->arr[i] == sk)
18940 ++ if (rcu_dereference_protected(f->arr[i],
18941 ++ lockdep_is_held(&f->lock)) == sk)
18942 + break;
18943 + }
18944 + BUG_ON(i >= f->num_members);
18945 +- f->arr[i] = f->arr[f->num_members - 1];
18946 ++ rcu_assign_pointer(f->arr[i],
18947 ++ rcu_dereference_protected(f->arr[f->num_members - 1],
18948 ++ lockdep_is_held(&f->lock)));
18949 + f->num_members--;
18950 + if (f->num_members == 0)
18951 + __dev_remove_pack(&f->prot_hook);
18952 +@@ -1636,13 +1639,15 @@ static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
18953 + return false;
18954 + }
18955 +
18956 +-static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
18957 ++static int fanout_add(struct sock *sk, struct fanout_args *args)
18958 + {
18959 + struct packet_rollover *rollover = NULL;
18960 + struct packet_sock *po = pkt_sk(sk);
18961 ++ u16 type_flags = args->type_flags;
18962 + struct packet_fanout *f, *match;
18963 + u8 type = type_flags & 0xff;
18964 + u8 flags = type_flags >> 8;
18965 ++ u16 id = args->id;
18966 + int err;
18967 +
18968 + switch (type) {
18969 +@@ -1700,11 +1705,21 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
18970 + }
18971 + }
18972 + err = -EINVAL;
18973 +- if (match && match->flags != flags)
18974 +- goto out;
18975 +- if (!match) {
18976 ++ if (match) {
18977 ++ if (match->flags != flags)
18978 ++ goto out;
18979 ++ if (args->max_num_members &&
18980 ++ args->max_num_members != match->max_num_members)
18981 ++ goto out;
18982 ++ } else {
18983 ++ if (args->max_num_members > PACKET_FANOUT_MAX)
18984 ++ goto out;
18985 ++ if (!args->max_num_members)
18986 ++ /* legacy PACKET_FANOUT_MAX */
18987 ++ args->max_num_members = 256;
18988 + err = -ENOMEM;
18989 +- match = kzalloc(sizeof(*match), GFP_KERNEL);
18990 ++ match = kvzalloc(struct_size(match, arr, args->max_num_members),
18991 ++ GFP_KERNEL);
18992 + if (!match)
18993 + goto out;
18994 + write_pnet(&match->net, sock_net(sk));
18995 +@@ -1720,6 +1735,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
18996 + match->prot_hook.func = packet_rcv_fanout;
18997 + match->prot_hook.af_packet_priv = match;
18998 + match->prot_hook.id_match = match_fanout_group;
18999 ++ match->max_num_members = args->max_num_members;
19000 + list_add(&match->list, &fanout_list);
19001 + }
19002 + err = -EINVAL;
19003 +@@ -1730,7 +1746,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
19004 + match->prot_hook.type == po->prot_hook.type &&
19005 + match->prot_hook.dev == po->prot_hook.dev) {
19006 + err = -ENOSPC;
19007 +- if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
19008 ++ if (refcount_read(&match->sk_ref) < match->max_num_members) {
19009 + __dev_remove_pack(&po->prot_hook);
19010 + po->fanout = match;
19011 + po->rollover = rollover;
19012 +@@ -1744,7 +1760,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
19013 +
19014 + if (err && !refcount_read(&match->sk_ref)) {
19015 + list_del(&match->list);
19016 +- kfree(match);
19017 ++ kvfree(match);
19018 + }
19019 +
19020 + out:
19021 +@@ -3075,7 +3091,7 @@ static int packet_release(struct socket *sock)
19022 + kfree(po->rollover);
19023 + if (f) {
19024 + fanout_release_data(f);
19025 +- kfree(f);
19026 ++ kvfree(f);
19027 + }
19028 + /*
19029 + * Now the socket is dead. No more input will appear.
19030 +@@ -3866,14 +3882,14 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
19031 + }
19032 + case PACKET_FANOUT:
19033 + {
19034 +- int val;
19035 ++ struct fanout_args args = { 0 };
19036 +
19037 +- if (optlen != sizeof(val))
19038 ++ if (optlen != sizeof(int) && optlen != sizeof(args))
19039 + return -EINVAL;
19040 +- if (copy_from_sockptr(&val, optval, sizeof(val)))
19041 ++ if (copy_from_sockptr(&args, optval, optlen))
19042 + return -EFAULT;
19043 +
19044 +- return fanout_add(sk, val & 0xffff, val >> 16);
19045 ++ return fanout_add(sk, &args);
19046 + }
19047 + case PACKET_FANOUT_DATA:
19048 + {
19049 +diff --git a/net/packet/internal.h b/net/packet/internal.h
19050 +index fd41ecb7f6059..7af1e9179385f 100644
19051 +--- a/net/packet/internal.h
19052 ++++ b/net/packet/internal.h
19053 +@@ -77,11 +77,12 @@ struct packet_ring_buffer {
19054 + };
19055 +
19056 + extern struct mutex fanout_mutex;
19057 +-#define PACKET_FANOUT_MAX 256
19058 ++#define PACKET_FANOUT_MAX (1 << 16)
19059 +
19060 + struct packet_fanout {
19061 + possible_net_t net;
19062 + unsigned int num_members;
19063 ++ u32 max_num_members;
19064 + u16 id;
19065 + u8 type;
19066 + u8 flags;
19067 +@@ -90,10 +91,10 @@ struct packet_fanout {
19068 + struct bpf_prog __rcu *bpf_prog;
19069 + };
19070 + struct list_head list;
19071 +- struct sock *arr[PACKET_FANOUT_MAX];
19072 + spinlock_t lock;
19073 + refcount_t sk_ref;
19074 + struct packet_type prot_hook ____cacheline_aligned_in_smp;
19075 ++ struct sock __rcu *arr[];
19076 + };
19077 +
19078 + struct packet_rollover {
19079 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
19080 +index 9463c54c465af..3ac6b21ecf2c1 100644
19081 +--- a/net/sctp/socket.c
19082 ++++ b/net/sctp/socket.c
19083 +@@ -357,6 +357,18 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
19084 + return af;
19085 + }
19086 +
19087 ++static void sctp_auto_asconf_init(struct sctp_sock *sp)
19088 ++{
19089 ++ struct net *net = sock_net(&sp->inet.sk);
19090 ++
19091 ++ if (net->sctp.default_auto_asconf) {
19092 ++ spin_lock(&net->sctp.addr_wq_lock);
19093 ++ list_add_tail(&sp->auto_asconf_list, &net->sctp.auto_asconf_splist);
19094 ++ spin_unlock(&net->sctp.addr_wq_lock);
19095 ++ sp->do_auto_asconf = 1;
19096 ++ }
19097 ++}
19098 ++
19099 + /* Bind a local address either to an endpoint or to an association. */
19100 + static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
19101 + {
19102 +@@ -418,8 +430,10 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
19103 + return -EADDRINUSE;
19104 +
19105 + /* Refresh ephemeral port. */
19106 +- if (!bp->port)
19107 ++ if (!bp->port) {
19108 + bp->port = inet_sk(sk)->inet_num;
19109 ++ sctp_auto_asconf_init(sp);
19110 ++ }
19111 +
19112 + /* Add the address to the bind address list.
19113 + * Use GFP_ATOMIC since BHs will be disabled.
19114 +@@ -1520,9 +1534,11 @@ static void sctp_close(struct sock *sk, long timeout)
19115 +
19116 + /* Supposedly, no process has access to the socket, but
19117 + * the net layers still may.
19118 ++ * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
19119 ++ * held and that should be grabbed before socket lock.
19120 + */
19121 +- local_bh_disable();
19122 +- bh_lock_sock(sk);
19123 ++ spin_lock_bh(&net->sctp.addr_wq_lock);
19124 ++ bh_lock_sock_nested(sk);
19125 +
19126 + /* Hold the sock, since sk_common_release() will put sock_put()
19127 + * and we have just a little more cleanup.
19128 +@@ -1531,7 +1547,7 @@ static void sctp_close(struct sock *sk, long timeout)
19129 + sk_common_release(sk);
19130 +
19131 + bh_unlock_sock(sk);
19132 +- local_bh_enable();
19133 ++ spin_unlock_bh(&net->sctp.addr_wq_lock);
19134 +
19135 + sock_put(sk);
19136 +
19137 +@@ -4937,16 +4953,6 @@ static int sctp_init_sock(struct sock *sk)
19138 + sk_sockets_allocated_inc(sk);
19139 + sock_prot_inuse_add(net, sk->sk_prot, 1);
19140 +
19141 +- if (net->sctp.default_auto_asconf) {
19142 +- spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
19143 +- list_add_tail(&sp->auto_asconf_list,
19144 +- &net->sctp.auto_asconf_splist);
19145 +- sp->do_auto_asconf = 1;
19146 +- spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
19147 +- } else {
19148 +- sp->do_auto_asconf = 0;
19149 +- }
19150 +-
19151 + local_bh_enable();
19152 +
19153 + return 0;
19154 +@@ -4971,9 +4977,7 @@ static void sctp_destroy_sock(struct sock *sk)
19155 +
19156 + if (sp->do_auto_asconf) {
19157 + sp->do_auto_asconf = 0;
19158 +- spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
19159 + list_del(&sp->auto_asconf_list);
19160 +- spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
19161 + }
19162 + sctp_endpoint_free(sp->ep);
19163 + local_bh_disable();
19164 +@@ -9282,6 +9286,8 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
19165 + return err;
19166 + }
19167 +
19168 ++ sctp_auto_asconf_init(newsp);
19169 ++
19170 + /* Move any messages in the old socket's receive queue that are for the
19171 + * peeled off association to the new socket's receive queue.
19172 + */
19173 +diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
19174 +index 86eb6d679225c..2301b66280def 100644
19175 +--- a/net/tipc/crypto.c
19176 ++++ b/net/tipc/crypto.c
19177 +@@ -1485,6 +1485,8 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
19178 + /* Allocate statistic structure */
19179 + c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
19180 + if (!c->stats) {
19181 ++ if (c->wq)
19182 ++ destroy_workqueue(c->wq);
19183 + kfree_sensitive(c);
19184 + return -ENOMEM;
19185 + }
19186 +diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
19187 +index e4370b1b74947..902cb6dd710bd 100644
19188 +--- a/net/vmw_vsock/virtio_transport_common.c
19189 ++++ b/net/vmw_vsock/virtio_transport_common.c
19190 +@@ -733,6 +733,23 @@ static int virtio_transport_reset_no_sock(const struct virtio_transport *t,
19191 + return t->send_pkt(reply);
19192 + }
19193 +
19194 ++/* This function should be called with sk_lock held and SOCK_DONE set */
19195 ++static void virtio_transport_remove_sock(struct vsock_sock *vsk)
19196 ++{
19197 ++ struct virtio_vsock_sock *vvs = vsk->trans;
19198 ++ struct virtio_vsock_pkt *pkt, *tmp;
19199 ++
19200 ++ /* We don't need to take rx_lock, as the socket is closing and we are
19201 ++ * removing it.
19202 ++ */
19203 ++ list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
19204 ++ list_del(&pkt->list);
19205 ++ virtio_transport_free_pkt(pkt);
19206 ++ }
19207 ++
19208 ++ vsock_remove_sock(vsk);
19209 ++}
19210 ++
19211 + static void virtio_transport_wait_close(struct sock *sk, long timeout)
19212 + {
19213 + if (timeout) {
19214 +@@ -765,7 +782,7 @@ static void virtio_transport_do_close(struct vsock_sock *vsk,
19215 + (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
19216 + vsk->close_work_scheduled = false;
19217 +
19218 +- vsock_remove_sock(vsk);
19219 ++ virtio_transport_remove_sock(vsk);
19220 +
19221 + /* Release refcnt obtained when we scheduled the timeout */
19222 + sock_put(sk);
19223 +@@ -828,22 +845,15 @@ static bool virtio_transport_close(struct vsock_sock *vsk)
19224 +
19225 + void virtio_transport_release(struct vsock_sock *vsk)
19226 + {
19227 +- struct virtio_vsock_sock *vvs = vsk->trans;
19228 +- struct virtio_vsock_pkt *pkt, *tmp;
19229 + struct sock *sk = &vsk->sk;
19230 + bool remove_sock = true;
19231 +
19232 + if (sk->sk_type == SOCK_STREAM)
19233 + remove_sock = virtio_transport_close(vsk);
19234 +
19235 +- list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) {
19236 +- list_del(&pkt->list);
19237 +- virtio_transport_free_pkt(pkt);
19238 +- }
19239 +-
19240 + if (remove_sock) {
19241 + sock_set_flag(sk, SOCK_DONE);
19242 +- vsock_remove_sock(vsk);
19243 ++ virtio_transport_remove_sock(vsk);
19244 + }
19245 + }
19246 + EXPORT_SYMBOL_GPL(virtio_transport_release);
19247 +diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
19248 +index 8b65323207db5..1c9ecb18b8e64 100644
19249 +--- a/net/vmw_vsock/vmci_transport.c
19250 ++++ b/net/vmw_vsock/vmci_transport.c
19251 +@@ -568,8 +568,7 @@ vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
19252 + peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
19253 + out:
19254 + if (err < 0) {
19255 +- pr_err("Could not attach to queue pair with %d\n",
19256 +- err);
19257 ++ pr_err_once("Could not attach to queue pair with %d\n", err);
19258 + err = vmci_transport_error_to_vsock_error(err);
19259 + }
19260 +
19261 +diff --git a/net/wireless/scan.c b/net/wireless/scan.c
19262 +index 345ef1c967685..87fc56bc4f1e7 100644
19263 +--- a/net/wireless/scan.c
19264 ++++ b/net/wireless/scan.c
19265 +@@ -1753,6 +1753,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
19266 +
19267 + if (rdev->bss_entries >= bss_entries_limit &&
19268 + !cfg80211_bss_expire_oldest(rdev)) {
19269 ++ if (!list_empty(&new->hidden_list))
19270 ++ list_del(&new->hidden_list);
19271 + kfree(new);
19272 + goto drop;
19273 + }
19274 +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
19275 +index 52fd1f96b241e..ca4716b92774b 100644
19276 +--- a/net/xdp/xsk.c
19277 ++++ b/net/xdp/xsk.c
19278 +@@ -380,12 +380,16 @@ static int xsk_generic_xmit(struct sock *sk)
19279 + struct sk_buff *skb;
19280 + unsigned long flags;
19281 + int err = 0;
19282 ++ u32 hr, tr;
19283 +
19284 + mutex_lock(&xs->mutex);
19285 +
19286 + if (xs->queue_id >= xs->dev->real_num_tx_queues)
19287 + goto out;
19288 +
19289 ++ hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
19290 ++ tr = xs->dev->needed_tailroom;
19291 ++
19292 + while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
19293 + char *buffer;
19294 + u64 addr;
19295 +@@ -397,11 +401,13 @@ static int xsk_generic_xmit(struct sock *sk)
19296 + }
19297 +
19298 + len = desc.len;
19299 +- skb = sock_alloc_send_skb(sk, len, 1, &err);
19300 ++ skb = sock_alloc_send_skb(sk, hr + len + tr, 1, &err);
19301 + if (unlikely(!skb))
19302 + goto out;
19303 +
19304 ++ skb_reserve(skb, hr);
19305 + skb_put(skb, len);
19306 ++
19307 + addr = desc.addr;
19308 + buffer = xsk_buff_raw_get_data(xs->pool, addr);
19309 + err = skb_store_bits(skb, 0, buffer, len);
19310 +diff --git a/samples/kfifo/bytestream-example.c b/samples/kfifo/bytestream-example.c
19311 +index c406f03ee5519..5a90aa5278775 100644
19312 +--- a/samples/kfifo/bytestream-example.c
19313 ++++ b/samples/kfifo/bytestream-example.c
19314 +@@ -122,8 +122,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
19315 + ret = kfifo_from_user(&test, buf, count, &copied);
19316 +
19317 + mutex_unlock(&write_lock);
19318 ++ if (ret)
19319 ++ return ret;
19320 +
19321 +- return ret ? ret : copied;
19322 ++ return copied;
19323 + }
19324 +
19325 + static ssize_t fifo_read(struct file *file, char __user *buf,
19326 +@@ -138,8 +140,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
19327 + ret = kfifo_to_user(&test, buf, count, &copied);
19328 +
19329 + mutex_unlock(&read_lock);
19330 ++ if (ret)
19331 ++ return ret;
19332 +
19333 +- return ret ? ret : copied;
19334 ++ return copied;
19335 + }
19336 +
19337 + static const struct proc_ops fifo_proc_ops = {
19338 +diff --git a/samples/kfifo/inttype-example.c b/samples/kfifo/inttype-example.c
19339 +index 78977fc4a23f7..e5403d8c971a5 100644
19340 +--- a/samples/kfifo/inttype-example.c
19341 ++++ b/samples/kfifo/inttype-example.c
19342 +@@ -115,8 +115,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
19343 + ret = kfifo_from_user(&test, buf, count, &copied);
19344 +
19345 + mutex_unlock(&write_lock);
19346 ++ if (ret)
19347 ++ return ret;
19348 +
19349 +- return ret ? ret : copied;
19350 ++ return copied;
19351 + }
19352 +
19353 + static ssize_t fifo_read(struct file *file, char __user *buf,
19354 +@@ -131,8 +133,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
19355 + ret = kfifo_to_user(&test, buf, count, &copied);
19356 +
19357 + mutex_unlock(&read_lock);
19358 ++ if (ret)
19359 ++ return ret;
19360 +
19361 +- return ret ? ret : copied;
19362 ++ return copied;
19363 + }
19364 +
19365 + static const struct proc_ops fifo_proc_ops = {
19366 +diff --git a/samples/kfifo/record-example.c b/samples/kfifo/record-example.c
19367 +index c507998a2617c..f64f3d62d6c2a 100644
19368 +--- a/samples/kfifo/record-example.c
19369 ++++ b/samples/kfifo/record-example.c
19370 +@@ -129,8 +129,10 @@ static ssize_t fifo_write(struct file *file, const char __user *buf,
19371 + ret = kfifo_from_user(&test, buf, count, &copied);
19372 +
19373 + mutex_unlock(&write_lock);
19374 ++ if (ret)
19375 ++ return ret;
19376 +
19377 +- return ret ? ret : copied;
19378 ++ return copied;
19379 + }
19380 +
19381 + static ssize_t fifo_read(struct file *file, char __user *buf,
19382 +@@ -145,8 +147,10 @@ static ssize_t fifo_read(struct file *file, char __user *buf,
19383 + ret = kfifo_to_user(&test, buf, count, &copied);
19384 +
19385 + mutex_unlock(&read_lock);
19386 ++ if (ret)
19387 ++ return ret;
19388 +
19389 +- return ret ? ret : copied;
19390 ++ return copied;
19391 + }
19392 +
19393 + static const struct proc_ops fifo_proc_ops = {
19394 +diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
19395 +index 1e89e2d3851f9..f83255a39e653 100644
19396 +--- a/security/integrity/ima/ima_template.c
19397 ++++ b/security/integrity/ima/ima_template.c
19398 +@@ -468,8 +468,8 @@ int ima_restore_measurement_list(loff_t size, void *buf)
19399 + }
19400 + }
19401 +
19402 +- entry->pcr = !ima_canonical_fmt ? *(hdr[HDR_PCR].data) :
19403 +- le32_to_cpu(*(hdr[HDR_PCR].data));
19404 ++ entry->pcr = !ima_canonical_fmt ? *(u32 *)(hdr[HDR_PCR].data) :
19405 ++ le32_to_cpu(*(u32 *)(hdr[HDR_PCR].data));
19406 + ret = ima_restore_measurement_entry(entry);
19407 + if (ret < 0)
19408 + break;
19409 +diff --git a/security/keys/trusted-keys/trusted_tpm1.c b/security/keys/trusted-keys/trusted_tpm1.c
19410 +index 7a937c3c52834..230c0b27b77d1 100644
19411 +--- a/security/keys/trusted-keys/trusted_tpm1.c
19412 ++++ b/security/keys/trusted-keys/trusted_tpm1.c
19413 +@@ -791,13 +791,33 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
19414 + return -EINVAL;
19415 + break;
19416 + case Opt_blobauth:
19417 +- if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
19418 +- return -EINVAL;
19419 +- res = hex2bin(opt->blobauth, args[0].from,
19420 +- SHA1_DIGEST_SIZE);
19421 +- if (res < 0)
19422 +- return -EINVAL;
19423 ++ /*
19424 ++ * TPM 1.2 authorizations are sha1 hashes passed in as
19425 ++ * hex strings. TPM 2.0 authorizations are simple
19426 ++ * passwords (although it can take a hash as well)
19427 ++ */
19428 ++ opt->blobauth_len = strlen(args[0].from);
19429 ++
19430 ++ if (opt->blobauth_len == 2 * TPM_DIGEST_SIZE) {
19431 ++ res = hex2bin(opt->blobauth, args[0].from,
19432 ++ TPM_DIGEST_SIZE);
19433 ++ if (res < 0)
19434 ++ return -EINVAL;
19435 ++
19436 ++ opt->blobauth_len = TPM_DIGEST_SIZE;
19437 ++ break;
19438 ++ }
19439 ++
19440 ++ if (tpm2 && opt->blobauth_len <= sizeof(opt->blobauth)) {
19441 ++ memcpy(opt->blobauth, args[0].from,
19442 ++ opt->blobauth_len);
19443 ++ break;
19444 ++ }
19445 ++
19446 ++ return -EINVAL;
19447 ++
19448 + break;
19449 ++
19450 + case Opt_migratable:
19451 + if (*args[0].from == '0')
19452 + pay->migratable = 0;
19453 +diff --git a/security/keys/trusted-keys/trusted_tpm2.c b/security/keys/trusted-keys/trusted_tpm2.c
19454 +index c87c4df8703d4..4c19d3abddbee 100644
19455 +--- a/security/keys/trusted-keys/trusted_tpm2.c
19456 ++++ b/security/keys/trusted-keys/trusted_tpm2.c
19457 +@@ -97,10 +97,12 @@ int tpm2_seal_trusted(struct tpm_chip *chip,
19458 + TPM_DIGEST_SIZE);
19459 +
19460 + /* sensitive */
19461 +- tpm_buf_append_u16(&buf, 4 + TPM_DIGEST_SIZE + payload->key_len + 1);
19462 ++ tpm_buf_append_u16(&buf, 4 + options->blobauth_len + payload->key_len + 1);
19463 ++
19464 ++ tpm_buf_append_u16(&buf, options->blobauth_len);
19465 ++ if (options->blobauth_len)
19466 ++ tpm_buf_append(&buf, options->blobauth, options->blobauth_len);
19467 +
19468 +- tpm_buf_append_u16(&buf, TPM_DIGEST_SIZE);
19469 +- tpm_buf_append(&buf, options->blobauth, TPM_DIGEST_SIZE);
19470 + tpm_buf_append_u16(&buf, payload->key_len + 1);
19471 + tpm_buf_append(&buf, payload->key, payload->key_len);
19472 + tpm_buf_append_u8(&buf, payload->migratable);
19473 +@@ -265,7 +267,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip,
19474 + NULL /* nonce */, 0,
19475 + TPM2_SA_CONTINUE_SESSION,
19476 + options->blobauth /* hmac */,
19477 +- TPM_DIGEST_SIZE);
19478 ++ options->blobauth_len);
19479 +
19480 + rc = tpm_transmit_cmd(chip, &buf, 6, "unsealing");
19481 + if (rc > 0)
19482 +diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
19483 +index 40cebde62856a..b9fdba2ff4163 100644
19484 +--- a/security/selinux/include/classmap.h
19485 ++++ b/security/selinux/include/classmap.h
19486 +@@ -242,11 +242,12 @@ struct security_class_mapping secclass_map[] = {
19487 + { "infiniband_endport",
19488 + { "manage_subnet", NULL } },
19489 + { "bpf",
19490 +- {"map_create", "map_read", "map_write", "prog_load", "prog_run"} },
19491 ++ { "map_create", "map_read", "map_write", "prog_load", "prog_run",
19492 ++ NULL } },
19493 + { "xdp_socket",
19494 + { COMMON_SOCK_PERMS, NULL } },
19495 + { "perf_event",
19496 +- {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
19497 ++ { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
19498 + { "lockdown",
19499 + { "integrity", "confidentiality", NULL } },
19500 + { NULL }
19501 +diff --git a/sound/core/init.c b/sound/core/init.c
19502 +index 018ce4ef12ec8..9f5270c90a10a 100644
19503 +--- a/sound/core/init.c
19504 ++++ b/sound/core/init.c
19505 +@@ -390,10 +390,8 @@ int snd_card_disconnect(struct snd_card *card)
19506 + return 0;
19507 + }
19508 + card->shutdown = 1;
19509 +- spin_unlock(&card->files_lock);
19510 +
19511 + /* replace file->f_op with special dummy operations */
19512 +- spin_lock(&card->files_lock);
19513 + list_for_each_entry(mfile, &card->files_list, list) {
19514 + /* it's critical part, use endless loop */
19515 + /* we have no room to fail */
19516 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
19517 +index d05d16ddbdf2c..8ec57bd351dfe 100644
19518 +--- a/sound/pci/hda/patch_realtek.c
19519 ++++ b/sound/pci/hda/patch_realtek.c
19520 +@@ -2470,13 +2470,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
19521 + ALC882_FIXUP_ACER_ASPIRE_8930G),
19522 + SND_PCI_QUIRK(0x1025, 0x0146, "Acer Aspire 6935G",
19523 + ALC882_FIXUP_ACER_ASPIRE_8930G),
19524 ++ SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
19525 ++ ALC882_FIXUP_ACER_ASPIRE_4930G),
19526 ++ SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
19527 + SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
19528 + ALC882_FIXUP_ACER_ASPIRE_4930G),
19529 + SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
19530 + ALC882_FIXUP_ACER_ASPIRE_4930G),
19531 +- SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G",
19532 +- ALC882_FIXUP_ACER_ASPIRE_4930G),
19533 +- SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
19534 + SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G",
19535 + ALC882_FIXUP_ACER_ASPIRE_4930G),
19536 + SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
19537 +@@ -2489,11 +2489,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
19538 + SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
19539 + SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
19540 + SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
19541 ++ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
19542 ++ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
19543 + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
19544 + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
19545 + SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
19546 +- SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
19547 +- SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
19548 +
19549 + /* All Apple entries are in codec SSIDs */
19550 + SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
19551 +@@ -2536,9 +2536,19 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
19552 + SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
19553 + SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
19554 + SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
19555 ++ SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19556 ++ SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19557 ++ SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19558 ++ SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19559 ++ SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19560 ++ SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19561 ++ SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19562 ++ SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19563 ++ SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19564 ++ SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19565 + SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
19566 + SND_PCI_QUIRK(0x1558, 0x9506, "Clevo P955HQ", ALC1220_FIXUP_CLEVO_P950),
19567 +- SND_PCI_QUIRK(0x1558, 0x950A, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
19568 ++ SND_PCI_QUIRK(0x1558, 0x950a, "Clevo P955H[PR]", ALC1220_FIXUP_CLEVO_P950),
19569 + SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
19570 + SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
19571 + SND_PCI_QUIRK(0x1558, 0x95e3, "Clevo P955[ER]T", ALC1220_FIXUP_CLEVO_P950),
19572 +@@ -2548,16 +2558,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
19573 + SND_PCI_QUIRK(0x1558, 0x96e1, "Clevo P960[ER][CDFN]-K", ALC1220_FIXUP_CLEVO_P950),
19574 + SND_PCI_QUIRK(0x1558, 0x97e1, "Clevo P970[ER][CDFN]", ALC1220_FIXUP_CLEVO_P950),
19575 + SND_PCI_QUIRK(0x1558, 0x97e2, "Clevo P970RC-M", ALC1220_FIXUP_CLEVO_P950),
19576 +- SND_PCI_QUIRK(0x1558, 0x50d3, "Clevo PC50[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19577 +- SND_PCI_QUIRK(0x1558, 0x65d1, "Clevo PB51[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19578 +- SND_PCI_QUIRK(0x1558, 0x65d2, "Clevo PB51R[CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19579 +- SND_PCI_QUIRK(0x1558, 0x65e1, "Clevo PB51[ED][DF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19580 +- SND_PCI_QUIRK(0x1558, 0x65e5, "Clevo PC50D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19581 +- SND_PCI_QUIRK(0x1558, 0x67d1, "Clevo PB71[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19582 +- SND_PCI_QUIRK(0x1558, 0x67e1, "Clevo PB71[DE][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19583 +- SND_PCI_QUIRK(0x1558, 0x67e5, "Clevo PC70D[PRS](?:-D|-G)?", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19584 +- SND_PCI_QUIRK(0x1558, 0x70d1, "Clevo PC70[ER][CDF]", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19585 +- SND_PCI_QUIRK(0x1558, 0x7714, "Clevo X170", ALC1220_FIXUP_CLEVO_PB51ED_PINS),
19586 + SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
19587 + SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
19588 + SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
19589 +@@ -4331,6 +4331,35 @@ static void alc245_fixup_hp_x360_amp(struct hda_codec *codec,
19590 + }
19591 + }
19592 +
19593 ++/* toggle GPIO2 at each time stream is started; we use PREPARE state instead */
19594 ++static void alc274_hp_envy_pcm_hook(struct hda_pcm_stream *hinfo,
19595 ++ struct hda_codec *codec,
19596 ++ struct snd_pcm_substream *substream,
19597 ++ int action)
19598 ++{
19599 ++ switch (action) {
19600 ++ case HDA_GEN_PCM_ACT_PREPARE:
19601 ++ alc_update_gpio_data(codec, 0x04, true);
19602 ++ break;
19603 ++ case HDA_GEN_PCM_ACT_CLEANUP:
19604 ++ alc_update_gpio_data(codec, 0x04, false);
19605 ++ break;
19606 ++ }
19607 ++}
19608 ++
19609 ++static void alc274_fixup_hp_envy_gpio(struct hda_codec *codec,
19610 ++ const struct hda_fixup *fix,
19611 ++ int action)
19612 ++{
19613 ++ struct alc_spec *spec = codec->spec;
19614 ++
19615 ++ if (action == HDA_FIXUP_ACT_PROBE) {
19616 ++ spec->gpio_mask |= 0x04;
19617 ++ spec->gpio_dir |= 0x04;
19618 ++ spec->gen.pcm_playback_hook = alc274_hp_envy_pcm_hook;
19619 ++ }
19620 ++}
19621 ++
19622 + static void alc_update_coef_led(struct hda_codec *codec,
19623 + struct alc_coef_led *led,
19624 + bool polarity, bool on)
19625 +@@ -6443,6 +6472,7 @@ enum {
19626 + ALC255_FIXUP_XIAOMI_HEADSET_MIC,
19627 + ALC274_FIXUP_HP_MIC,
19628 + ALC274_FIXUP_HP_HEADSET_MIC,
19629 ++ ALC274_FIXUP_HP_ENVY_GPIO,
19630 + ALC256_FIXUP_ASUS_HPE,
19631 + ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
19632 + ALC287_FIXUP_HP_GPIO_LED,
19633 +@@ -7882,6 +7912,10 @@ static const struct hda_fixup alc269_fixups[] = {
19634 + .chained = true,
19635 + .chain_id = ALC274_FIXUP_HP_MIC
19636 + },
19637 ++ [ALC274_FIXUP_HP_ENVY_GPIO] = {
19638 ++ .type = HDA_FIXUP_FUNC,
19639 ++ .v.func = alc274_fixup_hp_envy_gpio,
19640 ++ },
19641 + [ALC256_FIXUP_ASUS_HPE] = {
19642 + .type = HDA_FIXUP_VERBS,
19643 + .v.verbs = (const struct hda_verb[]) {
19644 +@@ -7947,12 +7981,12 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19645 + SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
19646 + SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
19647 + SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
19648 +- SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
19649 + SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
19650 + SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
19651 + SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
19652 + SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
19653 + SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
19654 ++ SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC),
19655 + SND_PCI_QUIRK(0x1025, 0x0840, "Acer Aspire E1", ALC269VB_FIXUP_ASPIRE_E1_COEF),
19656 + SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
19657 + SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
19658 +@@ -8008,8 +8042,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19659 + SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
19660 + SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
19661 + SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
19662 +- SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
19663 + SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
19664 ++ SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
19665 + SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
19666 + SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
19667 + SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
19668 +@@ -8019,8 +8053,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19669 + SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
19670 + SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
19671 + SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
19672 +- SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
19673 + SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
19674 ++ SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
19675 + SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
19676 + SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
19677 + SND_PCI_QUIRK(0x1028, 0x0a2e, "Dell", ALC236_FIXUP_DELL_AIO_HEADSET_MIC),
19678 +@@ -8031,35 +8065,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19679 + SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
19680 + SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
19681 + SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
19682 +- SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
19683 +- /* ALC282 */
19684 + SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19685 + SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19686 + SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19687 ++ SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19688 ++ SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
19689 ++ SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19690 ++ SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19691 + SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
19692 + SND_PCI_QUIRK(0x103c, 0x2237, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
19693 + SND_PCI_QUIRK(0x103c, 0x2238, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
19694 + SND_PCI_QUIRK(0x103c, 0x2239, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
19695 + SND_PCI_QUIRK(0x103c, 0x224b, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
19696 +- SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19697 +- SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19698 +- SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19699 +- SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19700 +- SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
19701 +- SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
19702 +- SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
19703 +- SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19704 +- SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19705 +- SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19706 +- SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19707 +- SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19708 +- SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
19709 +- SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19710 +- SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19711 +- /* ALC290 */
19712 +- SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19713 +- SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19714 +- SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19715 + SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19716 + SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19717 + SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19718 +@@ -8067,26 +8084,41 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19719 + SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19720 + SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19721 + SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED),
19722 ++ SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
19723 + SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19724 + SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19725 + SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19726 + SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19727 ++ SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19728 ++ SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19729 ++ SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19730 ++ SND_PCI_QUIRK(0x103c, 0x226e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19731 ++ SND_PCI_QUIRK(0x103c, 0x2271, "HP", ALC286_FIXUP_HP_GPIO_LED),
19732 + SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19733 ++ SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC280_FIXUP_HP_DOCK_PINS),
19734 + SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19735 ++ SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC280_FIXUP_HP_DOCK_PINS),
19736 + SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19737 + SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19738 + SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19739 + SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19740 + SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19741 ++ SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19742 ++ SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19743 ++ SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19744 ++ SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19745 ++ SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19746 + SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19747 + SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19748 + SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19749 +- SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19750 ++ SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19751 ++ SND_PCI_QUIRK(0x103c, 0x22db, "HP", ALC280_FIXUP_HP_9480M),
19752 ++ SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19753 ++ SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
19754 + SND_PCI_QUIRK(0x103c, 0x2334, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19755 + SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19756 + SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19757 + SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
19758 +- SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
19759 + SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
19760 + SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
19761 + SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC),
19762 +@@ -8101,6 +8133,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19763 + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
19764 + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
19765 + SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
19766 ++ SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
19767 + SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
19768 + SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED),
19769 + SND_PCI_QUIRK(0x103c, 0x8730, "HP ProBook 445 G7", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
19770 +@@ -8128,16 +8161,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19771 + SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
19772 + SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
19773 + SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
19774 ++ SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
19775 + SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
19776 + SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
19777 + SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
19778 +- SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
19779 + SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
19780 ++ SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
19781 + SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
19782 + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
19783 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
19784 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
19785 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
19786 ++ SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
19787 + SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
19788 + SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
19789 + SND_PCI_QUIRK(0x1043, 0x194e, "ASUS UX563FD", ALC294_FIXUP_ASUS_HPE),
19790 +@@ -8150,32 +8185,31 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19791 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
19792 + SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
19793 + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
19794 +- SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
19795 + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
19796 + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
19797 + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
19798 + SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401),
19799 + SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
19800 +- SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
19801 + SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
19802 + SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
19803 + SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC),
19804 + SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
19805 + SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
19806 + SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
19807 +- SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
19808 +- SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
19809 + SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
19810 + SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
19811 + SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
19812 + SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
19813 ++ SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
19814 ++ SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
19815 + SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
19816 + SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT),
19817 + SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
19818 +- SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
19819 + SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
19820 ++ SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
19821 + SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
19822 + SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
19823 ++ SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
19824 + SND_PCI_QUIRK(0x10ec, 0x1230, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
19825 + SND_PCI_QUIRK(0x10ec, 0x1252, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
19826 + SND_PCI_QUIRK(0x10ec, 0x1254, "Intel Reference board", ALC295_FIXUP_CHROME_BOOK),
19827 +@@ -8185,9 +8219,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19828 + SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
19829 + SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
19830 + SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NP930XCJ-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
19831 +- SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
19832 + SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
19833 + SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
19834 ++ SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
19835 + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
19836 + SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
19837 + SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
19838 +@@ -8243,9 +8277,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19839 + SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
19840 + SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
19841 + SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
19842 ++ SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
19843 + SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK_LIMIT_BOOST),
19844 + SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
19845 +- SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
19846 + SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
19847 + SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
19848 + SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
19849 +@@ -8289,6 +8323,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19850 + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
19851 + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
19852 + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
19853 ++ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
19854 + SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
19855 + SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
19856 + SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
19857 +@@ -8307,20 +8342,18 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
19858 + SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
19859 + SND_PCI_QUIRK(0x17aa, 0x511e, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
19860 + SND_PCI_QUIRK(0x17aa, 0x511f, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
19861 +- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
19862 + SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
19863 + SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
19864 + SND_PCI_QUIRK(0x1b35, 0x1235, "CZC B20", ALC269_FIXUP_CZC_B20),
19865 + SND_PCI_QUIRK(0x1b35, 0x1236, "CZC TMI", ALC269_FIXUP_CZC_TMI),
19866 + SND_PCI_QUIRK(0x1b35, 0x1237, "CZC L101", ALC269_FIXUP_CZC_L101),
19867 + SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
19868 ++ SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
19869 ++ SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
19870 + SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
19871 + SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE),
19872 + SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
19873 + SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC),
19874 +- SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
19875 +- SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
19876 +- SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
19877 + SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC),
19878 + SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
19879 + SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10),
19880 +@@ -8777,6 +8810,16 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
19881 + {0x19, 0x03a11020},
19882 + {0x21, 0x0321101f}),
19883 + SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
19884 ++ {0x12, 0x90a60130},
19885 ++ {0x14, 0x90170110},
19886 ++ {0x19, 0x04a11040},
19887 ++ {0x21, 0x04211020}),
19888 ++ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
19889 ++ {0x14, 0x90170110},
19890 ++ {0x19, 0x04a11040},
19891 ++ {0x1d, 0x40600001},
19892 ++ {0x21, 0x04211020}),
19893 ++ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
19894 + {0x14, 0x90170110},
19895 + {0x19, 0x04a11040},
19896 + {0x21, 0x04211020}),
19897 +@@ -8947,10 +8990,6 @@ static const struct snd_hda_pin_quirk alc269_fallback_pin_fixup_tbl[] = {
19898 + SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
19899 + {0x19, 0x40000000},
19900 + {0x1a, 0x40000000}),
19901 +- SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
19902 +- {0x14, 0x90170110},
19903 +- {0x19, 0x04a11040},
19904 +- {0x21, 0x04211020}),
19905 + {}
19906 + };
19907 +
19908 +@@ -9266,8 +9305,7 @@ static const struct snd_pci_quirk alc861_fixup_tbl[] = {
19909 + SND_PCI_QUIRK(0x1043, 0x1393, "ASUS A6Rp", ALC861_FIXUP_ASUS_A6RP),
19910 + SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", ALC861_FIXUP_AMP_VREF_0F),
19911 + SND_PCI_QUIRK(0x1462, 0x7254, "HP DX2200", ALC861_FIXUP_NO_JACK_DETECT),
19912 +- SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", ALC861_FIXUP_AMP_VREF_0F),
19913 +- SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", ALC861_FIXUP_AMP_VREF_0F),
19914 ++ SND_PCI_QUIRK_VENDOR(0x1584, "Haier/Uniwill", ALC861_FIXUP_AMP_VREF_0F),
19915 + SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", ALC861_FIXUP_FSC_AMILO_PI1505),
19916 + {}
19917 + };
19918 +@@ -10062,6 +10100,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
19919 + SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
19920 + SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
19921 + SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
19922 ++ SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
19923 + SND_PCI_QUIRK(0x1025, 0x123c, "Acer Nitro N50-600", ALC662_FIXUP_ACER_NITRO_HEADSET_MODE),
19924 + SND_PCI_QUIRK(0x1025, 0x124e, "Acer 2660G", ALC662_FIXUP_ACER_X2660G_HEADSET_MODE),
19925 + SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
19926 +@@ -10078,9 +10117,9 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
19927 + SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2),
19928 + SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
19929 + SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
19930 +- SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
19931 + SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
19932 + SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
19933 ++ SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
19934 + SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
19935 + SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
19936 + SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
19937 +@@ -10100,7 +10139,6 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
19938 + SND_PCI_QUIRK(0x1b0a, 0x01b8, "ACER Veriton", ALC662_FIXUP_ACER_VERITON),
19939 + SND_PCI_QUIRK(0x1b35, 0x1234, "CZC ET26", ALC662_FIXUP_CZC_ET26),
19940 + SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
19941 +- SND_PCI_QUIRK(0x1025, 0x0566, "Acer Aspire Ethos 8951G", ALC669_FIXUP_ACER_ASPIRE_ETHOS),
19942 +
19943 + #if 0
19944 + /* Below is a quirk table taken from the old code.
19945 +diff --git a/sound/soc/codecs/ak5558.c b/sound/soc/codecs/ak5558.c
19946 +index 65a248c92f669..adbdfdbc7a38b 100644
19947 +--- a/sound/soc/codecs/ak5558.c
19948 ++++ b/sound/soc/codecs/ak5558.c
19949 +@@ -272,7 +272,7 @@ static void ak5558_power_off(struct ak5558_priv *ak5558)
19950 + if (!ak5558->reset_gpiod)
19951 + return;
19952 +
19953 +- gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
19954 ++ gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
19955 + usleep_range(1000, 2000);
19956 + }
19957 +
19958 +@@ -281,7 +281,7 @@ static void ak5558_power_on(struct ak5558_priv *ak5558)
19959 + if (!ak5558->reset_gpiod)
19960 + return;
19961 +
19962 +- gpiod_set_value_cansleep(ak5558->reset_gpiod, 1);
19963 ++ gpiod_set_value_cansleep(ak5558->reset_gpiod, 0);
19964 + usleep_range(1000, 2000);
19965 + }
19966 +
19967 +diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
19968 +index 9e3de9ded0efb..b8950758471fa 100644
19969 +--- a/sound/soc/codecs/tlv320aic32x4.c
19970 ++++ b/sound/soc/codecs/tlv320aic32x4.c
19971 +@@ -577,12 +577,12 @@ static const struct regmap_range_cfg aic32x4_regmap_pages[] = {
19972 + .window_start = 0,
19973 + .window_len = 128,
19974 + .range_min = 0,
19975 +- .range_max = AIC32X4_RMICPGAVOL,
19976 ++ .range_max = AIC32X4_REFPOWERUP,
19977 + },
19978 + };
19979 +
19980 + const struct regmap_config aic32x4_regmap_config = {
19981 +- .max_register = AIC32X4_RMICPGAVOL,
19982 ++ .max_register = AIC32X4_REFPOWERUP,
19983 + .ranges = aic32x4_regmap_pages,
19984 + .num_ranges = ARRAY_SIZE(aic32x4_regmap_pages),
19985 + };
19986 +@@ -1243,6 +1243,10 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
19987 + if (ret)
19988 + goto err_disable_regulators;
19989 +
19990 ++ ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
19991 ++ if (ret)
19992 ++ goto err_disable_regulators;
19993 ++
19994 + ret = devm_snd_soc_register_component(dev,
19995 + &soc_component_dev_aic32x4, &aic32x4_dai, 1);
19996 + if (ret) {
19997 +@@ -1250,10 +1254,6 @@ int aic32x4_probe(struct device *dev, struct regmap *regmap)
19998 + goto err_disable_regulators;
19999 + }
20000 +
20001 +- ret = aic32x4_register_clocks(dev, aic32x4->mclk_name);
20002 +- if (ret)
20003 +- goto err_disable_regulators;
20004 +-
20005 + return 0;
20006 +
20007 + err_disable_regulators:
20008 +diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
20009 +index ceaf3bbb18e66..9d325555e2191 100644
20010 +--- a/sound/soc/codecs/wm8960.c
20011 ++++ b/sound/soc/codecs/wm8960.c
20012 +@@ -608,10 +608,6 @@ static const int bclk_divs[] = {
20013 + * - lrclk = sysclk / dac_divs
20014 + * - 10 * bclk = sysclk / bclk_divs
20015 + *
20016 +- * If we cannot find an exact match for (sysclk, lrclk, bclk)
20017 +- * triplet, we relax the bclk such that bclk is chosen as the
20018 +- * closest available frequency greater than expected bclk.
20019 +- *
20020 + * @wm8960: codec private data
20021 + * @mclk: MCLK used to derive sysclk
20022 + * @sysclk_idx: sysclk_divs index for found sysclk
20023 +@@ -629,7 +625,7 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
20024 + {
20025 + int sysclk, bclk, lrclk;
20026 + int i, j, k;
20027 +- int diff, closest = mclk;
20028 ++ int diff;
20029 +
20030 + /* marker for no match */
20031 + *bclk_idx = -1;
20032 +@@ -653,12 +649,6 @@ int wm8960_configure_sysclk(struct wm8960_priv *wm8960, int mclk,
20033 + *bclk_idx = k;
20034 + break;
20035 + }
20036 +- if (diff > 0 && closest > diff) {
20037 +- *sysclk_idx = i;
20038 +- *dac_idx = j;
20039 +- *bclk_idx = k;
20040 +- closest = diff;
20041 +- }
20042 + }
20043 + if (k != ARRAY_SIZE(bclk_divs))
20044 + break;
20045 +diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
20046 +index 97b4f5480a31c..0c640308ed80b 100644
20047 +--- a/sound/soc/generic/audio-graph-card.c
20048 ++++ b/sound/soc/generic/audio-graph-card.c
20049 +@@ -340,7 +340,7 @@ static int graph_dai_link_of(struct asoc_simple_priv *priv,
20050 + struct device_node *top = dev->of_node;
20051 + struct asoc_simple_dai *cpu_dai;
20052 + struct asoc_simple_dai *codec_dai;
20053 +- int ret, single_cpu;
20054 ++ int ret, single_cpu = 0;
20055 +
20056 + /* Do it only CPU turn */
20057 + if (!li->cpu)
20058 +diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
20059 +index 75365c7bb3930..d916ec69c24ff 100644
20060 +--- a/sound/soc/generic/simple-card.c
20061 ++++ b/sound/soc/generic/simple-card.c
20062 +@@ -258,7 +258,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv,
20063 + struct device_node *plat = NULL;
20064 + char prop[128];
20065 + char *prefix = "";
20066 +- int ret, single_cpu;
20067 ++ int ret, single_cpu = 0;
20068 +
20069 + /*
20070 + * |CPU |Codec : turn
20071 +diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
20072 +index 4e0248d2accc7..7c5038803be73 100644
20073 +--- a/sound/soc/intel/Makefile
20074 ++++ b/sound/soc/intel/Makefile
20075 +@@ -5,7 +5,7 @@ obj-$(CONFIG_SND_SOC) += common/
20076 + # Platform Support
20077 + obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += atom/
20078 + obj-$(CONFIG_SND_SOC_INTEL_CATPT) += catpt/
20079 +-obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += skylake/
20080 ++obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += skylake/
20081 + obj-$(CONFIG_SND_SOC_INTEL_KEEMBAY) += keembay/
20082 +
20083 + # Machine support
20084 +diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
20085 +index cc9a2509ace29..e0149cf6127d0 100644
20086 +--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
20087 ++++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
20088 +@@ -282,11 +282,33 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
20089 + struct snd_interval *chan = hw_param_interval(params,
20090 + SNDRV_PCM_HW_PARAM_CHANNELS);
20091 + struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
20092 +- struct snd_soc_dpcm *dpcm = container_of(
20093 +- params, struct snd_soc_dpcm, hw_params);
20094 +- struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
20095 +- struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
20096 ++ struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL;
20097 +
20098 ++ /*
20099 ++ * The following loop will be called only for playback stream
20100 ++ * In this platform, there is only one playback device on every SSP
20101 ++ */
20102 ++ for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
20103 ++ rtd_dpcm = dpcm;
20104 ++ break;
20105 ++ }
20106 ++
20107 ++ /*
20108 ++ * This following loop will be called only for capture stream
20109 ++ * In this platform, there is only one capture device on every SSP
20110 ++ */
20111 ++ for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) {
20112 ++ rtd_dpcm = dpcm;
20113 ++ break;
20114 ++ }
20115 ++
20116 ++ if (!rtd_dpcm)
20117 ++ return -EINVAL;
20118 ++
20119 ++ /*
20120 ++ * The above 2 loops are mutually exclusive based on the stream direction,
20121 ++ * thus rtd_dpcm variable will never be overwritten
20122 ++ */
20123 + /*
20124 + * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
20125 + * where as kblda7219m98927 & kblmax98927 supports S16_LE by default.
20126 +@@ -309,9 +331,9 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
20127 + /*
20128 + * The ADSP will convert the FE rate to 48k, stereo, 24 bit
20129 + */
20130 +- if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
20131 +- !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
20132 +- !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
20133 ++ if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") ||
20134 ++ !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") ||
20135 ++ !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) {
20136 + rate->min = rate->max = 48000;
20137 + chan->min = chan->max = 2;
20138 + snd_mask_none(fmt);
20139 +@@ -322,7 +344,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
20140 + * The speaker on the SSP0 supports S16_LE and not S24_LE.
20141 + * thus changing the mask here
20142 + */
20143 +- if (!strcmp(be_dai_link->name, "SSP0-Codec"))
20144 ++ if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec"))
20145 + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
20146 +
20147 + return 0;
20148 +diff --git a/sound/soc/intel/boards/sof_wm8804.c b/sound/soc/intel/boards/sof_wm8804.c
20149 +index a46ba13e8eb0c..6a181e45143d7 100644
20150 +--- a/sound/soc/intel/boards/sof_wm8804.c
20151 ++++ b/sound/soc/intel/boards/sof_wm8804.c
20152 +@@ -124,7 +124,11 @@ static int sof_wm8804_hw_params(struct snd_pcm_substream *substream,
20153 + }
20154 +
20155 + snd_soc_dai_set_clkdiv(codec_dai, WM8804_MCLK_DIV, mclk_div);
20156 +- snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
20157 ++ ret = snd_soc_dai_set_pll(codec_dai, 0, 0, sysclk, mclk_freq);
20158 ++ if (ret < 0) {
20159 ++ dev_err(rtd->card->dev, "Failed to set WM8804 PLL\n");
20160 ++ return ret;
20161 ++ }
20162 +
20163 + ret = snd_soc_dai_set_sysclk(codec_dai, WM8804_TX_CLKSRC_PLL,
20164 + sysclk, SND_SOC_CLOCK_OUT);
20165 +diff --git a/sound/soc/intel/skylake/Makefile b/sound/soc/intel/skylake/Makefile
20166 +index dd39149b89b1d..1c4649bccec5a 100644
20167 +--- a/sound/soc/intel/skylake/Makefile
20168 ++++ b/sound/soc/intel/skylake/Makefile
20169 +@@ -7,7 +7,7 @@ ifdef CONFIG_DEBUG_FS
20170 + snd-soc-skl-objs += skl-debug.o
20171 + endif
20172 +
20173 +-obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE) += snd-soc-skl.o
20174 ++obj-$(CONFIG_SND_SOC_INTEL_SKYLAKE_COMMON) += snd-soc-skl.o
20175 +
20176 + #Skylake Clock device support
20177 + snd-soc-skl-ssp-clk-objs := skl-ssp-clk.o
20178 +diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
20179 +index 9300fef9bf269..125e07f65d2b5 100644
20180 +--- a/sound/soc/samsung/tm2_wm5110.c
20181 ++++ b/sound/soc/samsung/tm2_wm5110.c
20182 +@@ -553,7 +553,7 @@ static int tm2_probe(struct platform_device *pdev)
20183 +
20184 + ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller",
20185 + cells_name, i, &args);
20186 +- if (!args.np) {
20187 ++ if (ret) {
20188 + dev_err(dev, "i2s-controller property parse error: %d\n", i);
20189 + ret = -EINVAL;
20190 + goto dai_node_put;
20191 +diff --git a/sound/usb/card.c b/sound/usb/card.c
20192 +index fc7c359ae215a..258b81b399177 100644
20193 +--- a/sound/usb/card.c
20194 ++++ b/sound/usb/card.c
20195 +@@ -182,9 +182,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
20196 + ctrlif, interface);
20197 + return -EINVAL;
20198 + }
20199 +- usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
20200 +-
20201 +- return 0;
20202 ++ return usb_driver_claim_interface(&usb_audio_driver, iface,
20203 ++ USB_AUDIO_IFACE_UNUSED);
20204 + }
20205 +
20206 + if ((altsd->bInterfaceClass != USB_CLASS_AUDIO &&
20207 +@@ -204,7 +203,8 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
20208 +
20209 + if (! snd_usb_parse_audio_interface(chip, interface)) {
20210 + usb_set_interface(dev, interface, 0); /* reset the current interface */
20211 +- usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L);
20212 ++ return usb_driver_claim_interface(&usb_audio_driver, iface,
20213 ++ USB_AUDIO_IFACE_UNUSED);
20214 + }
20215 +
20216 + return 0;
20217 +@@ -864,7 +864,7 @@ static void usb_audio_disconnect(struct usb_interface *intf)
20218 + struct snd_card *card;
20219 + struct list_head *p;
20220 +
20221 +- if (chip == (void *)-1L)
20222 ++ if (chip == USB_AUDIO_IFACE_UNUSED)
20223 + return;
20224 +
20225 + card = chip->card;
20226 +@@ -993,7 +993,7 @@ static int usb_audio_suspend(struct usb_interface *intf, pm_message_t message)
20227 + struct usb_mixer_interface *mixer;
20228 + struct list_head *p;
20229 +
20230 +- if (chip == (void *)-1L)
20231 ++ if (chip == USB_AUDIO_IFACE_UNUSED)
20232 + return 0;
20233 +
20234 + if (!chip->num_suspended_intf++) {
20235 +@@ -1024,7 +1024,7 @@ static int __usb_audio_resume(struct usb_interface *intf, bool reset_resume)
20236 + struct list_head *p;
20237 + int err = 0;
20238 +
20239 +- if (chip == (void *)-1L)
20240 ++ if (chip == USB_AUDIO_IFACE_UNUSED)
20241 + return 0;
20242 +
20243 + atomic_inc(&chip->active); /* avoid autopm */
20244 +diff --git a/sound/usb/midi.c b/sound/usb/midi.c
20245 +index 0c23fa6d8525d..cd46ca7cd28de 100644
20246 +--- a/sound/usb/midi.c
20247 ++++ b/sound/usb/midi.c
20248 +@@ -1332,7 +1332,7 @@ static int snd_usbmidi_in_endpoint_create(struct snd_usb_midi *umidi,
20249 +
20250 + error:
20251 + snd_usbmidi_in_endpoint_delete(ep);
20252 +- return -ENOMEM;
20253 ++ return err;
20254 + }
20255 +
20256 + /*
20257 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
20258 +index 5ab2a4580bfb2..bddef8ad57783 100644
20259 +--- a/sound/usb/quirks.c
20260 ++++ b/sound/usb/quirks.c
20261 +@@ -55,8 +55,12 @@ static int create_composite_quirk(struct snd_usb_audio *chip,
20262 + if (!iface)
20263 + continue;
20264 + if (quirk->ifnum != probed_ifnum &&
20265 +- !usb_interface_claimed(iface))
20266 +- usb_driver_claim_interface(driver, iface, (void *)-1L);
20267 ++ !usb_interface_claimed(iface)) {
20268 ++ err = usb_driver_claim_interface(driver, iface,
20269 ++ USB_AUDIO_IFACE_UNUSED);
20270 ++ if (err < 0)
20271 ++ return err;
20272 ++ }
20273 + }
20274 +
20275 + return 0;
20276 +@@ -390,8 +394,12 @@ static int create_autodetect_quirks(struct snd_usb_audio *chip,
20277 + continue;
20278 +
20279 + err = create_autodetect_quirk(chip, iface, driver);
20280 +- if (err >= 0)
20281 +- usb_driver_claim_interface(driver, iface, (void *)-1L);
20282 ++ if (err >= 0) {
20283 ++ err = usb_driver_claim_interface(driver, iface,
20284 ++ USB_AUDIO_IFACE_UNUSED);
20285 ++ if (err < 0)
20286 ++ return err;
20287 ++ }
20288 + }
20289 +
20290 + return 0;
20291 +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
20292 +index 9667060ff92be..e54a98f465490 100644
20293 +--- a/sound/usb/usbaudio.h
20294 ++++ b/sound/usb/usbaudio.h
20295 +@@ -63,6 +63,8 @@ struct snd_usb_audio {
20296 + struct media_intf_devnode *ctl_intf_media_devnode;
20297 + };
20298 +
20299 ++#define USB_AUDIO_IFACE_UNUSED ((void *)-1L)
20300 ++
20301 + #define usb_audio_err(chip, fmt, args...) \
20302 + dev_err(&(chip)->dev->dev, fmt, ##args)
20303 + #define usb_audio_warn(chip, fmt, args...) \
20304 +diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c
20305 +index 2afb7d5b1aca2..592803af97340 100644
20306 +--- a/tools/bpf/bpftool/btf.c
20307 ++++ b/tools/bpf/bpftool/btf.c
20308 +@@ -519,6 +519,7 @@ static int do_dump(int argc, char **argv)
20309 + NEXT_ARG();
20310 + if (argc < 1) {
20311 + p_err("expecting value for 'format' option\n");
20312 ++ err = -EINVAL;
20313 + goto done;
20314 + }
20315 + if (strcmp(*argv, "c") == 0) {
20316 +@@ -528,11 +529,13 @@ static int do_dump(int argc, char **argv)
20317 + } else {
20318 + p_err("unrecognized format specifier: '%s', possible values: raw, c",
20319 + *argv);
20320 ++ err = -EINVAL;
20321 + goto done;
20322 + }
20323 + NEXT_ARG();
20324 + } else {
20325 + p_err("unrecognized option: '%s'", *argv);
20326 ++ err = -EINVAL;
20327 + goto done;
20328 + }
20329 + }
20330 +diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
20331 +index 682daaa49e6a2..33068d6ed5d6c 100644
20332 +--- a/tools/bpf/bpftool/main.c
20333 ++++ b/tools/bpf/bpftool/main.c
20334 +@@ -274,7 +274,7 @@ static int do_batch(int argc, char **argv)
20335 + int n_argc;
20336 + FILE *fp;
20337 + char *cp;
20338 +- int err;
20339 ++ int err = 0;
20340 + int i;
20341 +
20342 + if (argc < 2) {
20343 +@@ -368,7 +368,6 @@ static int do_batch(int argc, char **argv)
20344 + } else {
20345 + if (!json_output)
20346 + printf("processed %d commands\n", lines);
20347 +- err = 0;
20348 + }
20349 + err_close:
20350 + if (fp != stdin)
20351 +diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
20352 +index a7efbd84fbcc4..ce6faf1b90e83 100644
20353 +--- a/tools/bpf/bpftool/map.c
20354 ++++ b/tools/bpf/bpftool/map.c
20355 +@@ -99,7 +99,7 @@ static int do_dump_btf(const struct btf_dumper *d,
20356 + void *value)
20357 + {
20358 + __u32 value_id;
20359 +- int ret;
20360 ++ int ret = 0;
20361 +
20362 + /* start of key-value pair */
20363 + jsonw_start_object(d->jw);
20364 +diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
20365 +index bbcefb3ff5a57..4538ed762a209 100644
20366 +--- a/tools/lib/bpf/bpf_core_read.h
20367 ++++ b/tools/lib/bpf/bpf_core_read.h
20368 +@@ -88,11 +88,19 @@ enum bpf_enum_value_kind {
20369 + const void *p = (const void *)s + __CORE_RELO(s, field, BYTE_OFFSET); \
20370 + unsigned long long val; \
20371 + \
20372 ++ /* This is a so-called barrier_var() operation that makes specified \
20373 ++ * variable "a black box" for optimizing compiler. \
20374 ++ * It forces compiler to perform BYTE_OFFSET relocation on p and use \
20375 ++ * its calculated value in the switch below, instead of applying \
20376 ++ * the same relocation 4 times for each individual memory load. \
20377 ++ */ \
20378 ++ asm volatile("" : "=r"(p) : "0"(p)); \
20379 ++ \
20380 + switch (__CORE_RELO(s, field, BYTE_SIZE)) { \
20381 +- case 1: val = *(const unsigned char *)p; \
20382 +- case 2: val = *(const unsigned short *)p; \
20383 +- case 4: val = *(const unsigned int *)p; \
20384 +- case 8: val = *(const unsigned long long *)p; \
20385 ++ case 1: val = *(const unsigned char *)p; break; \
20386 ++ case 2: val = *(const unsigned short *)p; break; \
20387 ++ case 4: val = *(const unsigned int *)p; break; \
20388 ++ case 8: val = *(const unsigned long long *)p; break; \
20389 + } \
20390 + val <<= __CORE_RELO(s, field, LSHIFT_U64); \
20391 + if (__CORE_RELO(s, field, SIGNED)) \
20392 +diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
20393 +index f9ef37707888f..1c2e91ee041d8 100644
20394 +--- a/tools/lib/bpf/bpf_tracing.h
20395 ++++ b/tools/lib/bpf/bpf_tracing.h
20396 +@@ -413,20 +413,38 @@ typeof(name(0)) name(struct pt_regs *ctx) \
20397 + } \
20398 + static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args)
20399 +
20400 ++#define ___bpf_fill0(arr, p, x) do {} while (0)
20401 ++#define ___bpf_fill1(arr, p, x) arr[p] = x
20402 ++#define ___bpf_fill2(arr, p, x, args...) arr[p] = x; ___bpf_fill1(arr, p + 1, args)
20403 ++#define ___bpf_fill3(arr, p, x, args...) arr[p] = x; ___bpf_fill2(arr, p + 1, args)
20404 ++#define ___bpf_fill4(arr, p, x, args...) arr[p] = x; ___bpf_fill3(arr, p + 1, args)
20405 ++#define ___bpf_fill5(arr, p, x, args...) arr[p] = x; ___bpf_fill4(arr, p + 1, args)
20406 ++#define ___bpf_fill6(arr, p, x, args...) arr[p] = x; ___bpf_fill5(arr, p + 1, args)
20407 ++#define ___bpf_fill7(arr, p, x, args...) arr[p] = x; ___bpf_fill6(arr, p + 1, args)
20408 ++#define ___bpf_fill8(arr, p, x, args...) arr[p] = x; ___bpf_fill7(arr, p + 1, args)
20409 ++#define ___bpf_fill9(arr, p, x, args...) arr[p] = x; ___bpf_fill8(arr, p + 1, args)
20410 ++#define ___bpf_fill10(arr, p, x, args...) arr[p] = x; ___bpf_fill9(arr, p + 1, args)
20411 ++#define ___bpf_fill11(arr, p, x, args...) arr[p] = x; ___bpf_fill10(arr, p + 1, args)
20412 ++#define ___bpf_fill12(arr, p, x, args...) arr[p] = x; ___bpf_fill11(arr, p + 1, args)
20413 ++#define ___bpf_fill(arr, args...) \
20414 ++ ___bpf_apply(___bpf_fill, ___bpf_narg(args))(arr, 0, args)
20415 ++
20416 + /*
20417 + * BPF_SEQ_PRINTF to wrap bpf_seq_printf to-be-printed values
20418 + * in a structure.
20419 + */
20420 +-#define BPF_SEQ_PRINTF(seq, fmt, args...) \
20421 +- ({ \
20422 +- _Pragma("GCC diagnostic push") \
20423 +- _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
20424 +- static const char ___fmt[] = fmt; \
20425 +- unsigned long long ___param[] = { args }; \
20426 +- _Pragma("GCC diagnostic pop") \
20427 +- int ___ret = bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
20428 +- ___param, sizeof(___param)); \
20429 +- ___ret; \
20430 +- })
20431 ++#define BPF_SEQ_PRINTF(seq, fmt, args...) \
20432 ++({ \
20433 ++ static const char ___fmt[] = fmt; \
20434 ++ unsigned long long ___param[___bpf_narg(args)]; \
20435 ++ \
20436 ++ _Pragma("GCC diagnostic push") \
20437 ++ _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \
20438 ++ ___bpf_fill(___param, args); \
20439 ++ _Pragma("GCC diagnostic pop") \
20440 ++ \
20441 ++ bpf_seq_printf(seq, ___fmt, sizeof(___fmt), \
20442 ++ ___param, sizeof(___param)); \
20443 ++})
20444 +
20445 + #endif
20446 +diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
20447 +index 57247240a20ad..9cabc8b620e33 100644
20448 +--- a/tools/lib/bpf/btf.h
20449 ++++ b/tools/lib/bpf/btf.h
20450 +@@ -164,6 +164,7 @@ struct btf_dump_emit_type_decl_opts {
20451 + int indent_level;
20452 + /* strip all the const/volatile/restrict mods */
20453 + bool strip_mods;
20454 ++ size_t :0;
20455 + };
20456 + #define btf_dump_emit_type_decl_opts__last_field strip_mods
20457 +
20458 +diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
20459 +index 6909ee81113a1..57d10b779dea0 100644
20460 +--- a/tools/lib/bpf/libbpf.h
20461 ++++ b/tools/lib/bpf/libbpf.h
20462 +@@ -507,6 +507,7 @@ struct xdp_link_info {
20463 + struct bpf_xdp_set_link_opts {
20464 + size_t sz;
20465 + int old_fd;
20466 ++ size_t :0;
20467 + };
20468 + #define bpf_xdp_set_link_opts__last_field old_fd
20469 +
20470 +diff --git a/tools/lib/perf/include/perf/event.h b/tools/lib/perf/include/perf/event.h
20471 +index 988c539bedb6e..4a24b855d3ce2 100644
20472 +--- a/tools/lib/perf/include/perf/event.h
20473 ++++ b/tools/lib/perf/include/perf/event.h
20474 +@@ -8,6 +8,8 @@
20475 + #include <linux/bpf.h>
20476 + #include <sys/types.h> /* pid_t */
20477 +
20478 ++#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
20479 ++
20480 + struct perf_record_mmap {
20481 + struct perf_event_header header;
20482 + __u32 pid, tid;
20483 +@@ -336,8 +338,9 @@ struct perf_record_time_conv {
20484 + __u64 time_zero;
20485 + __u64 time_cycles;
20486 + __u64 time_mask;
20487 +- bool cap_user_time_zero;
20488 +- bool cap_user_time_short;
20489 ++ __u8 cap_user_time_zero;
20490 ++ __u8 cap_user_time_short;
20491 ++ __u8 reserved[6]; /* For alignment */
20492 + };
20493 +
20494 + struct perf_record_header_feature {
20495 +diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
20496 +index 4ea7ec4f496e8..008f1683e5407 100644
20497 +--- a/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
20498 ++++ b/tools/perf/pmu-events/arch/x86/amdzen1/cache.json
20499 +@@ -275,7 +275,7 @@
20500 + {
20501 + "EventName": "l2_pf_hit_l2",
20502 + "EventCode": "0x70",
20503 +- "BriefDescription": "L2 prefetch hit in L2.",
20504 ++ "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
20505 + "UMask": "0xff"
20506 + },
20507 + {
20508 +diff --git a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
20509 +index 2cfe2d2f3bfdd..3c954543d1ae6 100644
20510 +--- a/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
20511 ++++ b/tools/perf/pmu-events/arch/x86/amdzen1/recommended.json
20512 +@@ -79,10 +79,10 @@
20513 + "UMask": "0x70"
20514 + },
20515 + {
20516 +- "MetricName": "l2_cache_hits_from_l2_hwpf",
20517 ++ "EventName": "l2_cache_hits_from_l2_hwpf",
20518 ++ "EventCode": "0x70",
20519 + "BriefDescription": "L2 Cache Hits from L2 HWPF",
20520 +- "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
20521 +- "MetricGroup": "l2_cache"
20522 ++ "UMask": "0xff"
20523 + },
20524 + {
20525 + "EventName": "l3_accesses",
20526 +diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
20527 +index f61b982f83ca3..8ba84a48188dd 100644
20528 +--- a/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
20529 ++++ b/tools/perf/pmu-events/arch/x86/amdzen2/cache.json
20530 +@@ -205,7 +205,7 @@
20531 + {
20532 + "EventName": "l2_pf_hit_l2",
20533 + "EventCode": "0x70",
20534 +- "BriefDescription": "L2 prefetch hit in L2.",
20535 ++ "BriefDescription": "L2 prefetch hit in L2. Use l2_cache_hits_from_l2_hwpf instead.",
20536 + "UMask": "0xff"
20537 + },
20538 + {
20539 +diff --git a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
20540 +index 2ef91e25e6613..1c624cee9ef48 100644
20541 +--- a/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
20542 ++++ b/tools/perf/pmu-events/arch/x86/amdzen2/recommended.json
20543 +@@ -79,10 +79,10 @@
20544 + "UMask": "0x70"
20545 + },
20546 + {
20547 +- "MetricName": "l2_cache_hits_from_l2_hwpf",
20548 ++ "EventName": "l2_cache_hits_from_l2_hwpf",
20549 ++ "EventCode": "0x70",
20550 + "BriefDescription": "L2 Cache Hits from L2 HWPF",
20551 +- "MetricExpr": "l2_pf_hit_l2 + l2_pf_miss_l2_hit_l3 + l2_pf_miss_l2_l3",
20552 +- "MetricGroup": "l2_cache"
20553 ++ "UMask": "0xff"
20554 + },
20555 + {
20556 + "EventName": "l3_accesses",
20557 +diff --git a/tools/perf/trace/beauty/fsconfig.sh b/tools/perf/trace/beauty/fsconfig.sh
20558 +index 83fb24df05c9f..bc6ef7bb7a5f9 100755
20559 +--- a/tools/perf/trace/beauty/fsconfig.sh
20560 ++++ b/tools/perf/trace/beauty/fsconfig.sh
20561 +@@ -10,8 +10,7 @@ fi
20562 + linux_mount=${linux_header_dir}/mount.h
20563 +
20564 + printf "static const char *fsconfig_cmds[] = {\n"
20565 +-regex='^[[:space:]]*+FSCONFIG_([[:alnum:]_]+)[[:space:]]*=[[:space:]]*([[:digit:]]+)[[:space:]]*,[[:space:]]*.*'
20566 +-egrep $regex ${linux_mount} | \
20567 +- sed -r "s/$regex/\2 \1/g" | \
20568 +- xargs printf "\t[%s] = \"%s\",\n"
20569 ++ms='[[:space:]]*'
20570 ++sed -nr "s/^${ms}FSCONFIG_([[:alnum:]_]+)${ms}=${ms}([[:digit:]]+)${ms},.*/\t[\2] = \"\1\",/p" \
20571 ++ ${linux_mount}
20572 + printf "};\n"
20573 +diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
20574 +index 055bab7a92b35..64d8f9ba8c034 100644
20575 +--- a/tools/perf/util/jitdump.c
20576 ++++ b/tools/perf/util/jitdump.c
20577 +@@ -369,21 +369,31 @@ jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
20578 +
20579 + static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
20580 + {
20581 +- struct perf_tsc_conversion tc;
20582 ++ struct perf_tsc_conversion tc = { .time_shift = 0, };
20583 ++ struct perf_record_time_conv *time_conv = &jd->session->time_conv;
20584 +
20585 + if (!jd->use_arch_timestamp)
20586 + return timestamp;
20587 +
20588 +- tc.time_shift = jd->session->time_conv.time_shift;
20589 +- tc.time_mult = jd->session->time_conv.time_mult;
20590 +- tc.time_zero = jd->session->time_conv.time_zero;
20591 +- tc.time_cycles = jd->session->time_conv.time_cycles;
20592 +- tc.time_mask = jd->session->time_conv.time_mask;
20593 +- tc.cap_user_time_zero = jd->session->time_conv.cap_user_time_zero;
20594 +- tc.cap_user_time_short = jd->session->time_conv.cap_user_time_short;
20595 ++ tc.time_shift = time_conv->time_shift;
20596 ++ tc.time_mult = time_conv->time_mult;
20597 ++ tc.time_zero = time_conv->time_zero;
20598 +
20599 +- if (!tc.cap_user_time_zero)
20600 +- return 0;
20601 ++ /*
20602 ++ * The event TIME_CONV was extended for the fields from "time_cycles"
20603 ++ * when supported cap_user_time_short, for backward compatibility,
20604 ++ * checks the event size and assigns these extended fields if these
20605 ++ * fields are contained in the event.
20606 ++ */
20607 ++ if (event_contains(*time_conv, time_cycles)) {
20608 ++ tc.time_cycles = time_conv->time_cycles;
20609 ++ tc.time_mask = time_conv->time_mask;
20610 ++ tc.cap_user_time_zero = time_conv->cap_user_time_zero;
20611 ++ tc.cap_user_time_short = time_conv->cap_user_time_short;
20612 ++
20613 ++ if (!tc.cap_user_time_zero)
20614 ++ return 0;
20615 ++ }
20616 +
20617 + return tsc_to_perf_time(timestamp, &tc);
20618 + }
20619 +diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
20620 +index 22098fffac4f1..63b619084b34a 100644
20621 +--- a/tools/perf/util/session.c
20622 ++++ b/tools/perf/util/session.c
20623 +@@ -945,6 +945,19 @@ static void perf_event__stat_round_swap(union perf_event *event,
20624 + event->stat_round.time = bswap_64(event->stat_round.time);
20625 + }
20626 +
20627 ++static void perf_event__time_conv_swap(union perf_event *event,
20628 ++ bool sample_id_all __maybe_unused)
20629 ++{
20630 ++ event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
20631 ++ event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
20632 ++ event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);
20633 ++
20634 ++ if (event_contains(event->time_conv, time_cycles)) {
20635 ++ event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
20636 ++ event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
20637 ++ }
20638 ++}
20639 ++
20640 + typedef void (*perf_event__swap_op)(union perf_event *event,
20641 + bool sample_id_all);
20642 +
20643 +@@ -981,7 +994,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
20644 + [PERF_RECORD_STAT] = perf_event__stat_swap,
20645 + [PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
20646 + [PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
20647 +- [PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
20648 ++ [PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
20649 + [PERF_RECORD_HEADER_MAX] = NULL,
20650 + };
20651 +
20652 +diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
20653 +index 35c936ce33efa..2664fb65e47ad 100644
20654 +--- a/tools/perf/util/symbol_fprintf.c
20655 ++++ b/tools/perf/util/symbol_fprintf.c
20656 +@@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
20657 +
20658 + for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
20659 + pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
20660 +- fprintf(fp, "%s\n", pos->sym.name);
20661 ++ ret += fprintf(fp, "%s\n", pos->sym.name);
20662 + }
20663 +
20664 + return ret;
20665 +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
20666 +index ca69bdb0159fd..424ed19a9d542 100644
20667 +--- a/tools/power/x86/turbostat/turbostat.c
20668 ++++ b/tools/power/x86/turbostat/turbostat.c
20669 +@@ -4795,33 +4795,12 @@ double discover_bclk(unsigned int family, unsigned int model)
20670 + * below this value, including the Digital Thermal Sensor (DTS),
20671 + * Package Thermal Management Sensor (PTM), and thermal event thresholds.
20672 + */
20673 +-int read_tcc_activation_temp()
20674 ++int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
20675 + {
20676 + unsigned long long msr;
20677 +- unsigned int tcc, target_c, offset_c;
20678 +-
20679 +- /* Temperature Target MSR is Nehalem and newer only */
20680 +- if (!do_nhm_platform_info)
20681 +- return 0;
20682 +-
20683 +- if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
20684 +- return 0;
20685 +-
20686 +- target_c = (msr >> 16) & 0xFF;
20687 +-
20688 +- offset_c = (msr >> 24) & 0xF;
20689 +-
20690 +- tcc = target_c - offset_c;
20691 +-
20692 +- if (!quiet)
20693 +- fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C) (%d default - %d offset)\n",
20694 +- base_cpu, msr, tcc, target_c, offset_c);
20695 +-
20696 +- return tcc;
20697 +-}
20698 ++ unsigned int target_c_local;
20699 ++ int cpu;
20700 +
20701 +-int set_temperature_target(struct thread_data *t, struct core_data *c, struct pkg_data *p)
20702 +-{
20703 + /* tcc_activation_temp is used only for dts or ptm */
20704 + if (!(do_dts || do_ptm))
20705 + return 0;
20706 +@@ -4830,18 +4809,43 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
20707 + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE) || !(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
20708 + return 0;
20709 +
20710 ++ cpu = t->cpu_id;
20711 ++ if (cpu_migrate(cpu)) {
20712 ++ fprintf(outf, "Could not migrate to CPU %d\n", cpu);
20713 ++ return -1;
20714 ++ }
20715 ++
20716 + if (tcc_activation_temp_override != 0) {
20717 + tcc_activation_temp = tcc_activation_temp_override;
20718 +- fprintf(outf, "Using cmdline TCC Target (%d C)\n", tcc_activation_temp);
20719 ++ fprintf(outf, "cpu%d: Using cmdline TCC Target (%d C)\n",
20720 ++ cpu, tcc_activation_temp);
20721 + return 0;
20722 + }
20723 +
20724 +- tcc_activation_temp = read_tcc_activation_temp();
20725 +- if (tcc_activation_temp)
20726 +- return 0;
20727 ++ /* Temperature Target MSR is Nehalem and newer only */
20728 ++ if (!do_nhm_platform_info)
20729 ++ goto guess;
20730 ++
20731 ++ if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
20732 ++ goto guess;
20733 ++
20734 ++ target_c_local = (msr >> 16) & 0xFF;
20735 ++
20736 ++ if (!quiet)
20737 ++ fprintf(outf, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
20738 ++ cpu, msr, target_c_local);
20739 ++
20740 ++ if (!target_c_local)
20741 ++ goto guess;
20742 ++
20743 ++ tcc_activation_temp = target_c_local;
20744 ++
20745 ++ return 0;
20746 +
20747 ++guess:
20748 + tcc_activation_temp = TJMAX_DEFAULT;
20749 +- fprintf(outf, "Guessing tjMax %d C, Please use -T to specify\n", tcc_activation_temp);
20750 ++ fprintf(outf, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
20751 ++ cpu, tcc_activation_temp);
20752 +
20753 + return 0;
20754 + }
20755 +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
20756 +index 9359377aeb35c..b5322d60068c4 100644
20757 +--- a/tools/testing/selftests/bpf/Makefile
20758 ++++ b/tools/testing/selftests/bpf/Makefile
20759 +@@ -196,7 +196,7 @@ $(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_D
20760 + $(call msg,MKDIR,,$@)
20761 + $(Q)mkdir -p $@
20762 +
20763 +-$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
20764 ++$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL) | $(INCLUDE_DIR)
20765 + ifeq ($(VMLINUX_H),)
20766 + $(call msg,GEN,,$@)
20767 + $(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
20768 +@@ -333,7 +333,8 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
20769 +
20770 + $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \
20771 + $(TRUNNER_OUTPUT)/%.o \
20772 +- | $(BPFTOOL) $(TRUNNER_OUTPUT)
20773 ++ $(BPFTOOL) \
20774 ++ | $(TRUNNER_OUTPUT)
20775 + $$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
20776 + $(Q)$$(BPFTOOL) gen skeleton $$< > $$@
20777 + endif
20778 +diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
20779 +index 30e40ff4b0d8e..8b641c306f263 100644
20780 +--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
20781 ++++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
20782 +@@ -185,11 +185,6 @@ static int duration = 0;
20783 + .bpf_obj_file = "test_core_reloc_existence.o", \
20784 + .btf_src_file = "btf__core_reloc_" #name ".o" \
20785 +
20786 +-#define FIELD_EXISTS_ERR_CASE(name) { \
20787 +- FIELD_EXISTS_CASE_COMMON(name), \
20788 +- .fails = true, \
20789 +-}
20790 +-
20791 + #define BITFIELDS_CASE_COMMON(objfile, test_name_prefix, name) \
20792 + .case_name = test_name_prefix#name, \
20793 + .bpf_obj_file = objfile, \
20794 +@@ -197,7 +192,7 @@ static int duration = 0;
20795 +
20796 + #define BITFIELDS_CASE(name, ...) { \
20797 + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_probed.o", \
20798 +- "direct:", name), \
20799 ++ "probed:", name), \
20800 + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
20801 + .input_len = sizeof(struct core_reloc_##name), \
20802 + .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
20803 +@@ -205,7 +200,7 @@ static int duration = 0;
20804 + .output_len = sizeof(struct core_reloc_bitfields_output), \
20805 + }, { \
20806 + BITFIELDS_CASE_COMMON("test_core_reloc_bitfields_direct.o", \
20807 +- "probed:", name), \
20808 ++ "direct:", name), \
20809 + .input = STRUCT_TO_CHAR_PTR(core_reloc_##name) __VA_ARGS__, \
20810 + .input_len = sizeof(struct core_reloc_##name), \
20811 + .output = STRUCT_TO_CHAR_PTR(core_reloc_bitfields_output) \
20812 +@@ -500,8 +495,7 @@ static struct core_reloc_test_case test_cases[] = {
20813 + ARRAYS_ERR_CASE(arrays___err_too_small),
20814 + ARRAYS_ERR_CASE(arrays___err_too_shallow),
20815 + ARRAYS_ERR_CASE(arrays___err_non_array),
20816 +- ARRAYS_ERR_CASE(arrays___err_wrong_val_type1),
20817 +- ARRAYS_ERR_CASE(arrays___err_wrong_val_type2),
20818 ++ ARRAYS_ERR_CASE(arrays___err_wrong_val_type),
20819 + ARRAYS_ERR_CASE(arrays___err_bad_zero_sz_arr),
20820 +
20821 + /* enum/ptr/int handling scenarios */
20822 +@@ -592,13 +586,25 @@ static struct core_reloc_test_case test_cases[] = {
20823 + },
20824 + .output_len = sizeof(struct core_reloc_existence_output),
20825 + },
20826 +-
20827 +- FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
20828 +- FIELD_EXISTS_ERR_CASE(existence__err_int_type),
20829 +- FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
20830 +- FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
20831 +- FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
20832 +- FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
20833 ++ {
20834 ++ FIELD_EXISTS_CASE_COMMON(existence___wrong_field_defs),
20835 ++ .input = STRUCT_TO_CHAR_PTR(core_reloc_existence___wrong_field_defs) {
20836 ++ },
20837 ++ .input_len = sizeof(struct core_reloc_existence___wrong_field_defs),
20838 ++ .output = STRUCT_TO_CHAR_PTR(core_reloc_existence_output) {
20839 ++ .a_exists = 0,
20840 ++ .b_exists = 0,
20841 ++ .c_exists = 0,
20842 ++ .arr_exists = 0,
20843 ++ .s_exists = 0,
20844 ++ .a_value = 0xff000001u,
20845 ++ .b_value = 0xff000002u,
20846 ++ .c_value = 0xff000003u,
20847 ++ .arr_value = 0xff000004u,
20848 ++ .s_value = 0xff000005u,
20849 ++ },
20850 ++ .output_len = sizeof(struct core_reloc_existence_output),
20851 ++ },
20852 +
20853 + /* bitfield relocation checks */
20854 + BITFIELDS_CASE(bitfields, {
20855 +@@ -804,13 +810,20 @@ void test_core_reloc(void)
20856 + "prog '%s' not found\n", probe_name))
20857 + goto cleanup;
20858 +
20859 ++
20860 ++ if (test_case->btf_src_file) {
20861 ++ err = access(test_case->btf_src_file, R_OK);
20862 ++ if (!ASSERT_OK(err, "btf_src_file"))
20863 ++ goto cleanup;
20864 ++ }
20865 ++
20866 + load_attr.obj = obj;
20867 + load_attr.log_level = 0;
20868 + load_attr.target_btf_path = test_case->btf_src_file;
20869 + err = bpf_object__load_xattr(&load_attr);
20870 + if (err) {
20871 + if (!test_case->fails)
20872 +- CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
20873 ++ ASSERT_OK(err, "obj_load");
20874 + goto cleanup;
20875 + }
20876 +
20877 +@@ -844,10 +857,8 @@ void test_core_reloc(void)
20878 + goto cleanup;
20879 + }
20880 +
20881 +- if (test_case->fails) {
20882 +- CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
20883 ++ if (!ASSERT_FALSE(test_case->fails, "obj_load_should_fail"))
20884 + goto cleanup;
20885 +- }
20886 +
20887 + equal = memcmp(data->out, test_case->output,
20888 + test_case->output_len) == 0;
20889 +diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
20890 +deleted file mode 100644
20891 +index dd0ffa518f366..0000000000000
20892 +--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_kind.c
20893 ++++ /dev/null
20894 +@@ -1,3 +0,0 @@
20895 +-#include "core_reloc_types.h"
20896 +-
20897 +-void f(struct core_reloc_existence___err_wrong_arr_kind x) {}
20898 +diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
20899 +deleted file mode 100644
20900 +index bc83372088ad0..0000000000000
20901 +--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_arr_value_type.c
20902 ++++ /dev/null
20903 +@@ -1,3 +0,0 @@
20904 +-#include "core_reloc_types.h"
20905 +-
20906 +-void f(struct core_reloc_existence___err_wrong_arr_value_type x) {}
20907 +diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
20908 +deleted file mode 100644
20909 +index 917bec41be081..0000000000000
20910 +--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_kind.c
20911 ++++ /dev/null
20912 +@@ -1,3 +0,0 @@
20913 +-#include "core_reloc_types.h"
20914 +-
20915 +-void f(struct core_reloc_existence___err_wrong_int_kind x) {}
20916 +diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
20917 +deleted file mode 100644
20918 +index 6ec7e6ec1c915..0000000000000
20919 +--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_sz.c
20920 ++++ /dev/null
20921 +@@ -1,3 +0,0 @@
20922 +-#include "core_reloc_types.h"
20923 +-
20924 +-void f(struct core_reloc_existence___err_wrong_int_sz x) {}
20925 +diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
20926 +deleted file mode 100644
20927 +index 7bbcacf2b0d17..0000000000000
20928 +--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_int_type.c
20929 ++++ /dev/null
20930 +@@ -1,3 +0,0 @@
20931 +-#include "core_reloc_types.h"
20932 +-
20933 +-void f(struct core_reloc_existence___err_wrong_int_type x) {}
20934 +diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
20935 +deleted file mode 100644
20936 +index f384dd38ec709..0000000000000
20937 +--- a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___err_wrong_struct_type.c
20938 ++++ /dev/null
20939 +@@ -1,3 +0,0 @@
20940 +-#include "core_reloc_types.h"
20941 +-
20942 +-void f(struct core_reloc_existence___err_wrong_struct_type x) {}
20943 +diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
20944 +new file mode 100644
20945 +index 0000000000000..d14b496190c3d
20946 +--- /dev/null
20947 ++++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_existence___wrong_field_defs.c
20948 +@@ -0,0 +1,3 @@
20949 ++#include "core_reloc_types.h"
20950 ++
20951 ++void f(struct core_reloc_existence___wrong_field_defs x) {}
20952 +diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
20953 +index e6e616cb7bc91..af58ef9a28caf 100644
20954 +--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
20955 ++++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
20956 +@@ -683,27 +683,11 @@ struct core_reloc_existence___minimal {
20957 + int a;
20958 + };
20959 +
20960 +-struct core_reloc_existence___err_wrong_int_sz {
20961 +- short a;
20962 +-};
20963 +-
20964 +-struct core_reloc_existence___err_wrong_int_type {
20965 ++struct core_reloc_existence___wrong_field_defs {
20966 ++ void *a;
20967 + int b[1];
20968 +-};
20969 +-
20970 +-struct core_reloc_existence___err_wrong_int_kind {
20971 + struct{ int x; } c;
20972 +-};
20973 +-
20974 +-struct core_reloc_existence___err_wrong_arr_kind {
20975 + int arr;
20976 +-};
20977 +-
20978 +-struct core_reloc_existence___err_wrong_arr_value_type {
20979 +- short arr[1];
20980 +-};
20981 +-
20982 +-struct core_reloc_existence___err_wrong_struct_type {
20983 + int s;
20984 + };
20985 +
20986 +diff --git a/tools/testing/selftests/bpf/verifier/array_access.c b/tools/testing/selftests/bpf/verifier/array_access.c
20987 +index 1b138cd2b187d..1b1c798e92489 100644
20988 +--- a/tools/testing/selftests/bpf/verifier/array_access.c
20989 ++++ b/tools/testing/selftests/bpf/verifier/array_access.c
20990 +@@ -186,7 +186,7 @@
20991 + },
20992 + .fixup_map_hash_48b = { 3 },
20993 + .errstr_unpriv = "R0 leaks addr",
20994 +- .errstr = "invalid access to map value, value_size=48 off=44 size=8",
20995 ++ .errstr = "R0 unbounded memory access",
20996 + .result_unpriv = REJECT,
20997 + .result = REJECT,
20998 + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
20999 +diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
21000 +index cc0f07e72cf22..aa74be9f47c85 100644
21001 +--- a/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
21002 ++++ b/tools/testing/selftests/drivers/net/mlxsw/tc_flower_scale.sh
21003 +@@ -98,11 +98,7 @@ __tc_flower_test()
21004 + jq -r '[ .[] | select(.kind == "flower") |
21005 + .options | .in_hw ]' | jq .[] | wc -l)
21006 + [[ $((offload_count - 1)) -eq $count ]]
21007 +- if [[ $should_fail -eq 0 ]]; then
21008 +- check_err $? "Offload mismatch"
21009 +- else
21010 +- check_err_fail $should_fail $? "Offload more than expacted"
21011 +- fi
21012 ++ check_err_fail $should_fail $? "Attempt to offload $count rules (actual result $((offload_count - 1)))"
21013 + }
21014 +
21015 + tc_flower_test()
21016 +diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
21017 +index a5ce26d548e4f..be17462fe1467 100644
21018 +--- a/tools/testing/selftests/lib.mk
21019 ++++ b/tools/testing/selftests/lib.mk
21020 +@@ -74,7 +74,8 @@ ifdef building_out_of_srctree
21021 + rsync -aq $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(OUTPUT); \
21022 + fi
21023 + @if [ "X$(TEST_PROGS)" != "X" ]; then \
21024 +- $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(OUTPUT)/$(TEST_PROGS)) ; \
21025 ++ $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) \
21026 ++ $(addprefix $(OUTPUT)/,$(TEST_PROGS))) ; \
21027 + else \
21028 + $(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS)); \
21029 + fi
21030 +diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
21031 +index c02291e9841e3..880e3ab9d088d 100755
21032 +--- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
21033 ++++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh
21034 +@@ -271,7 +271,7 @@ test_span_gre_fdb_roaming()
21035 +
21036 + while ((RET == 0)); do
21037 + bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null
21038 +- bridge fdb add dev $swp2 $h3mac vlan 555 master
21039 ++ bridge fdb add dev $swp2 $h3mac vlan 555 master static
21040 + sleep 1
21041 + fail_test_span_gre_dir $tundev ingress
21042 +
21043 +diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
21044 +index e2c197fd4f9d4..6edfcf1f3bd66 100644
21045 +--- a/virt/kvm/coalesced_mmio.c
21046 ++++ b/virt/kvm/coalesced_mmio.c
21047 +@@ -174,21 +174,36 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
21048 + struct kvm_coalesced_mmio_zone *zone)
21049 + {
21050 + struct kvm_coalesced_mmio_dev *dev, *tmp;
21051 ++ int r;
21052 +
21053 + if (zone->pio != 1 && zone->pio != 0)
21054 + return -EINVAL;
21055 +
21056 + mutex_lock(&kvm->slots_lock);
21057 +
21058 +- list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
21059 ++ list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
21060 + if (zone->pio == dev->zone.pio &&
21061 + coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
21062 +- kvm_io_bus_unregister_dev(kvm,
21063 ++ r = kvm_io_bus_unregister_dev(kvm,
21064 + zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
21065 + kvm_iodevice_destructor(&dev->dev);
21066 ++
21067 ++ /*
21068 ++ * On failure, unregister destroys all devices on the
21069 ++ * bus _except_ the target device, i.e. coalesced_zones
21070 ++ * has been modified. No need to restart the walk as
21071 ++ * there aren't any zones left.
21072 ++ */
21073 ++ if (r)
21074 ++ break;
21075 + }
21076 ++ }
21077 +
21078 + mutex_unlock(&kvm->slots_lock);
21079 +
21080 ++ /*
21081 ++ * Ignore the result of kvm_io_bus_unregister_dev(), from userspace's
21082 ++ * perspective, the coalesced MMIO is most definitely unregistered.
21083 ++ */
21084 + return 0;
21085 + }
21086 +diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
21087 +index ed4d2e3a00718..78bf3f5492143 100644
21088 +--- a/virt/kvm/kvm_main.c
21089 ++++ b/virt/kvm/kvm_main.c
21090 +@@ -4342,15 +4342,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
21091 + }
21092 +
21093 + /* Caller must hold slots_lock. */
21094 +-void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
21095 +- struct kvm_io_device *dev)
21096 ++int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
21097 ++ struct kvm_io_device *dev)
21098 + {
21099 + int i, j;
21100 + struct kvm_io_bus *new_bus, *bus;
21101 +
21102 + bus = kvm_get_bus(kvm, bus_idx);
21103 + if (!bus)
21104 +- return;
21105 ++ return 0;
21106 +
21107 + for (i = 0; i < bus->dev_count; i++)
21108 + if (bus->range[i].dev == dev) {
21109 +@@ -4358,7 +4358,7 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
21110 + }
21111 +
21112 + if (i == bus->dev_count)
21113 +- return;
21114 ++ return 0;
21115 +
21116 + new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
21117 + GFP_KERNEL_ACCOUNT);
21118 +@@ -4367,7 +4367,13 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
21119 + new_bus->dev_count--;
21120 + memcpy(new_bus->range + i, bus->range + i + 1,
21121 + flex_array_size(new_bus, range, new_bus->dev_count - i));
21122 +- } else {
21123 ++ }
21124 ++
21125 ++ rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
21126 ++ synchronize_srcu_expedited(&kvm->srcu);
21127 ++
21128 ++ /* Destroy the old bus _after_ installing the (null) bus. */
21129 ++ if (!new_bus) {
21130 + pr_err("kvm: failed to shrink bus, removing it completely\n");
21131 + for (j = 0; j < bus->dev_count; j++) {
21132 + if (j == i)
21133 +@@ -4376,10 +4382,8 @@ void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
21134 + }
21135 + }
21136 +
21137 +- rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
21138 +- synchronize_srcu_expedited(&kvm->srcu);
21139 + kfree(bus);
21140 +- return;
21141 ++ return new_bus ? 0 : -ENOMEM;
21142 + }
21143 +
21144 + struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,