
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.14 commit in: /
Date: Wed, 15 Sep 2021 11:58:36 +0000
Message-Id: 1631707100.756955cf3ec599943c85ce5eed917d9441d0d6a9.mpagano@gentoo
1 commit: 756955cf3ec599943c85ce5eed917d9441d0d6a9
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Sep 15 11:58:20 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Sep 15 11:58:20 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=756955cf3ec599943c85ce5eed917d9441d0d6a9
7
8 Linux patch 5.14.4
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 8 +
13 1003_linux-5.14.4.patch | 13171 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 13179 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index f4fbe66..79faaf3 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -55,6 +55,14 @@ Patch: 1002_linux-5.14.3.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.14.3
23
24 +Patch: 1002_linux-5.14.3.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.14.3
27 +
28 +Patch: 1003_linux-5.14.4.patch
29 +From: http://www.kernel.org
30 +Desc: Linux 5.14.4
31 +
32 Patch: 1500_XATTR_USER_PREFIX.patch
33 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
34 Desc: Support for namespace user.pax.* on tmpfs.
35
36 diff --git a/1003_linux-5.14.4.patch b/1003_linux-5.14.4.patch
37 new file mode 100644
38 index 0000000..2f4c377
39 --- /dev/null
40 +++ b/1003_linux-5.14.4.patch
41 @@ -0,0 +1,13171 @@
42 +diff --git a/Documentation/fault-injection/provoke-crashes.rst b/Documentation/fault-injection/provoke-crashes.rst
43 +index a20ba5d939320..18de17354206a 100644
44 +--- a/Documentation/fault-injection/provoke-crashes.rst
45 ++++ b/Documentation/fault-injection/provoke-crashes.rst
46 +@@ -29,7 +29,7 @@ recur_count
47 + cpoint_name
48 + Where in the kernel to trigger the action. It can be
49 + one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
50 +- FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD,
51 ++ FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_QUEUE_RQ,
52 + IDE_CORE_CP, or DIRECT
53 +
54 + cpoint_type
55 +diff --git a/Makefile b/Makefile
56 +index 8715942fccb4a..e16a1a80074cd 100644
57 +--- a/Makefile
58 ++++ b/Makefile
59 +@@ -1,7 +1,7 @@
60 + # SPDX-License-Identifier: GPL-2.0
61 + VERSION = 5
62 + PATCHLEVEL = 14
63 +-SUBLEVEL = 3
64 ++SUBLEVEL = 4
65 + EXTRAVERSION =
66 + NAME = Opossums on Parade
67 +
68 +diff --git a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
69 +index aa24cac8e5be5..44b03a5e24166 100644
70 +--- a/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
71 ++++ b/arch/arm/boot/dts/aspeed-bmc-ibm-everest.dts
72 +@@ -2832,7 +2832,7 @@
73 +
74 + &emmc {
75 + status = "okay";
76 +- clk-phase-mmc-hs200 = <180>, <180>;
77 ++ clk-phase-mmc-hs200 = <210>, <228>;
78 + };
79 +
80 + &fsim0 {
81 +diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
82 +index 7e90d713f5e58..6dde51c2aed3f 100644
83 +--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
84 ++++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
85 +@@ -208,12 +208,12 @@
86 + };
87 +
88 + pinctrl_hvi3c3_default: hvi3c3_default {
89 +- function = "HVI3C3";
90 ++ function = "I3C3";
91 + groups = "HVI3C3";
92 + };
93 +
94 + pinctrl_hvi3c4_default: hvi3c4_default {
95 +- function = "HVI3C4";
96 ++ function = "I3C4";
97 + groups = "HVI3C4";
98 + };
99 +
100 +diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
101 +index edca66c232c15..ebbc9b23aef1c 100644
102 +--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
103 ++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
104 +@@ -92,6 +92,8 @@
105 +
106 + leds {
107 + compatible = "gpio-leds";
108 ++ pinctrl-names = "default";
109 ++ pinctrl-0 = <&pinctrl_gpio_leds>;
110 + status = "okay"; /* Conflict with pwm0. */
111 +
112 + red {
113 +@@ -537,6 +539,10 @@
114 + AT91_PIOA 19 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI) /* PA19 DAT2 periph A with pullup */
115 + AT91_PIOA 20 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)>; /* PA20 DAT3 periph A with pullup */
116 + };
117 ++ pinctrl_sdmmc0_cd: sdmmc0_cd {
118 ++ atmel,pins =
119 ++ <AT91_PIOA 23 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
120 ++ };
121 + };
122 +
123 + sdmmc1 {
124 +@@ -569,6 +575,14 @@
125 + AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
126 + };
127 + };
128 ++
129 ++ leds {
130 ++ pinctrl_gpio_leds: gpio_leds {
131 ++ atmel,pins = <AT91_PIOB 11 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
132 ++ AT91_PIOB 12 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
133 ++ AT91_PIOB 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
134 ++ };
135 ++ };
136 + }; /* pinctrl */
137 +
138 + &pwm0 {
139 +@@ -580,7 +594,7 @@
140 + &sdmmc0 {
141 + bus-width = <4>;
142 + pinctrl-names = "default";
143 +- pinctrl-0 = <&pinctrl_sdmmc0_default>;
144 ++ pinctrl-0 = <&pinctrl_sdmmc0_default &pinctrl_sdmmc0_cd>;
145 + status = "okay";
146 + cd-gpios = <&pioA 23 GPIO_ACTIVE_LOW>;
147 + disable-wp;
148 +diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
149 +index 9c55a921263bd..cc55d1684322b 100644
150 +--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
151 ++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
152 +@@ -57,6 +57,8 @@
153 + };
154 +
155 + spi0: spi@f0004000 {
156 ++ pinctrl-names = "default";
157 ++ pinctrl-0 = <&pinctrl_spi0_cs>;
158 + cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
159 + status = "okay";
160 + };
161 +@@ -169,6 +171,8 @@
162 + };
163 +
164 + spi1: spi@f8008000 {
165 ++ pinctrl-names = "default";
166 ++ pinctrl-0 = <&pinctrl_spi1_cs>;
167 + cs-gpios = <&pioC 25 0>;
168 + status = "okay";
169 + };
170 +@@ -248,6 +252,26 @@
171 + <AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
172 + AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
173 + };
174 ++
175 ++ pinctrl_gpio_leds: gpio_leds_default {
176 ++ atmel,pins =
177 ++ <AT91_PIOE 23 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
178 ++ AT91_PIOE 24 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
179 ++ };
180 ++
181 ++ pinctrl_spi0_cs: spi0_cs_default {
182 ++ atmel,pins =
183 ++ <AT91_PIOD 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
184 ++ AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
185 ++ };
186 ++
187 ++ pinctrl_spi1_cs: spi1_cs_default {
188 ++ atmel,pins = <AT91_PIOC 25 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
189 ++ };
190 ++
191 ++ pinctrl_vcc_mmc0_reg_gpio: vcc_mmc0_reg_gpio_default {
192 ++ atmel,pins = <AT91_PIOE 2 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
193 ++ };
194 + };
195 + };
196 + };
197 +@@ -339,6 +363,8 @@
198 +
199 + vcc_mmc0_reg: fixedregulator_mmc0 {
200 + compatible = "regulator-fixed";
201 ++ pinctrl-names = "default";
202 ++ pinctrl-0 = <&pinctrl_vcc_mmc0_reg_gpio>;
203 + gpio = <&pioE 2 GPIO_ACTIVE_LOW>;
204 + regulator-name = "mmc0-card-supply";
205 + regulator-min-microvolt = <3300000>;
206 +@@ -362,6 +388,9 @@
207 +
208 + leds {
209 + compatible = "gpio-leds";
210 ++ pinctrl-names = "default";
211 ++ pinctrl-0 = <&pinctrl_gpio_leds>;
212 ++ status = "okay";
213 +
214 + d2 {
215 + label = "d2";
216 +diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
217 +index 0b3ad1b580b83..e42dae06b5826 100644
218 +--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
219 ++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
220 +@@ -90,6 +90,8 @@
221 + };
222 +
223 + spi1: spi@fc018000 {
224 ++ pinctrl-names = "default";
225 ++ pinctrl-0 = <&pinctrl_spi0_cs>;
226 + cs-gpios = <&pioB 21 0>;
227 + status = "okay";
228 + };
229 +@@ -147,6 +149,19 @@
230 + atmel,pins =
231 + <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
232 + };
233 ++ pinctrl_spi0_cs: spi0_cs_default {
234 ++ atmel,pins =
235 ++ <AT91_PIOB 21 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
236 ++ };
237 ++ pinctrl_gpio_leds: gpio_leds_default {
238 ++ atmel,pins =
239 ++ <AT91_PIOD 30 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
240 ++ AT91_PIOE 15 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
241 ++ };
242 ++ pinctrl_vcc_mmc1_reg: vcc_mmc1_reg {
243 ++ atmel,pins =
244 ++ <AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
245 ++ };
246 + };
247 + };
248 + };
249 +@@ -252,6 +267,8 @@
250 +
251 + leds {
252 + compatible = "gpio-leds";
253 ++ pinctrl-names = "default";
254 ++ pinctrl-0 = <&pinctrl_gpio_leds>;
255 + status = "okay";
256 +
257 + d8 {
258 +@@ -278,6 +295,8 @@
259 +
260 + vcc_mmc1_reg: fixedregulator_mmc1 {
261 + compatible = "regulator-fixed";
262 ++ pinctrl-names = "default";
263 ++ pinctrl-0 = <&pinctrl_vcc_mmc1_reg>;
264 + gpio = <&pioE 4 GPIO_ACTIVE_LOW>;
265 + regulator-name = "VDD MCI1";
266 + regulator-min-microvolt = <3300000>;
267 +diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
268 +index 157a950a55d38..686c7b7c79d55 100644
269 +--- a/arch/arm/boot/dts/meson8.dtsi
270 ++++ b/arch/arm/boot/dts/meson8.dtsi
271 +@@ -304,8 +304,13 @@
272 + "pp2", "ppmmu2", "pp4", "ppmmu4",
273 + "pp5", "ppmmu5", "pp6", "ppmmu6";
274 + resets = <&reset RESET_MALI>;
275 ++
276 + clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
277 + clock-names = "bus", "core";
278 ++
279 ++ assigned-clocks = <&clkc CLKID_MALI>;
280 ++ assigned-clock-rates = <318750000>;
281 ++
282 + operating-points-v2 = <&gpu_opp_table>;
283 + #cooling-cells = <2>; /* min followed by max */
284 + };
285 +diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
286 +index 8e48ccc6b634e..7e8ddc6f1252b 100644
287 +--- a/arch/arm/boot/dts/meson8b-ec100.dts
288 ++++ b/arch/arm/boot/dts/meson8b-ec100.dts
289 +@@ -148,7 +148,7 @@
290 + regulator-min-microvolt = <860000>;
291 + regulator-max-microvolt = <1140000>;
292 +
293 +- vin-supply = <&vcc_5v>;
294 ++ pwm-supply = <&vcc_5v>;
295 +
296 + pwms = <&pwm_cd 0 1148 0>;
297 + pwm-dutycycle-range = <100 0>;
298 +@@ -232,7 +232,7 @@
299 + regulator-min-microvolt = <860000>;
300 + regulator-max-microvolt = <1140000>;
301 +
302 +- vin-supply = <&vcc_5v>;
303 ++ pwm-supply = <&vcc_5v>;
304 +
305 + pwms = <&pwm_cd 1 1148 0>;
306 + pwm-dutycycle-range = <100 0>;
307 +diff --git a/arch/arm/boot/dts/meson8b-mxq.dts b/arch/arm/boot/dts/meson8b-mxq.dts
308 +index f3937d55472d4..7adedd3258c33 100644
309 +--- a/arch/arm/boot/dts/meson8b-mxq.dts
310 ++++ b/arch/arm/boot/dts/meson8b-mxq.dts
311 +@@ -34,6 +34,8 @@
312 + regulator-min-microvolt = <860000>;
313 + regulator-max-microvolt = <1140000>;
314 +
315 ++ pwm-supply = <&vcc_5v>;
316 ++
317 + pwms = <&pwm_cd 0 1148 0>;
318 + pwm-dutycycle-range = <100 0>;
319 +
320 +@@ -79,7 +81,7 @@
321 + regulator-min-microvolt = <860000>;
322 + regulator-max-microvolt = <1140000>;
323 +
324 +- vin-supply = <&vcc_5v>;
325 ++ pwm-supply = <&vcc_5v>;
326 +
327 + pwms = <&pwm_cd 1 1148 0>;
328 + pwm-dutycycle-range = <100 0>;
329 +diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
330 +index c440ef94e0820..04356bc639faf 100644
331 +--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
332 ++++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
333 +@@ -131,7 +131,7 @@
334 + regulator-min-microvolt = <860000>;
335 + regulator-max-microvolt = <1140000>;
336 +
337 +- vin-supply = <&p5v0>;
338 ++ pwm-supply = <&p5v0>;
339 +
340 + pwms = <&pwm_cd 0 12218 0>;
341 + pwm-dutycycle-range = <91 0>;
342 +@@ -163,7 +163,7 @@
343 + regulator-min-microvolt = <860000>;
344 + regulator-max-microvolt = <1140000>;
345 +
346 +- vin-supply = <&p5v0>;
347 ++ pwm-supply = <&p5v0>;
348 +
349 + pwms = <&pwm_cd 1 12218 0>;
350 + pwm-dutycycle-range = <91 0>;
351 +diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
352 +index 10244e59d56dd..56a0bb7eb0e69 100644
353 +--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
354 ++++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
355 +@@ -102,7 +102,7 @@
356 + #address-cells = <0>;
357 + interrupt-controller;
358 + reg = <0x11001000 0x1000>,
359 +- <0x11002000 0x1000>,
360 ++ <0x11002000 0x2000>,
361 + <0x11004000 0x2000>,
362 + <0x11006000 0x2000>;
363 + };
364 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
365 +index a05b1ab2dd12c..04da07ae44208 100644
366 +--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
367 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
368 +@@ -135,6 +135,23 @@
369 + pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
370 + status = "okay";
371 + reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
372 ++ /*
373 ++ * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property
374 ++ * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and
375 ++ * 2 size cells and also expects that the second range starts at 16 MB offset. If these
376 ++ * conditions are not met then U-Boot crashes during loading kernel DTB file. PCIe address
377 ++ * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window
378 ++ * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB.
379 ++ * This bug is not present in U-Boot ports for other Armada 3700 devices and is fixed in
380 ++ * U-Boot version 2021.07. See relevant U-Boot commits (the last one contains fix):
381 ++ * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7
382 ++ * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf
383 ++ * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33
384 ++ */
385 ++ #address-cells = <3>;
386 ++ #size-cells = <2>;
387 ++ ranges = <0x81000000 0 0xe8000000 0 0xe8000000 0 0x01000000 /* Port 0 IO */
388 ++ 0x82000000 0 0xe9000000 0 0xe9000000 0 0x07000000>; /* Port 0 MEM */
389 +
390 + /* enabled by U-Boot if PCIe module is present */
391 + status = "disabled";
392 +diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
393 +index 5db81a416cd65..9acc5d2b5a002 100644
394 +--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
395 ++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
396 +@@ -489,8 +489,15 @@
397 + #interrupt-cells = <1>;
398 + msi-parent = <&pcie0>;
399 + msi-controller;
400 +- ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
401 +- 0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
402 ++ /*
403 ++ * The 128 MiB address range [0xe8000000-0xf0000000] is
404 ++ * dedicated for PCIe and can be assigned to 8 windows
405 ++ * with size a power of two. Use one 64 KiB window for
406 ++ * IO at the end and the remaining seven windows
407 ++ * (totaling 127 MiB) for MEM.
408 ++ */
409 ++ ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */
410 ++ 0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */
411 + interrupt-map-mask = <0 0 0 7>;
412 + interrupt-map = <0 0 0 1 &pcie_intc 0>,
413 + <0 0 0 2 &pcie_intc 1>,
414 +diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
415 +index 6f9c071475513..a758e4d226122 100644
416 +--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
417 ++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
418 +@@ -23,7 +23,7 @@ ap_h1_spi: &spi0 {};
419 + adau7002: audio-codec-1 {
420 + compatible = "adi,adau7002";
421 + IOVDD-supply = <&pp1800_l15a>;
422 +- wakeup-delay-ms = <15>;
423 ++ wakeup-delay-ms = <80>;
424 + #sound-dai-cells = <0>;
425 + };
426 +
427 +diff --git a/arch/arm64/boot/dts/qcom/sc7280.dtsi b/arch/arm64/boot/dts/qcom/sc7280.dtsi
428 +index 188c5768a55ae..c08f074106994 100644
429 +--- a/arch/arm64/boot/dts/qcom/sc7280.dtsi
430 ++++ b/arch/arm64/boot/dts/qcom/sc7280.dtsi
431 +@@ -1437,9 +1437,9 @@
432 +
433 + cpufreq_hw: cpufreq@18591000 {
434 + compatible = "qcom,cpufreq-epss";
435 +- reg = <0 0x18591000 0 0x1000>,
436 +- <0 0x18592000 0 0x1000>,
437 +- <0 0x18593000 0 0x1000>;
438 ++ reg = <0 0x18591100 0 0x900>,
439 ++ <0 0x18592100 0 0x900>,
440 ++ <0 0x18593100 0 0x900>;
441 + clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
442 + clock-names = "xo", "alternate";
443 + #freq-domain-cells = <1>;
444 +diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
445 +index 4798368b02efb..9a6eff1813a68 100644
446 +--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
447 ++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
448 +@@ -2210,7 +2210,7 @@
449 + <&gcc GCC_USB3_PHY_SEC_BCR>;
450 + reset-names = "phy", "common";
451 +
452 +- usb_2_ssphy: lane@88eb200 {
453 ++ usb_2_ssphy: lanes@88eb200 {
454 + reg = <0 0x088eb200 0 0x200>,
455 + <0 0x088eb400 0 0x200>,
456 + <0 0x088eb800 0 0x800>;
457 +diff --git a/arch/arm64/boot/dts/qcom/sm8350.dtsi b/arch/arm64/boot/dts/qcom/sm8350.dtsi
458 +index 0d16392bb9767..dbc174d424e26 100644
459 +--- a/arch/arm64/boot/dts/qcom/sm8350.dtsi
460 ++++ b/arch/arm64/boot/dts/qcom/sm8350.dtsi
461 +@@ -666,12 +666,10 @@
462 + clocks = <&rpmhcc RPMH_IPA_CLK>;
463 + clock-names = "core";
464 +
465 +- interconnects = <&aggre2_noc MASTER_IPA &gem_noc SLAVE_LLCC>,
466 +- <&mc_virt MASTER_LLCC &mc_virt SLAVE_EBI1>,
467 ++ interconnects = <&aggre2_noc MASTER_IPA &mc_virt SLAVE_EBI1>,
468 + <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_IPA_CFG>;
469 +- interconnect-names = "ipa_to_llcc",
470 +- "llcc_to_ebi1",
471 +- "appss_to_ipa";
472 ++ interconnect-names = "memory",
473 ++ "config";
474 +
475 + qcom,smem-states = <&ipa_smp2p_out 0>,
476 + <&ipa_smp2p_out 1>;
477 +diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
478 +index 202c4fc88bd51..dde3a07bc417c 100644
479 +--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
480 ++++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
481 +@@ -20,6 +20,7 @@
482 + pinctrl-names = "default";
483 + phy-handle = <&phy0>;
484 + tx-internal-delay-ps = <2000>;
485 ++ rx-internal-delay-ps = <1800>;
486 + status = "okay";
487 +
488 + phy0: ethernet-phy@0 {
489 +diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
490 +index 6783c3ad08567..57784341f39d7 100644
491 +--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
492 ++++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
493 +@@ -277,10 +277,6 @@
494 + interrupt-parent = <&gpio1>;
495 + interrupts = <28 IRQ_TYPE_LEVEL_LOW>;
496 +
497 +- /* Depends on LVDS */
498 +- max-clock = <135000000>;
499 +- min-vrefresh = <50>;
500 +-
501 + adi,input-depth = <8>;
502 + adi,input-colorspace = "rgb";
503 + adi,input-clock = "1x";
504 +diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
505 +index 0ca72f5cda41b..5d1fc9c4bca5e 100644
506 +--- a/arch/arm64/kvm/arm.c
507 ++++ b/arch/arm64/kvm/arm.c
508 +@@ -15,6 +15,7 @@
509 + #include <linux/fs.h>
510 + #include <linux/mman.h>
511 + #include <linux/sched.h>
512 ++#include <linux/kmemleak.h>
513 + #include <linux/kvm.h>
514 + #include <linux/kvm_irqfd.h>
515 + #include <linux/irqbypass.h>
516 +@@ -1986,6 +1987,12 @@ static int finalize_hyp_mode(void)
517 + if (ret)
518 + return ret;
519 +
520 ++ /*
521 ++ * Exclude HYP BSS from kmemleak so that it doesn't get peeked
522 ++ * at, which would end badly once the section is inaccessible.
523 ++ * None of other sections should ever be introspected.
524 ++ */
525 ++ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
526 + ret = pkvm_mark_hyp_section(__hyp_bss);
527 + if (ret)
528 + return ret;
529 +diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
530 +index 2c580204f1dc9..95a18cec14a35 100644
531 +--- a/arch/arm64/kvm/vgic/vgic-v2.c
532 ++++ b/arch/arm64/kvm/vgic/vgic-v2.c
533 +@@ -60,6 +60,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
534 + u32 val = cpuif->vgic_lr[lr];
535 + u32 cpuid, intid = val & GICH_LR_VIRTUALID;
536 + struct vgic_irq *irq;
537 ++ bool deactivated;
538 +
539 + /* Extract the source vCPU id from the LR */
540 + cpuid = val & GICH_LR_PHYSID_CPUID;
541 +@@ -75,7 +76,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
542 +
543 + raw_spin_lock(&irq->irq_lock);
544 +
545 +- /* Always preserve the active bit */
546 ++ /* Always preserve the active bit, note deactivation */
547 ++ deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
548 + irq->active = !!(val & GICH_LR_ACTIVE_BIT);
549 +
550 + if (irq->active && vgic_irq_is_sgi(intid))
551 +@@ -96,36 +98,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
552 + if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
553 + irq->pending_latch = false;
554 +
555 +- /*
556 +- * Level-triggered mapped IRQs are special because we only
557 +- * observe rising edges as input to the VGIC.
558 +- *
559 +- * If the guest never acked the interrupt we have to sample
560 +- * the physical line and set the line level, because the
561 +- * device state could have changed or we simply need to
562 +- * process the still pending interrupt later.
563 +- *
564 +- * If this causes us to lower the level, we have to also clear
565 +- * the physical active state, since we will otherwise never be
566 +- * told when the interrupt becomes asserted again.
567 +- *
568 +- * Another case is when the interrupt requires a helping hand
569 +- * on deactivation (no HW deactivation, for example).
570 +- */
571 +- if (vgic_irq_is_mapped_level(irq)) {
572 +- bool resample = false;
573 +-
574 +- if (val & GICH_LR_PENDING_BIT) {
575 +- irq->line_level = vgic_get_phys_line_level(irq);
576 +- resample = !irq->line_level;
577 +- } else if (vgic_irq_needs_resampling(irq) &&
578 +- !(irq->active || irq->pending_latch)) {
579 +- resample = true;
580 +- }
581 +-
582 +- if (resample)
583 +- vgic_irq_set_phys_active(irq, false);
584 +- }
585 ++ /* Handle resampling for mapped interrupts if required */
586 ++ vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);
587 +
588 + raw_spin_unlock(&irq->irq_lock);
589 + vgic_put_irq(vcpu->kvm, irq);
590 +diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
591 +index 66004f61cd83d..21a6207fb2eed 100644
592 +--- a/arch/arm64/kvm/vgic/vgic-v3.c
593 ++++ b/arch/arm64/kvm/vgic/vgic-v3.c
594 +@@ -46,6 +46,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
595 + u32 intid, cpuid;
596 + struct vgic_irq *irq;
597 + bool is_v2_sgi = false;
598 ++ bool deactivated;
599 +
600 + cpuid = val & GICH_LR_PHYSID_CPUID;
601 + cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
602 +@@ -68,7 +69,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
603 +
604 + raw_spin_lock(&irq->irq_lock);
605 +
606 +- /* Always preserve the active bit */
607 ++ /* Always preserve the active bit, note deactivation */
608 ++ deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
609 + irq->active = !!(val & ICH_LR_ACTIVE_BIT);
610 +
611 + if (irq->active && is_v2_sgi)
612 +@@ -89,36 +91,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
613 + if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
614 + irq->pending_latch = false;
615 +
616 +- /*
617 +- * Level-triggered mapped IRQs are special because we only
618 +- * observe rising edges as input to the VGIC.
619 +- *
620 +- * If the guest never acked the interrupt we have to sample
621 +- * the physical line and set the line level, because the
622 +- * device state could have changed or we simply need to
623 +- * process the still pending interrupt later.
624 +- *
625 +- * If this causes us to lower the level, we have to also clear
626 +- * the physical active state, since we will otherwise never be
627 +- * told when the interrupt becomes asserted again.
628 +- *
629 +- * Another case is when the interrupt requires a helping hand
630 +- * on deactivation (no HW deactivation, for example).
631 +- */
632 +- if (vgic_irq_is_mapped_level(irq)) {
633 +- bool resample = false;
634 +-
635 +- if (val & ICH_LR_PENDING_BIT) {
636 +- irq->line_level = vgic_get_phys_line_level(irq);
637 +- resample = !irq->line_level;
638 +- } else if (vgic_irq_needs_resampling(irq) &&
639 +- !(irq->active || irq->pending_latch)) {
640 +- resample = true;
641 +- }
642 +-
643 +- if (resample)
644 +- vgic_irq_set_phys_active(irq, false);
645 +- }
646 ++ /* Handle resampling for mapped interrupts if required */
647 ++ vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);
648 +
649 + raw_spin_unlock(&irq->irq_lock);
650 + vgic_put_irq(vcpu->kvm, irq);
651 +diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
652 +index 111bff47e4710..42a6ac78fe959 100644
653 +--- a/arch/arm64/kvm/vgic/vgic.c
654 ++++ b/arch/arm64/kvm/vgic/vgic.c
655 +@@ -1022,3 +1022,41 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
656 +
657 + return map_is_active;
658 + }
659 ++
660 ++/*
661 ++ * Level-triggered mapped IRQs are special because we only observe rising
662 ++ * edges as input to the VGIC.
663 ++ *
664 ++ * If the guest never acked the interrupt we have to sample the physical
665 ++ * line and set the line level, because the device state could have changed
666 ++ * or we simply need to process the still pending interrupt later.
667 ++ *
668 ++ * We could also have entered the guest with the interrupt active+pending.
669 ++ * On the next exit, we need to re-evaluate the pending state, as it could
670 ++ * otherwise result in a spurious interrupt by injecting a now potentially
671 ++ * stale pending state.
672 ++ *
673 ++ * If this causes us to lower the level, we have to also clear the physical
674 ++ * active state, since we will otherwise never be told when the interrupt
675 ++ * becomes asserted again.
676 ++ *
677 ++ * Another case is when the interrupt requires a helping hand on
678 ++ * deactivation (no HW deactivation, for example).
679 ++ */
680 ++void vgic_irq_handle_resampling(struct vgic_irq *irq,
681 ++ bool lr_deactivated, bool lr_pending)
682 ++{
683 ++ if (vgic_irq_is_mapped_level(irq)) {
684 ++ bool resample = false;
685 ++
686 ++ if (unlikely(vgic_irq_needs_resampling(irq))) {
687 ++ resample = !(irq->active || irq->pending_latch);
688 ++ } else if (lr_pending || (lr_deactivated && irq->line_level)) {
689 ++ irq->line_level = vgic_get_phys_line_level(irq);
690 ++ resample = !irq->line_level;
691 ++ }
692 ++
693 ++ if (resample)
694 ++ vgic_irq_set_phys_active(irq, false);
695 ++ }
696 ++}
697 +diff --git a/arch/arm64/kvm/vgic/vgic.h b/arch/arm64/kvm/vgic/vgic.h
698 +index dc1f3d1657ee9..14a9218641f57 100644
699 +--- a/arch/arm64/kvm/vgic/vgic.h
700 ++++ b/arch/arm64/kvm/vgic/vgic.h
701 +@@ -169,6 +169,8 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
702 + bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
703 + unsigned long flags);
704 + void vgic_kick_vcpus(struct kvm *kvm);
705 ++void vgic_irq_handle_resampling(struct vgic_irq *irq,
706 ++ bool lr_deactivated, bool lr_pending);
707 +
708 + int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
709 + phys_addr_t addr, phys_addr_t alignment);
710 +diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
711 +index 29e946394fdb4..277d61a094637 100644
712 +--- a/arch/m68k/Kconfig.cpu
713 ++++ b/arch/m68k/Kconfig.cpu
714 +@@ -26,6 +26,7 @@ config COLDFIRE
715 + bool "Coldfire CPU family support"
716 + select ARCH_HAVE_CUSTOM_GPIO_H
717 + select CPU_HAS_NO_BITFIELDS
718 ++ select CPU_HAS_NO_CAS
719 + select CPU_HAS_NO_MULDIV64
720 + select GENERIC_CSUM
721 + select GPIOLIB
722 +@@ -39,6 +40,7 @@ config M68000
723 + bool
724 + depends on !MMU
725 + select CPU_HAS_NO_BITFIELDS
726 ++ select CPU_HAS_NO_CAS
727 + select CPU_HAS_NO_MULDIV64
728 + select CPU_HAS_NO_UNALIGNED
729 + select GENERIC_CSUM
730 +@@ -54,6 +56,7 @@ config M68000
731 + config MCPU32
732 + bool
733 + select CPU_HAS_NO_BITFIELDS
734 ++ select CPU_HAS_NO_CAS
735 + select CPU_HAS_NO_UNALIGNED
736 + select CPU_NO_EFFICIENT_FFS
737 + help
738 +@@ -383,7 +386,7 @@ config ADVANCED
739 +
740 + config RMW_INSNS
741 + bool "Use read-modify-write instructions"
742 +- depends on ADVANCED
743 ++ depends on ADVANCED && !CPU_HAS_NO_CAS
744 + help
745 + This allows to use certain instructions that work with indivisible
746 + read-modify-write bus cycles. While this is faster than the
747 +@@ -450,6 +453,9 @@ config M68K_L2_CACHE
748 + config CPU_HAS_NO_BITFIELDS
749 + bool
750 +
751 ++config CPU_HAS_NO_CAS
752 ++ bool
753 ++
754 + config CPU_HAS_NO_MULDIV64
755 + bool
756 +
757 +diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c
758 +index 2ed841e941113..d03b6c4aa86b4 100644
759 +--- a/arch/m68k/coldfire/clk.c
760 ++++ b/arch/m68k/coldfire/clk.c
761 +@@ -78,7 +78,7 @@ int clk_enable(struct clk *clk)
762 + unsigned long flags;
763 +
764 + if (!clk)
765 +- return -EINVAL;
766 ++ return 0;
767 +
768 + spin_lock_irqsave(&clk_lock, flags);
769 + if ((clk->enabled++ == 0) && clk->clk_ops)
770 +diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
771 +index d2875e32abfca..79e55421cfb18 100644
772 +--- a/arch/m68k/emu/nfeth.c
773 ++++ b/arch/m68k/emu/nfeth.c
774 +@@ -254,8 +254,8 @@ static void __exit nfeth_cleanup(void)
775 +
776 + for (i = 0; i < MAX_UNIT; i++) {
777 + if (nfeth_dev[i]) {
778 +- unregister_netdev(nfeth_dev[0]);
779 +- free_netdev(nfeth_dev[0]);
780 ++ unregister_netdev(nfeth_dev[i]);
781 ++ free_netdev(nfeth_dev[i]);
782 + }
783 + }
784 + free_irq(nfEtherIRQ, nfeth_interrupt);
785 +diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
786 +index 8637bf8a2f652..cfba83d230fde 100644
787 +--- a/arch/m68k/include/asm/atomic.h
788 ++++ b/arch/m68k/include/asm/atomic.h
789 +@@ -48,7 +48,7 @@ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
790 + " casl %2,%1,%0\n" \
791 + " jne 1b" \
792 + : "+m" (*v), "=&d" (t), "=&d" (tmp) \
793 +- : "g" (i), "2" (arch_atomic_read(v))); \
794 ++ : "di" (i), "2" (arch_atomic_read(v))); \
795 + return t; \
796 + }
797 +
798 +@@ -63,7 +63,7 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
799 + " casl %2,%1,%0\n" \
800 + " jne 1b" \
801 + : "+m" (*v), "=&d" (t), "=&d" (tmp) \
802 +- : "g" (i), "2" (arch_atomic_read(v))); \
803 ++ : "di" (i), "2" (arch_atomic_read(v))); \
804 + return tmp; \
805 + }
806 +
807 +diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c
808 +index 2d395998f524a..7ee49f5881d15 100644
809 +--- a/arch/parisc/boot/compressed/misc.c
810 ++++ b/arch/parisc/boot/compressed/misc.c
811 +@@ -26,7 +26,7 @@
812 + extern char input_data[];
813 + extern int input_len;
814 + /* output_len is inserted by the linker possibly at an unaligned address */
815 +-extern __le32 output_len __aligned(1);
816 ++extern char output_len;
817 + extern char _text, _end;
818 + extern char _bss, _ebss;
819 + extern char _startcode_end;
820 +diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
821 +index 161a9e12bfb86..630eab0fa1760 100644
822 +--- a/arch/s390/include/asm/kvm_host.h
823 ++++ b/arch/s390/include/asm/kvm_host.h
824 +@@ -957,6 +957,7 @@ struct kvm_arch{
825 + atomic64_t cmma_dirty_pages;
826 + /* subset of available cpu features enabled by user space */
827 + DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
828 ++ /* indexed by vcpu_idx */
829 + DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
830 + struct kvm_s390_gisa_interrupt gisa_int;
831 + struct kvm_s390_pv pv;
832 +diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
833 +index 47bde5a20a41c..11213c8bfca56 100644
834 +--- a/arch/s390/include/asm/lowcore.h
835 ++++ b/arch/s390/include/asm/lowcore.h
836 +@@ -124,7 +124,8 @@ struct lowcore {
837 + /* Restart function and parameter. */
838 + __u64 restart_fn; /* 0x0370 */
839 + __u64 restart_data; /* 0x0378 */
840 +- __u64 restart_source; /* 0x0380 */
841 ++ __u32 restart_source; /* 0x0380 */
842 ++ __u32 restart_flags; /* 0x0384 */
843 +
844 + /* Address space pointer. */
845 + __u64 kernel_asce; /* 0x0388 */
846 +diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
847 +index ddc7858bbce40..879b8e3f609cd 100644
848 +--- a/arch/s390/include/asm/processor.h
849 ++++ b/arch/s390/include/asm/processor.h
850 +@@ -26,6 +26,8 @@
851 + #define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
852 + #define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
853 +
854 ++#define RESTART_FLAG_CTLREGS _AC(1 << 0, U)
855 ++
856 + #ifndef __ASSEMBLY__
857 +
858 + #include <linux/cpumask.h>
859 +diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
860 +index 77ff2130cb045..dc53b0452ce2f 100644
861 +--- a/arch/s390/kernel/asm-offsets.c
862 ++++ b/arch/s390/kernel/asm-offsets.c
863 +@@ -116,6 +116,7 @@ int main(void)
864 + OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
865 + OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
866 + OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
867 ++ OFFSET(__LC_RESTART_FLAGS, lowcore, restart_flags);
868 + OFFSET(__LC_KERNEL_ASCE, lowcore, kernel_asce);
869 + OFFSET(__LC_USER_ASCE, lowcore, user_asce);
870 + OFFSET(__LC_LPP, lowcore, lpp);
871 +diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
872 +index 09b6c6402f9b7..05b765b8038eb 100644
873 +--- a/arch/s390/kernel/debug.c
874 ++++ b/arch/s390/kernel/debug.c
875 +@@ -24,6 +24,7 @@
876 + #include <linux/export.h>
877 + #include <linux/init.h>
878 + #include <linux/fs.h>
879 ++#include <linux/minmax.h>
880 + #include <linux/debugfs.h>
881 +
882 + #include <asm/debug.h>
883 +@@ -92,6 +93,8 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
884 + char *out_buf, const char *in_buf);
885 + static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
886 + char *out_buf, debug_sprintf_entry_t *curr_event);
887 ++static void debug_areas_swap(debug_info_t *a, debug_info_t *b);
888 ++static void debug_events_append(debug_info_t *dest, debug_info_t *src);
889 +
890 + /* globals */
891 +
892 +@@ -311,24 +314,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
893 + goto out;
894 +
895 + rc->mode = mode & ~S_IFMT;
896 +-
897 +- /* create root directory */
898 +- rc->debugfs_root_entry = debugfs_create_dir(rc->name,
899 +- debug_debugfs_root_entry);
900 +-
901 +- /* append new element to linked list */
902 +- if (!debug_area_first) {
903 +- /* first element in list */
904 +- debug_area_first = rc;
905 +- rc->prev = NULL;
906 +- } else {
907 +- /* append element to end of list */
908 +- debug_area_last->next = rc;
909 +- rc->prev = debug_area_last;
910 +- }
911 +- debug_area_last = rc;
912 +- rc->next = NULL;
913 +-
914 + refcount_set(&rc->ref_count, 1);
915 + out:
916 + return rc;
917 +@@ -388,27 +373,10 @@ static void debug_info_get(debug_info_t *db_info)
918 + */
919 + static void debug_info_put(debug_info_t *db_info)
920 + {
921 +- int i;
922 +-
923 + if (!db_info)
924 + return;
925 +- if (refcount_dec_and_test(&db_info->ref_count)) {
926 +- for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
927 +- if (!db_info->views[i])
928 +- continue;
929 +- debugfs_remove(db_info->debugfs_entries[i]);
930 +- }
931 +- debugfs_remove(db_info->debugfs_root_entry);
932 +- if (db_info == debug_area_first)
933 +- debug_area_first = db_info->next;
934 +- if (db_info == debug_area_last)
935 +- debug_area_last = db_info->prev;
936 +- if (db_info->prev)
937 +- db_info->prev->next = db_info->next;
938 +- if (db_info->next)
939 +- db_info->next->prev = db_info->prev;
940 ++ if (refcount_dec_and_test(&db_info->ref_count))
941 + debug_info_free(db_info);
942 +- }
943 + }
944 +
945 + /*
946 +@@ -632,6 +600,31 @@ static int debug_close(struct inode *inode, struct file *file)
947 + return 0; /* success */
948 + }
949 +
950 ++/* Create debugfs entries and add to internal list. */
951 ++static void _debug_register(debug_info_t *id)
952 ++{
953 ++ /* create root directory */
954 ++ id->debugfs_root_entry = debugfs_create_dir(id->name,
955 ++ debug_debugfs_root_entry);
956 ++
957 ++ /* append new element to linked list */
958 ++ if (!debug_area_first) {
959 ++ /* first element in list */
960 ++ debug_area_first = id;
961 ++ id->prev = NULL;
962 ++ } else {
963 ++ /* append element to end of list */
964 ++ debug_area_last->next = id;
965 ++ id->prev = debug_area_last;
966 ++ }
967 ++ debug_area_last = id;
968 ++ id->next = NULL;
969 ++
970 ++ debug_register_view(id, &debug_level_view);
971 ++ debug_register_view(id, &debug_flush_view);
972 ++ debug_register_view(id, &debug_pages_view);
973 ++}
974 ++
975 + /**
976 + * debug_register_mode() - creates and initializes debug area.
977 + *
978 +@@ -661,19 +654,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
979 + if ((uid != 0) || (gid != 0))
980 + pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
981 + BUG_ON(!initialized);
982 +- mutex_lock(&debug_mutex);
983 +
984 + /* create new debug_info */
985 + rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
986 +- if (!rc)
987 +- goto out;
988 +- debug_register_view(rc, &debug_level_view);
989 +- debug_register_view(rc, &debug_flush_view);
990 +- debug_register_view(rc, &debug_pages_view);
991 +-out:
992 +- if (!rc)
993 ++ if (rc) {
994 ++ mutex_lock(&debug_mutex);
995 ++ _debug_register(rc);
996 ++ mutex_unlock(&debug_mutex);
997 ++ } else {
998 + pr_err("Registering debug feature %s failed\n", name);
999 +- mutex_unlock(&debug_mutex);
1000 ++ }
1001 + return rc;
1002 + }
1003 + EXPORT_SYMBOL(debug_register_mode);
1004 +@@ -702,6 +692,27 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
1005 + }
1006 + EXPORT_SYMBOL(debug_register);
1007 +
1008 ++/* Remove debugfs entries and remove from internal list. */
1009 ++static void _debug_unregister(debug_info_t *id)
1010 ++{
1011 ++ int i;
1012 ++
1013 ++ for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
1014 ++ if (!id->views[i])
1015 ++ continue;
1016 ++ debugfs_remove(id->debugfs_entries[i]);
1017 ++ }
1018 ++ debugfs_remove(id->debugfs_root_entry);
1019 ++ if (id == debug_area_first)
1020 ++ debug_area_first = id->next;
1021 ++ if (id == debug_area_last)
1022 ++ debug_area_last = id->prev;
1023 ++ if (id->prev)
1024 ++ id->prev->next = id->next;
1025 ++ if (id->next)
1026 ++ id->next->prev = id->prev;
1027 ++}
1028 ++
1029 + /**
1030 + * debug_unregister() - give back debug area.
1031 + *
1032 +@@ -715,8 +726,10 @@ void debug_unregister(debug_info_t *id)
1033 + if (!id)
1034 + return;
1035 + mutex_lock(&debug_mutex);
1036 +- debug_info_put(id);
1037 ++ _debug_unregister(id);
1038 + mutex_unlock(&debug_mutex);
1039 ++
1040 ++ debug_info_put(id);
1041 + }
1042 + EXPORT_SYMBOL(debug_unregister);
1043 +
1044 +@@ -726,35 +739,28 @@ EXPORT_SYMBOL(debug_unregister);
1045 + */
1046 + static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
1047 + {
1048 +- debug_entry_t ***new_areas;
1049 ++ debug_info_t *new_id;
1050 + unsigned long flags;
1051 +- int rc = 0;
1052 +
1053 + if (!id || (nr_areas <= 0) || (pages_per_area < 0))
1054 + return -EINVAL;
1055 +- if (pages_per_area > 0) {
1056 +- new_areas = debug_areas_alloc(pages_per_area, nr_areas);
1057 +- if (!new_areas) {
1058 +- pr_info("Allocating memory for %i pages failed\n",
1059 +- pages_per_area);
1060 +- rc = -ENOMEM;
1061 +- goto out;
1062 +- }
1063 +- } else {
1064 +- new_areas = NULL;
1065 ++
1066 ++ new_id = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
1067 ++ id->level, ALL_AREAS);
1068 ++ if (!new_id) {
1069 ++ pr_info("Allocating memory for %i pages failed\n",
1070 ++ pages_per_area);
1071 ++ return -ENOMEM;
1072 + }
1073 ++
1074 + spin_lock_irqsave(&id->lock, flags);
1075 +- debug_areas_free(id);
1076 +- id->areas = new_areas;
1077 +- id->nr_areas = nr_areas;
1078 +- id->pages_per_area = pages_per_area;
1079 +- id->active_area = 0;
1080 +- memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
1081 +- memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
1082 ++ debug_events_append(new_id, id);
1083 ++ debug_areas_swap(new_id, id);
1084 ++ debug_info_free(new_id);
1085 + spin_unlock_irqrestore(&id->lock, flags);
1086 + pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
1087 +-out:
1088 +- return rc;
1089 ++
1090 ++ return 0;
1091 + }
1092 +
1093 + /**
1094 +@@ -821,6 +827,42 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
1095 + id->active_entries[id->active_area]);
1096 + }
1097 +
1098 ++/* Swap debug areas of a and b. */
1099 ++static void debug_areas_swap(debug_info_t *a, debug_info_t *b)
1100 ++{
1101 ++ swap(a->nr_areas, b->nr_areas);
1102 ++ swap(a->pages_per_area, b->pages_per_area);
1103 ++ swap(a->areas, b->areas);
1104 ++ swap(a->active_area, b->active_area);
1105 ++ swap(a->active_pages, b->active_pages);
1106 ++ swap(a->active_entries, b->active_entries);
1107 ++}
1108 ++
1109 ++/* Append all debug events in active area from source to destination log. */
1110 ++static void debug_events_append(debug_info_t *dest, debug_info_t *src)
1111 ++{
1112 ++ debug_entry_t *from, *to, *last;
1113 ++
1114 ++ if (!src->areas || !dest->areas)
1115 ++ return;
1116 ++
1117 ++ /* Loop over all entries in src, starting with oldest. */
1118 ++ from = get_active_entry(src);
1119 ++ last = from;
1120 ++ do {
1121 ++ if (from->clock != 0LL) {
1122 ++ to = get_active_entry(dest);
1123 ++ memset(to, 0, dest->entry_size);
1124 ++ memcpy(to, from, min(src->entry_size,
1125 ++ dest->entry_size));
1126 ++ proceed_active_entry(dest);
1127 ++ }
1128 ++
1129 ++ proceed_active_entry(src);
1130 ++ from = get_active_entry(src);
1131 ++ } while (from != last);
1132 ++}
1133 ++
1134 + /*
1135 + * debug_finish_entry:
1136 + * - set timestamp, caller address, cpu number etc.
1137 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
1138 +index 5a2f70cbd3a9d..b9716a7e326d0 100644
1139 +--- a/arch/s390/kernel/entry.S
1140 ++++ b/arch/s390/kernel/entry.S
1141 +@@ -624,12 +624,15 @@ ENTRY(mcck_int_handler)
1142 + 4: j 4b
1143 + ENDPROC(mcck_int_handler)
1144 +
1145 +-#
1146 +-# PSW restart interrupt handler
1147 +-#
1148 + ENTRY(restart_int_handler)
1149 + ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
1150 + stg %r15,__LC_SAVE_AREA_RESTART
1151 ++ TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
1152 ++ jz 0f
1153 ++ la %r15,4095
1154 ++ lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
1155 ++0: larl %r15,.Lstosm_tmp
1156 ++ stosm 0(%r15),0x04 # turn dat on, keep irqs off
1157 + lg %r15,__LC_RESTART_STACK
1158 + xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
1159 + stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
1160 +@@ -638,7 +641,7 @@ ENTRY(restart_int_handler)
1161 + xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
1162 + lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
1163 + lg %r2,__LC_RESTART_DATA
1164 +- lg %r3,__LC_RESTART_SOURCE
1165 ++ lgf %r3,__LC_RESTART_SOURCE
1166 + ltgr %r3,%r3 # test source cpu address
1167 + jm 1f # negative -> skip source stop
1168 + 0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu
1169 +diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
1170 +index 50e2c21e0ec94..911cd39123514 100644
1171 +--- a/arch/s390/kernel/ipl.c
1172 ++++ b/arch/s390/kernel/ipl.c
1173 +@@ -179,8 +179,6 @@ static inline int __diag308(unsigned long subcode, void *addr)
1174 +
1175 + int diag308(unsigned long subcode, void *addr)
1176 + {
1177 +- if (IS_ENABLED(CONFIG_KASAN))
1178 +- __arch_local_irq_stosm(0x04); /* enable DAT */
1179 + diag_stat_inc(DIAG_STAT_X308);
1180 + return __diag308(subcode, addr);
1181 + }
1182 +@@ -1843,7 +1841,6 @@ static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
1183 +
1184 + static void __do_restart(void *ignore)
1185 + {
1186 +- __arch_local_irq_stosm(0x04); /* enable DAT */
1187 + smp_send_stop();
1188 + #ifdef CONFIG_CRASH_DUMP
1189 + crash_kexec(NULL);
1190 +diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
1191 +index 1005a6935fbe3..c1fbc979e0e8b 100644
1192 +--- a/arch/s390/kernel/machine_kexec.c
1193 ++++ b/arch/s390/kernel/machine_kexec.c
1194 +@@ -263,7 +263,6 @@ static void __do_machine_kexec(void *data)
1195 + */
1196 + static void __machine_kexec(void *data)
1197 + {
1198 +- __arch_local_irq_stosm(0x04); /* enable DAT */
1199 + pfault_fini();
1200 + tracing_off();
1201 + debug_locks_off();
1202 +diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
1203 +index ff0f9e8389162..ee23908f1b960 100644
1204 +--- a/arch/s390/kernel/setup.c
1205 ++++ b/arch/s390/kernel/setup.c
1206 +@@ -421,7 +421,7 @@ static void __init setup_lowcore_dat_off(void)
1207 + lc->restart_stack = (unsigned long) restart_stack;
1208 + lc->restart_fn = (unsigned long) do_restart;
1209 + lc->restart_data = 0;
1210 +- lc->restart_source = -1UL;
1211 ++ lc->restart_source = -1U;
1212 +
1213 + mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
1214 + if (!mcck_stack)
1215 +@@ -450,12 +450,19 @@ static void __init setup_lowcore_dat_off(void)
1216 +
1217 + static void __init setup_lowcore_dat_on(void)
1218 + {
1219 ++ struct lowcore *lc = lowcore_ptr[0];
1220 ++
1221 + __ctl_clear_bit(0, 28);
1222 + S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
1223 + S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
1224 + S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
1225 + S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
1226 ++ __ctl_store(S390_lowcore.cregs_save_area, 0, 15);
1227 + __ctl_set_bit(0, 28);
1228 ++ mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
1229 ++ mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
1230 ++ memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
1231 ++ sizeof(S390_lowcore.cregs_save_area));
1232 + }
1233 +
1234 + static struct resource code_resource = {
1235 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1236 +index 8984711f72ede..8e8ace899407c 100644
1237 +--- a/arch/s390/kernel/smp.c
1238 ++++ b/arch/s390/kernel/smp.c
1239 +@@ -252,6 +252,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
1240 + cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
1241 + cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
1242 + lc->cpu_nr = cpu;
1243 ++ lc->restart_flags = RESTART_FLAG_CTLREGS;
1244 + lc->spinlock_lockval = arch_spin_lockval(cpu);
1245 + lc->spinlock_index = 0;
1246 + lc->percpu_offset = __per_cpu_offset[cpu];
1247 +@@ -297,7 +298,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
1248 + lc->restart_stack = lc->nodat_stack;
1249 + lc->restart_fn = (unsigned long) func;
1250 + lc->restart_data = (unsigned long) data;
1251 +- lc->restart_source = -1UL;
1252 ++ lc->restart_source = -1U;
1253 + pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
1254 + }
1255 +
1256 +@@ -311,12 +312,12 @@ static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
1257 + func(data); /* should not return */
1258 + }
1259 +
1260 +-static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
1261 +- pcpu_delegate_fn *func,
1262 +- void *data, unsigned long stack)
1263 ++static void pcpu_delegate(struct pcpu *pcpu,
1264 ++ pcpu_delegate_fn *func,
1265 ++ void *data, unsigned long stack)
1266 + {
1267 + struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
1268 +- unsigned long source_cpu = stap();
1269 ++ unsigned int source_cpu = stap();
1270 +
1271 + __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
1272 + if (pcpu->address == source_cpu) {
1273 +@@ -569,6 +570,9 @@ static void smp_ctl_bit_callback(void *info)
1274 + __ctl_load(cregs, 0, 15);
1275 + }
1276 +
1277 ++static DEFINE_SPINLOCK(ctl_lock);
1278 ++static unsigned long ctlreg;
1279 ++
1280 + /*
1281 + * Set a bit in a control register of all cpus
1282 + */
1283 +@@ -576,6 +580,11 @@ void smp_ctl_set_bit(int cr, int bit)
1284 + {
1285 + struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
1286 +
1287 ++ spin_lock(&ctl_lock);
1288 ++ memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
1289 ++ __set_bit(bit, &ctlreg);
1290 ++ memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
1291 ++ spin_unlock(&ctl_lock);
1292 + on_each_cpu(smp_ctl_bit_callback, &parms, 1);
1293 + }
1294 + EXPORT_SYMBOL(smp_ctl_set_bit);
1295 +@@ -587,6 +596,11 @@ void smp_ctl_clear_bit(int cr, int bit)
1296 + {
1297 + struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
1298 +
1299 ++ spin_lock(&ctl_lock);
1300 ++ memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
1301 ++ __clear_bit(bit, &ctlreg);
1302 ++ memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
1303 ++ spin_unlock(&ctl_lock);
1304 + on_each_cpu(smp_ctl_bit_callback, &parms, 1);
1305 + }
1306 + EXPORT_SYMBOL(smp_ctl_clear_bit);
1307 +@@ -895,14 +909,13 @@ static void smp_init_secondary(void)
1308 + /*
1309 + * Activate a secondary processor.
1310 + */
1311 +-static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
1312 ++static void smp_start_secondary(void *cpuvoid)
1313 + {
1314 + S390_lowcore.restart_stack = (unsigned long) restart_stack;
1315 + S390_lowcore.restart_fn = (unsigned long) do_restart;
1316 + S390_lowcore.restart_data = 0;
1317 +- S390_lowcore.restart_source = -1UL;
1318 +- __ctl_load(S390_lowcore.cregs_save_area, 0, 15);
1319 +- __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
1320 ++ S390_lowcore.restart_source = -1U;
1321 ++ S390_lowcore.restart_flags = 0;
1322 + call_on_stack_noreturn(smp_init_secondary, S390_lowcore.kernel_stack);
1323 + }
1324 +
1325 +diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
1326 +index d548d60caed25..16256e17a544a 100644
1327 +--- a/arch/s390/kvm/interrupt.c
1328 ++++ b/arch/s390/kvm/interrupt.c
1329 +@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
1330 + static void __set_cpu_idle(struct kvm_vcpu *vcpu)
1331 + {
1332 + kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
1333 +- set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
1334 ++ set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
1335 + }
1336 +
1337 + static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
1338 + {
1339 + kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
1340 +- clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
1341 ++ clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
1342 + }
1343 +
1344 + static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
1345 +@@ -3050,18 +3050,18 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
1346 +
1347 + static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
1348 + {
1349 +- int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
1350 ++ int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
1351 + struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
1352 + struct kvm_vcpu *vcpu;
1353 +
1354 +- for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
1355 +- vcpu = kvm_get_vcpu(kvm, vcpu_id);
1356 ++ for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
1357 ++ vcpu = kvm_get_vcpu(kvm, vcpu_idx);
1358 + if (psw_ioint_disabled(vcpu))
1359 + continue;
1360 + deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
1361 + if (deliverable_mask) {
1362 + /* lately kicked but not yet running */
1363 +- if (test_and_set_bit(vcpu_id, gi->kicked_mask))
1364 ++ if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
1365 + return;
1366 + kvm_s390_vcpu_wakeup(vcpu);
1367 + return;
1368 +diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
1369 +index 4527ac7b5961d..8580543c5bc33 100644
1370 +--- a/arch/s390/kvm/kvm-s390.c
1371 ++++ b/arch/s390/kvm/kvm-s390.c
1372 +@@ -4044,7 +4044,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
1373 + kvm_s390_patch_guest_per_regs(vcpu);
1374 + }
1375 +
1376 +- clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
1377 ++ clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
1378 +
1379 + vcpu->arch.sie_block->icptcode = 0;
1380 + cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1381 +diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
1382 +index 9fad25109b0dd..ecd741ee3276e 100644
1383 +--- a/arch/s390/kvm/kvm-s390.h
1384 ++++ b/arch/s390/kvm/kvm-s390.h
1385 +@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
1386 +
1387 + static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
1388 + {
1389 +- return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
1390 ++ return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
1391 + }
1392 +
1393 + static inline int kvm_is_ucontrol(struct kvm *kvm)
1394 +diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
1395 +index a0fdc6dc5f9d0..cc3af046c14e5 100644
1396 +--- a/arch/s390/mm/kasan_init.c
1397 ++++ b/arch/s390/mm/kasan_init.c
1398 +@@ -107,6 +107,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
1399 + sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
1400 + }
1401 +
1402 ++ /*
1403 ++ * The first 1MB of 1:1 mapping is mapped with 4KB pages
1404 ++ */
1405 + while (address < end) {
1406 + pg_dir = pgd_offset_k(address);
1407 + if (pgd_none(*pg_dir)) {
1408 +@@ -157,30 +160,26 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
1409 +
1410 + pm_dir = pmd_offset(pu_dir, address);
1411 + if (pmd_none(*pm_dir)) {
1412 +- if (mode == POPULATE_ZERO_SHADOW &&
1413 +- IS_ALIGNED(address, PMD_SIZE) &&
1414 ++ if (IS_ALIGNED(address, PMD_SIZE) &&
1415 + end - address >= PMD_SIZE) {
1416 +- pmd_populate(&init_mm, pm_dir,
1417 +- kasan_early_shadow_pte);
1418 +- address = (address + PMD_SIZE) & PMD_MASK;
1419 +- continue;
1420 +- }
1421 +- /* the first megabyte of 1:1 is mapped with 4k pages */
1422 +- if (has_edat && address && end - address >= PMD_SIZE &&
1423 +- mode != POPULATE_ZERO_SHADOW) {
1424 +- void *page;
1425 +-
1426 +- if (mode == POPULATE_ONE2ONE) {
1427 +- page = (void *)address;
1428 +- } else {
1429 +- page = kasan_early_alloc_segment();
1430 +- memset(page, 0, _SEGMENT_SIZE);
1431 ++ if (mode == POPULATE_ZERO_SHADOW) {
1432 ++ pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
1433 ++ address = (address + PMD_SIZE) & PMD_MASK;
1434 ++ continue;
1435 ++ } else if (has_edat && address) {
1436 ++ void *page;
1437 ++
1438 ++ if (mode == POPULATE_ONE2ONE) {
1439 ++ page = (void *)address;
1440 ++ } else {
1441 ++ page = kasan_early_alloc_segment();
1442 ++ memset(page, 0, _SEGMENT_SIZE);
1443 ++ }
1444 ++ pmd_val(*pm_dir) = __pa(page) | sgt_prot;
1445 ++ address = (address + PMD_SIZE) & PMD_MASK;
1446 ++ continue;
1447 + }
1448 +- pmd_val(*pm_dir) = __pa(page) | sgt_prot;
1449 +- address = (address + PMD_SIZE) & PMD_MASK;
1450 +- continue;
1451 + }
1452 +-
1453 + pt_dir = kasan_early_pte_alloc();
1454 + pmd_populate(&init_mm, pm_dir, pt_dir);
1455 + } else if (pmd_large(*pm_dir)) {
1456 +diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
1457 +index 8fcb7ecb7225a..77cd965cffefa 100644
1458 +--- a/arch/s390/pci/pci.c
1459 ++++ b/arch/s390/pci/pci.c
1460 +@@ -661,9 +661,10 @@ int zpci_enable_device(struct zpci_dev *zdev)
1461 + {
1462 + int rc;
1463 +
1464 +- rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
1465 +- if (rc)
1466 ++ if (clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES)) {
1467 ++ rc = -EIO;
1468 + goto out;
1469 ++ }
1470 +
1471 + rc = zpci_dma_init_device(zdev);
1472 + if (rc)
1473 +@@ -684,7 +685,7 @@ int zpci_disable_device(struct zpci_dev *zdev)
1474 + * The zPCI function may already be disabled by the platform, this is
1475 + * detected in clp_disable_fh() which becomes a no-op.
1476 + */
1477 +- return clp_disable_fh(zdev);
1478 ++ return clp_disable_fh(zdev) ? -EIO : 0;
1479 + }
1480 +
1481 + /**
1482 +diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
1483 +index d3331596ddbe1..0a0e8b8293bef 100644
1484 +--- a/arch/s390/pci/pci_clp.c
1485 ++++ b/arch/s390/pci/pci_clp.c
1486 +@@ -213,15 +213,19 @@ out:
1487 + }
1488 +
1489 + static int clp_refresh_fh(u32 fid);
1490 +-/*
1491 +- * Enable/Disable a given PCI function and update its function handle if
1492 +- * necessary
1493 ++/**
1494 ++ * clp_set_pci_fn() - Execute a command on a PCI function
1495 ++ * @zdev: Function that will be affected
1496 ++ * @nr_dma_as: DMA address space number
1497 ++ * @command: The command code to execute
1498 ++ *
1499 ++ * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
1500 ++ * > 0 for non-success platform responses
1501 + */
1502 + static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
1503 + {
1504 + struct clp_req_rsp_set_pci *rrb;
1505 + int rc, retries = 100;
1506 +- u32 fid = zdev->fid;
1507 +
1508 + rrb = clp_alloc_block(GFP_KERNEL);
1509 + if (!rrb)
1510 +@@ -245,17 +249,16 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
1511 + }
1512 + } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
1513 +
1514 +- if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
1515 +- zpci_err("Set PCI FN:\n");
1516 +- zpci_err_clp(rrb->response.hdr.rsp, rc);
1517 +- }
1518 +-
1519 + if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
1520 + zdev->fh = rrb->response.fh;
1521 +- } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
1522 +- rrb->response.fh == 0) {
1523 ++ } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY) {
1524 + /* Function is already in desired state - update handle */
1525 +- rc = clp_refresh_fh(fid);
1526 ++ rc = clp_refresh_fh(zdev->fid);
1527 ++ } else {
1528 ++ zpci_err("Set PCI FN:\n");
1529 ++ zpci_err_clp(rrb->response.hdr.rsp, rc);
1530 ++ if (!rc)
1531 ++ rc = rrb->response.hdr.rsp;
1532 + }
1533 + clp_free_block(rrb);
1534 + return rc;
1535 +@@ -301,17 +304,13 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
1536 +
1537 + rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
1538 + zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
1539 +- if (rc)
1540 +- goto out;
1541 +-
1542 +- if (zpci_use_mio(zdev)) {
1543 ++ if (!rc && zpci_use_mio(zdev)) {
1544 + rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
1545 + zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
1546 + zdev->fid, zdev->fh, rc);
1547 + if (rc)
1548 + clp_disable_fh(zdev);
1549 + }
1550 +-out:
1551 + return rc;
1552 + }
1553 +
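The kernel-doc above establishes a three-way return convention for clp_set_pci_fn(): 0 on success, a negative errno for Linux-side failures, and a positive raw platform response code otherwise. Callers that must hand a plain errno upward therefore collapse the positive case themselves, exactly as zpci_enable_device() and zpci_disable_device() now do. A minimal sketch of that caller-side mapping (the function name here is hypothetical):

	static int example_enable(struct zpci_dev *zdev)
	{
		int rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);

		if (rc < 0)
			return rc;	/* Linux error, e.g. -ENOMEM */
		if (rc > 0)
			return -EIO;	/* non-success platform response */
		return 0;
	}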
1554 +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
1555 +index 2144e54a6c892..388643ca2177e 100644
1556 +--- a/arch/x86/crypto/aesni-intel_glue.c
1557 ++++ b/arch/x86/crypto/aesni-intel_glue.c
1558 +@@ -849,6 +849,8 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
1559 + return -EINVAL;
1560 +
1561 + err = skcipher_walk_virt(&walk, req, false);
1562 ++ if (err)
1563 ++ return err;
1564 +
1565 + if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
1566 + int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
1567 +@@ -862,7 +864,10 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
1568 + skcipher_request_set_crypt(&subreq, req->src, req->dst,
1569 + blocks * AES_BLOCK_SIZE, req->iv);
1570 + req = &subreq;
1571 ++
1572 + err = skcipher_walk_virt(&walk, req, false);
1573 ++ if (err)
1574 ++ return err;
1575 + } else {
1576 + tail = 0;
1577 + }
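The two added checks follow the standard skcipher pattern: skcipher_walk_virt() can fail (for example on allocation), and the walk state is only meaningful after it succeeds. The general shape, as a sketch:

	err = skcipher_walk_virt(&walk, req, false);
	if (err)
		return err;	/* do not touch walk.* on failure */

	while (walk.nbytes) {
		/* process walk.src.virt.addr into walk.dst.virt.addr */
		err = skcipher_walk_done(&walk, 0);
	}
	return err;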
1578 +diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
1579 +index c682b09b18fa0..482a9931d1e65 100644
1580 +--- a/arch/x86/events/intel/uncore_snbep.c
1581 ++++ b/arch/x86/events/intel/uncore_snbep.c
1582 +@@ -3838,26 +3838,32 @@ clear_attr_update:
1583 + return ret;
1584 + }
1585 +
1586 +-static int skx_iio_set_mapping(struct intel_uncore_type *type)
1587 +-{
1588 +- return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
1589 +-}
1590 +-
1591 +-static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
1592 ++static void
1593 ++pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
1594 + {
1595 +- struct attribute **attr = skx_iio_mapping_group.attrs;
1596 ++ struct attribute **attr = ag->attrs;
1597 +
1598 + if (!attr)
1599 + return;
1600 +
1601 + for (; *attr; attr++)
1602 + kfree((*attr)->name);
1603 +- kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
1604 +- kfree(skx_iio_mapping_group.attrs);
1605 +- skx_iio_mapping_group.attrs = NULL;
1606 ++ kfree(attr_to_ext_attr(*ag->attrs));
1607 ++ kfree(ag->attrs);
1608 ++ ag->attrs = NULL;
1609 + kfree(type->topology);
1610 + }
1611 +
1612 ++static int skx_iio_set_mapping(struct intel_uncore_type *type)
1613 ++{
1614 ++ return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
1615 ++}
1616 ++
1617 ++static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
1618 ++{
1619 ++ pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
1620 ++}
1621 ++
1622 + static struct intel_uncore_type skx_uncore_iio = {
1623 + .name = "iio",
1624 + .num_counters = 4,
1625 +@@ -4501,6 +4507,11 @@ static int snr_iio_set_mapping(struct intel_uncore_type *type)
1626 + return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
1627 + }
1628 +
1629 ++static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
1630 ++{
1631 ++ pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
1632 ++}
1633 ++
1634 + static struct intel_uncore_type snr_uncore_iio = {
1635 + .name = "iio",
1636 + .num_counters = 4,
1637 +@@ -4517,7 +4528,7 @@ static struct intel_uncore_type snr_uncore_iio = {
1638 + .attr_update = snr_iio_attr_update,
1639 + .get_topology = snr_iio_get_topology,
1640 + .set_mapping = snr_iio_set_mapping,
1641 +- .cleanup_mapping = skx_iio_cleanup_mapping,
1642 ++ .cleanup_mapping = snr_iio_cleanup_mapping,
1643 + };
1644 +
1645 + static struct intel_uncore_type snr_uncore_irp = {
1646 +@@ -5092,6 +5103,11 @@ static int icx_iio_set_mapping(struct intel_uncore_type *type)
1647 + return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
1648 + }
1649 +
1650 ++static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
1651 ++{
1652 ++ pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
1653 ++}
1654 ++
1655 + static struct intel_uncore_type icx_uncore_iio = {
1656 + .name = "iio",
1657 + .num_counters = 4,
1658 +@@ -5109,7 +5125,7 @@ static struct intel_uncore_type icx_uncore_iio = {
1659 + .attr_update = icx_iio_attr_update,
1660 + .get_topology = icx_iio_get_topology,
1661 + .set_mapping = icx_iio_set_mapping,
1662 +- .cleanup_mapping = skx_iio_cleanup_mapping,
1663 ++ .cleanup_mapping = icx_iio_cleanup_mapping,
1664 + };
1665 +
1666 + static struct intel_uncore_type icx_uncore_irp = {
1667 +diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
1668 +index 0607ec4f50914..da9321548f6f1 100644
1669 +--- a/arch/x86/include/asm/mce.h
1670 ++++ b/arch/x86/include/asm/mce.h
1671 +@@ -265,6 +265,7 @@ enum mcp_flags {
1672 + MCP_TIMESTAMP = BIT(0), /* log time stamp */
1673 + MCP_UC = BIT(1), /* log uncorrected errors */
1674 + MCP_DONTLOG = BIT(2), /* only clear, don't log */
1675 ++ MCP_QUEUE_LOG = BIT(3), /* only queue to genpool */
1676 + };
1677 + bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
1678 +
1679 +diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
1680 +index 22791aadc085c..8cb7816d03b4c 100644
1681 +--- a/arch/x86/kernel/cpu/mce/core.c
1682 ++++ b/arch/x86/kernel/cpu/mce/core.c
1683 +@@ -817,7 +817,10 @@ log_it:
1684 + if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
1685 + goto clear_it;
1686 +
1687 +- mce_log(&m);
1688 ++ if (flags & MCP_QUEUE_LOG)
1689 ++ mce_gen_pool_add(&m);
1690 ++ else
1691 ++ mce_log(&m);
1692 +
1693 + clear_it:
1694 + /*
1695 +@@ -1639,10 +1642,12 @@ static void __mcheck_cpu_init_generic(void)
1696 + m_fl = MCP_DONTLOG;
1697 +
1698 + /*
1699 +- * Log the machine checks left over from the previous reset.
1700 ++ * Log the machine checks left over from the previous reset. Log them
1701 ++ * only; do not start processing them yet. That happens in mcheck_late_init()
1702 ++ * when all consumers have been registered on the notifier chain.
1703 + */
1704 + bitmap_fill(all_banks, MAX_NR_BANKS);
1705 +- machine_check_poll(MCP_UC | m_fl, &all_banks);
1706 ++ machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
1707 +
1708 + cr4_set_bits(X86_CR4_MCE);
1709 +
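MCP_QUEUE_LOG makes machine_check_poll() stash records in the MCE genpool without notifying consumers; mcheck_late_init() later drains the pool once everyone is on the notifier chain. The dispatch added in core.c reduces to:

	if (flags & MCP_QUEUE_LOG)
		mce_gen_pool_add(&m);	/* queue only; processed later */
	else
		mce_log(&m);		/* queue and kick the notifier chain */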
1710 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
1711 +index 47b7652702397..c268fb59f7794 100644
1712 +--- a/arch/x86/kvm/mmu/mmu.c
1713 ++++ b/arch/x86/kvm/mmu/mmu.c
1714 +@@ -323,12 +323,6 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
1715 + static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
1716 + struct x86_exception *exception)
1717 + {
1718 +- /* Check if guest physical address doesn't exceed guest maximum */
1719 +- if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
1720 +- exception->error_code |= PFERR_RSVD_MASK;
1721 +- return UNMAPPED_GVA;
1722 +- }
1723 +-
1724 + return gpa;
1725 + }
1726 +
1727 +@@ -2852,6 +2846,7 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
1728 + kvm_pfn_t pfn, int max_level)
1729 + {
1730 + struct kvm_lpage_info *linfo;
1731 ++ int host_level;
1732 +
1733 + max_level = min(max_level, max_huge_page_level);
1734 + for ( ; max_level > PG_LEVEL_4K; max_level--) {
1735 +@@ -2863,7 +2858,8 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
1736 + if (max_level == PG_LEVEL_4K)
1737 + return PG_LEVEL_4K;
1738 +
1739 +- return host_pfn_mapping_level(kvm, gfn, pfn, slot);
1740 ++ host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
1741 ++ return min(host_level, max_level);
1742 + }
1743 +
1744 + int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
1745 +@@ -2887,17 +2883,12 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
1746 + if (!slot)
1747 + return PG_LEVEL_4K;
1748 +
1749 +- level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
1750 +- if (level == PG_LEVEL_4K)
1751 +- return level;
1752 +-
1753 +- *req_level = level = min(level, max_level);
1754 +-
1755 + /*
1756 + * Enforce the iTLB multihit workaround after capturing the requested
1757 + * level, which will be used to do precise, accurate accounting.
1758 + */
1759 +- if (huge_page_disallowed)
1760 ++ *req_level = level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
1761 ++ if (level == PG_LEVEL_4K || huge_page_disallowed)
1762 + return PG_LEVEL_4K;
1763 +
1764 + /*
1765 +diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
1766 +index d80cb122b5f38..0a1fa42d03aa6 100644
1767 +--- a/arch/x86/kvm/mmu/tdp_mmu.c
1768 ++++ b/arch/x86/kvm/mmu/tdp_mmu.c
1769 +@@ -412,6 +412,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
1770 + bool was_leaf = was_present && is_last_spte(old_spte, level);
1771 + bool is_leaf = is_present && is_last_spte(new_spte, level);
1772 + bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
1773 ++ bool was_large, is_large;
1774 +
1775 + WARN_ON(level > PT64_ROOT_MAX_LEVEL);
1776 + WARN_ON(level < PG_LEVEL_4K);
1777 +@@ -445,13 +446,6 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
1778 +
1779 + trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
1780 +
1781 +- if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
1782 +- if (is_large_pte(old_spte))
1783 +- atomic64_sub(1, (atomic64_t*)&kvm->stat.lpages);
1784 +- else
1785 +- atomic64_add(1, (atomic64_t*)&kvm->stat.lpages);
1786 +- }
1787 +-
1788 + /*
1789 + * The only times a SPTE should be changed from a non-present to
1790 + * non-present state is when an MMIO entry is installed/modified/
1791 +@@ -477,6 +471,18 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
1792 + return;
1793 + }
1794 +
1795 ++ /*
1796 ++ * Update large page stats if a large page is being zapped, created, or
1797 ++ * is replacing an existing shadow page.
1798 ++ */
1799 ++ was_large = was_leaf && is_large_pte(old_spte);
1800 ++ is_large = is_leaf && is_large_pte(new_spte);
1801 ++ if (was_large != is_large) {
1802 ++ if (was_large)
1803 ++ atomic64_sub(1, (atomic64_t *)&kvm->stat.lpages);
1804 ++ else
1805 ++ atomic64_add(1, (atomic64_t *)&kvm->stat.lpages);
1806 ++ }
1807 +
1808 + if (was_leaf && is_dirty_spte(old_spte) &&
1809 + (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
1810 +diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
1811 +index b3f77d18eb5aa..ac1803dac4357 100644
1812 +--- a/arch/x86/kvm/vmx/nested.c
1813 ++++ b/arch/x86/kvm/vmx/nested.c
1814 +@@ -2223,12 +2223,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
1815 + ~PIN_BASED_VMX_PREEMPTION_TIMER);
1816 +
1817 + /* Posted interrupts setting is only taken from vmcs12. */
1818 +- if (nested_cpu_has_posted_intr(vmcs12)) {
1819 ++ vmx->nested.pi_pending = false;
1820 ++ if (nested_cpu_has_posted_intr(vmcs12))
1821 + vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
1822 +- vmx->nested.pi_pending = false;
1823 +- } else {
1824 ++ else
1825 + exec_control &= ~PIN_BASED_POSTED_INTR;
1826 +- }
1827 + pin_controls_set(vmx, exec_control);
1828 +
1829 + /*
1830 +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
1831 +index 927a552393b96..256f8cab4b8b4 100644
1832 +--- a/arch/x86/kvm/vmx/vmx.c
1833 ++++ b/arch/x86/kvm/vmx/vmx.c
1834 +@@ -6368,6 +6368,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
1835 + {
1836 + struct vcpu_vmx *vmx = to_vmx(vcpu);
1837 +
1838 ++ if (vmx->emulation_required)
1839 ++ return;
1840 ++
1841 + if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
1842 + handle_external_interrupt_irqoff(vcpu);
1843 + else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
1844 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1845 +index e5d5c5ed7dd43..7ec7c2dce5065 100644
1846 +--- a/arch/x86/kvm/x86.c
1847 ++++ b/arch/x86/kvm/x86.c
1848 +@@ -3316,6 +3316,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1849 + if (!msr_info->host_initiated) {
1850 + s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
1851 + adjust_tsc_offset_guest(vcpu, adj);
1852 ++ /* Before returning to the guest, tsc_timestamp must be adjusted
1853 ++ * as well; otherwise the guest's per-CPU pvclock time could jump.
1854 ++ */
1855 ++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1856 + }
1857 + vcpu->arch.ia32_tsc_adjust_msr = data;
1858 + }
1859 +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1860 +index 7279559185630..673a634eadd9f 100644
1861 +--- a/block/bfq-iosched.c
1862 ++++ b/block/bfq-iosched.c
1863 +@@ -2361,6 +2361,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
1864 + __rq = bfq_find_rq_fmerge(bfqd, bio, q);
1865 + if (__rq && elv_bio_merge_ok(__rq, bio)) {
1866 + *req = __rq;
1867 ++
1868 ++ if (blk_discard_mergable(__rq))
1869 ++ return ELEVATOR_DISCARD_MERGE;
1870 + return ELEVATOR_FRONT_MERGE;
1871 + }
1872 +
1873 +diff --git a/block/bio.c b/block/bio.c
1874 +index 1fab762e079be..d95e3456ba0c5 100644
1875 +--- a/block/bio.c
1876 ++++ b/block/bio.c
1877 +@@ -979,6 +979,14 @@ static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
1878 + return 0;
1879 + }
1880 +
1881 ++static void bio_put_pages(struct page **pages, size_t size, size_t off)
1882 ++{
1883 ++ size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
1884 ++
1885 ++ for (i = 0; i < nr; i++)
1886 ++ put_page(pages[i]);
1887 ++}
1888 ++
1889 + #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
1890 +
1891 + /**
1892 +@@ -1023,8 +1031,10 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
1893 + if (same_page)
1894 + put_page(page);
1895 + } else {
1896 +- if (WARN_ON_ONCE(bio_full(bio, len)))
1897 +- return -EINVAL;
1898 ++ if (WARN_ON_ONCE(bio_full(bio, len))) {
1899 ++ bio_put_pages(pages + i, left, offset);
1900 ++ return -EINVAL;
1901 ++ }
1902 + __bio_add_page(bio, page, len, offset);
1903 + }
1904 + offset = 0;
1905 +@@ -1069,6 +1079,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
1906 + len = min_t(size_t, PAGE_SIZE - offset, left);
1907 + if (bio_add_hw_page(q, bio, page, len, offset,
1908 + max_append_sectors, &same_page) != len) {
1909 ++ bio_put_pages(pages + i, left, offset);
1910 + ret = -EINVAL;
1911 + break;
1912 + }
1913 +diff --git a/block/blk-crypto.c b/block/blk-crypto.c
1914 +index c5bdaafffa29f..103c2e2d50d67 100644
1915 +--- a/block/blk-crypto.c
1916 ++++ b/block/blk-crypto.c
1917 +@@ -332,7 +332,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
1918 + if (mode->keysize == 0)
1919 + return -EINVAL;
1920 +
1921 +- if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
1922 ++ if (dun_bytes == 0 || dun_bytes > mode->ivsize)
1923 + return -EINVAL;
1924 +
1925 + if (!is_power_of_2(data_unit_size))
1926 +diff --git a/block/blk-merge.c b/block/blk-merge.c
1927 +index a11b3b53717ef..eeba8422ae823 100644
1928 +--- a/block/blk-merge.c
1929 ++++ b/block/blk-merge.c
1930 +@@ -348,6 +348,8 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
1931 + trace_block_split(split, (*bio)->bi_iter.bi_sector);
1932 + submit_bio_noacct(*bio);
1933 + *bio = split;
1934 ++
1935 ++ blk_throtl_charge_bio_split(*bio);
1936 + }
1937 + }
1938 +
1939 +@@ -705,22 +707,6 @@ static void blk_account_io_merge_request(struct request *req)
1940 + }
1941 + }
1942 +
1943 +-/*
1944 +- * Two cases of handling DISCARD merge:
1945 +- * If max_discard_segments > 1, the driver takes every bio
1946 +- * as a range and send them to controller together. The ranges
1947 +- * needn't to be contiguous.
1948 +- * Otherwise, the bios/requests will be handled as same as
1949 +- * others which should be contiguous.
1950 +- */
1951 +-static inline bool blk_discard_mergable(struct request *req)
1952 +-{
1953 +- if (req_op(req) == REQ_OP_DISCARD &&
1954 +- queue_max_discard_segments(req->q) > 1)
1955 +- return true;
1956 +- return false;
1957 +-}
1958 +-
1959 + static enum elv_merge blk_try_req_merge(struct request *req,
1960 + struct request *next)
1961 + {
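The helper removed here is not deleted outright: the elevator hunks in this patch (bfq above, elevator.c and mq-deadline below) all call blk_discard_mergable(), so the upstream change promotes it to a shared inline in a header (include/linux/blkdev.h in the upstream commit — assumed here, as that hunk is not part of this excerpt). For reference, the helper body is unchanged:

	/*
	 * Two cases of handling DISCARD merge:
	 * If max_discard_segments > 1, the driver treats every bio as a
	 * range and sends them to the controller together; the ranges
	 * need not be contiguous. Otherwise the bios/requests are
	 * handled like any other and must be contiguous.
	 */
	static inline bool blk_discard_mergable(struct request *req)
	{
		if (req_op(req) == REQ_OP_DISCARD &&
		    queue_max_discard_segments(req->q) > 1)
			return true;
		return false;
	}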
1962 +diff --git a/block/blk-throttle.c b/block/blk-throttle.c
1963 +index b1b22d863bdf8..55c49015e5333 100644
1964 +--- a/block/blk-throttle.c
1965 ++++ b/block/blk-throttle.c
1966 +@@ -178,6 +178,9 @@ struct throtl_grp {
1967 + unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
1968 + unsigned long bio_cnt_reset_time;
1969 +
1970 ++ atomic_t io_split_cnt[2];
1971 ++ atomic_t last_io_split_cnt[2];
1972 ++
1973 + struct blkg_rwstat stat_bytes;
1974 + struct blkg_rwstat stat_ios;
1975 + };
1976 +@@ -777,6 +780,8 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
1977 + tg->bytes_disp[rw] = 0;
1978 + tg->io_disp[rw] = 0;
1979 +
1980 ++ atomic_set(&tg->io_split_cnt[rw], 0);
1981 ++
1982 + /*
1983 + * Previous slice has expired. We must have trimmed it after last
1984 + * bio dispatch. That means since start of last slice, we never used
1985 +@@ -799,6 +804,9 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
1986 + tg->io_disp[rw] = 0;
1987 + tg->slice_start[rw] = jiffies;
1988 + tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
1989 ++
1990 ++ atomic_set(&tg->io_split_cnt[rw], 0);
1991 ++
1992 + throtl_log(&tg->service_queue,
1993 + "[%c] new slice start=%lu end=%lu jiffies=%lu",
1994 + rw == READ ? 'R' : 'W', tg->slice_start[rw],
1995 +@@ -1031,6 +1039,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
1996 + jiffies + tg->td->throtl_slice);
1997 + }
1998 +
1999 ++ if (iops_limit != UINT_MAX)
2000 ++ tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
2001 ++
2002 + if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
2003 + tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
2004 + if (wait)
2005 +@@ -2052,12 +2063,14 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
2006 + }
2007 +
2008 + if (tg->iops[READ][LIMIT_LOW]) {
2009 ++ tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
2010 + iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2011 + if (iops >= tg->iops[READ][LIMIT_LOW])
2012 + tg->last_low_overflow_time[READ] = now;
2013 + }
2014 +
2015 + if (tg->iops[WRITE][LIMIT_LOW]) {
2016 ++ tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
2017 + iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2018 + if (iops >= tg->iops[WRITE][LIMIT_LOW])
2019 + tg->last_low_overflow_time[WRITE] = now;
2020 +@@ -2176,6 +2189,25 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
2021 + }
2022 + #endif
2023 +
2024 ++void blk_throtl_charge_bio_split(struct bio *bio)
2025 ++{
2026 ++ struct blkcg_gq *blkg = bio->bi_blkg;
2027 ++ struct throtl_grp *parent = blkg_to_tg(blkg);
2028 ++ struct throtl_service_queue *parent_sq;
2029 ++ bool rw = bio_data_dir(bio);
2030 ++
2031 ++ do {
2032 ++ if (!parent->has_rules[rw])
2033 ++ break;
2034 ++
2035 ++ atomic_inc(&parent->io_split_cnt[rw]);
2036 ++ atomic_inc(&parent->last_io_split_cnt[rw]);
2037 ++
2038 ++ parent_sq = parent->service_queue.parent_sq;
2039 ++ parent = sq_to_tg(parent_sq);
2040 ++ } while (parent);
2041 ++}
2042 ++
2043 + bool blk_throtl_bio(struct bio *bio)
2044 + {
2045 + struct request_queue *q = bio->bi_bdev->bd_disk->queue;
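The split counters are plain atomics because the producer side, blk_throtl_charge_bio_split() called from __blk_queue_split(), runs outside the throttle locks, while the consumers fold the counts into the dispatch statistics under the slice logic. atomic_xchg() makes that fold a read-and-reset, so concurrent increments are neither lost nor double-counted. The drain idiom, isolated:

	/* fold pending split charges into the dispatched-IO count;
	 * the exchange resets the counter atomically */
	tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);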
2046 +diff --git a/block/blk.h b/block/blk.h
2047 +index cb01429c162c6..f10cc9b2c27f7 100644
2048 +--- a/block/blk.h
2049 ++++ b/block/blk.h
2050 +@@ -289,11 +289,13 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
2051 + extern int blk_throtl_init(struct request_queue *q);
2052 + extern void blk_throtl_exit(struct request_queue *q);
2053 + extern void blk_throtl_register_queue(struct request_queue *q);
2054 ++extern void blk_throtl_charge_bio_split(struct bio *bio);
2055 + bool blk_throtl_bio(struct bio *bio);
2056 + #else /* CONFIG_BLK_DEV_THROTTLING */
2057 + static inline int blk_throtl_init(struct request_queue *q) { return 0; }
2058 + static inline void blk_throtl_exit(struct request_queue *q) { }
2059 + static inline void blk_throtl_register_queue(struct request_queue *q) { }
2060 ++static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
2061 + static inline bool blk_throtl_bio(struct bio *bio) { return false; }
2062 + #endif /* CONFIG_BLK_DEV_THROTTLING */
2063 + #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2064 +diff --git a/block/elevator.c b/block/elevator.c
2065 +index 52ada14cfe452..a5fe2615ec0f1 100644
2066 +--- a/block/elevator.c
2067 ++++ b/block/elevator.c
2068 +@@ -336,6 +336,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
2069 + __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
2070 + if (__rq && elv_bio_merge_ok(__rq, bio)) {
2071 + *req = __rq;
2072 ++
2073 ++ if (blk_discard_mergable(__rq))
2074 ++ return ELEVATOR_DISCARD_MERGE;
2075 + return ELEVATOR_BACK_MERGE;
2076 + }
2077 +
2078 +diff --git a/block/mq-deadline.c b/block/mq-deadline.c
2079 +index 36920670dccc3..3c3693c34f061 100644
2080 +--- a/block/mq-deadline.c
2081 ++++ b/block/mq-deadline.c
2082 +@@ -629,6 +629,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
2083 +
2084 + if (elv_bio_merge_ok(__rq, bio)) {
2085 + *rq = __rq;
2086 ++ if (blk_discard_mergable(__rq))
2087 ++ return ELEVATOR_DISCARD_MERGE;
2088 + return ELEVATOR_FRONT_MERGE;
2089 + }
2090 + }
2091 +diff --git a/certs/Makefile b/certs/Makefile
2092 +index 359239a0ee9e3..f9344e52ecdae 100644
2093 +--- a/certs/Makefile
2094 ++++ b/certs/Makefile
2095 +@@ -57,11 +57,19 @@ endif
2096 + redirect_openssl = 2>&1
2097 + quiet_redirect_openssl = 2>&1
2098 + silent_redirect_openssl = 2>/dev/null
2099 ++openssl_available = $(shell openssl help 2>/dev/null && echo yes)
2100 +
2101 + # We do it this way rather than having a boolean option for enabling an
2102 + # external private key, because 'make randconfig' might enable such a
2103 + # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
2104 + ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
2105 ++
2106 ++ifeq ($(openssl_available),yes)
2107 ++X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
2108 ++
2109 ++$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
2110 ++endif
2111 ++
2112 + $(obj)/signing_key.pem: $(obj)/x509.genkey
2113 + @$(kecho) "###"
2114 + @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
2115 +diff --git a/crypto/ecc.h b/crypto/ecc.h
2116 +index a006132646a43..1350e8eb6ac23 100644
2117 +--- a/crypto/ecc.h
2118 ++++ b/crypto/ecc.h
2119 +@@ -27,6 +27,7 @@
2120 + #define _CRYPTO_ECC_H
2121 +
2122 + #include <crypto/ecc_curve.h>
2123 ++#include <asm/unaligned.h>
2124 +
2125 + /* One digit is u64 qword. */
2126 + #define ECC_CURVE_NIST_P192_DIGITS 3
2127 +@@ -46,13 +47,13 @@
2128 + * @out: Output array
2129 + * @ndigits: Number of digits to copy
2130 + */
2131 +-static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
2132 ++static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
2133 + {
2134 + const __be64 *src = (__force __be64 *)in;
2135 + int i;
2136 +
2137 + for (i = 0; i < ndigits; i++)
2138 +- out[i] = be64_to_cpu(src[ndigits - 1 - i]);
2139 ++ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
2140 + }
2141 +
2142 + /**
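get_unaligned_be64() lets ecc_swap_digits() accept buffers without 8-byte alignment, which is why the input parameter is relaxed to void *. A worked example for a NIST P-192 value (3 digits), assuming a caller-supplied big-endian byte buffer (the zero initializer is just a placeholder):

	const u8 raw[3 * sizeof(u64)] = { 0 };	/* big-endian P-192 scalar, possibly unaligned */
	u64 digits[ECC_CURVE_NIST_P192_DIGITS];

	ecc_swap_digits(raw, digits, ECC_CURVE_NIST_P192_DIGITS);
	/* digits[0] now holds the least significant 64-bit word */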
2143 +diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
2144 +index f8d06da78e4f3..6863e57b088d5 100644
2145 +--- a/crypto/tcrypt.c
2146 ++++ b/crypto/tcrypt.c
2147 +@@ -290,6 +290,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
2148 + }
2149 +
2150 + ret = crypto_aead_setauthsize(tfm, authsize);
2151 ++ if (ret) {
2152 ++ pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
2153 ++ ret);
2154 ++ goto out_free_tfm;
2155 ++ }
2156 +
2157 + for (i = 0; i < num_mb; ++i)
2158 + if (testmgr_alloc_buf(data[i].xbuf)) {
2159 +@@ -315,7 +320,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
2160 + for (i = 0; i < num_mb; ++i) {
2161 + data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
2162 + if (!data[i].req) {
2163 +- pr_err("alg: skcipher: Failed to allocate request for %s\n",
2164 ++ pr_err("alg: aead: Failed to allocate request for %s\n",
2165 + algo);
2166 + while (i--)
2167 + aead_request_free(data[i].req);
2168 +@@ -567,13 +572,19 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
2169 + sgout = &sg[9];
2170 +
2171 + tfm = crypto_alloc_aead(algo, 0, 0);
2172 +-
2173 + if (IS_ERR(tfm)) {
2174 + pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
2175 + PTR_ERR(tfm));
2176 + goto out_notfm;
2177 + }
2178 +
2179 ++ ret = crypto_aead_setauthsize(tfm, authsize);
2180 ++ if (ret) {
2181 ++ pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
2182 ++ ret);
2183 ++ goto out_noreq;
2184 ++ }
2185 ++
2186 + crypto_init_wait(&wait);
2187 + printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
2188 + get_driver_name(crypto_aead, tfm), e);
2189 +@@ -611,8 +622,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
2190 + break;
2191 + }
2192 + }
2193 ++
2194 + ret = crypto_aead_setkey(tfm, key, *keysize);
2195 +- ret = crypto_aead_setauthsize(tfm, authsize);
2196 ++ if (ret) {
2197 ++ pr_err("setkey() failed flags=%x: %d\n",
2198 ++ crypto_aead_get_flags(tfm), ret);
2199 ++ goto out;
2200 ++ }
2201 +
2202 + iv_len = crypto_aead_ivsize(tfm);
2203 + if (iv_len)
2204 +@@ -622,15 +638,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
2205 + printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
2206 + i, *keysize * 8, bs);
2207 +
2208 +-
2209 + memset(tvmem[0], 0xff, PAGE_SIZE);
2210 +
2211 +- if (ret) {
2212 +- pr_err("setkey() failed flags=%x\n",
2213 +- crypto_aead_get_flags(tfm));
2214 +- goto out;
2215 +- }
2216 +-
2217 + sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
2218 + assoc, aad_size);
2219 +
2220 +diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
2221 +index 1f6007abcf18e..89c22bc550570 100644
2222 +--- a/drivers/acpi/prmt.c
2223 ++++ b/drivers/acpi/prmt.c
2224 +@@ -288,10 +288,18 @@ invalid_guid:
2225 +
2226 + void __init init_prmt(void)
2227 + {
2228 ++ struct acpi_table_header *tbl;
2229 + acpi_status status;
2230 +- int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
2231 ++ int mc;
2232 ++
2233 ++ status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);
2234 ++ if (ACPI_FAILURE(status))
2235 ++ return;
2236 ++
2237 ++ mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
2238 + sizeof (struct acpi_table_prmt_header),
2239 + 0, acpi_parse_prmt, 0);
2240 ++ acpi_put_table(tbl);
2241 + /*
2242 + * Return immediately if PRMT table is not present or no PRM module found.
2243 + */
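The added acpi_get_table() probe returns early when the platform has no PRMT at all, and the matching acpi_put_table() drops the reference once parsing is done. The bracket, reduced to its shape (sketch):

	struct acpi_table_header *tbl;
	acpi_status status;

	status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return;		/* table absent: nothing to parse */
	/* parse the subtables while the reference is held */
	acpi_put_table(tbl);	/* release the table reference */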
2244 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
2245 +index 61c762961ca8e..44f434acfce08 100644
2246 +--- a/drivers/ata/libata-core.c
2247 ++++ b/drivers/ata/libata-core.c
2248 +@@ -5573,7 +5573,7 @@ int ata_host_start(struct ata_host *host)
2249 + have_stop = 1;
2250 + }
2251 +
2252 +- if (host->ops->host_stop)
2253 ++ if (host->ops && host->ops->host_stop)
2254 + have_stop = 1;
2255 +
2256 + if (have_stop) {
2257 +diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
2258 +index 2e5e7c9939334..8b2a0eb3f32a4 100644
2259 +--- a/drivers/auxdisplay/hd44780.c
2260 ++++ b/drivers/auxdisplay/hd44780.c
2261 +@@ -323,8 +323,8 @@ static int hd44780_remove(struct platform_device *pdev)
2262 + {
2263 + struct charlcd *lcd = platform_get_drvdata(pdev);
2264 +
2265 +- kfree(lcd->drvdata);
2266 + charlcd_unregister(lcd);
2267 ++ kfree(lcd->drvdata);
2268 +
2269 + kfree(lcd);
2270 + return 0;
2271 +diff --git a/drivers/base/dd.c b/drivers/base/dd.c
2272 +index 437cd61343b26..68ea1f949daa9 100644
2273 +--- a/drivers/base/dd.c
2274 ++++ b/drivers/base/dd.c
2275 +@@ -580,7 +580,8 @@ re_probe:
2276 + goto probe_failed;
2277 + }
2278 +
2279 +- if (driver_sysfs_add(dev)) {
2280 ++ ret = driver_sysfs_add(dev);
2281 ++ if (ret) {
2282 + pr_err("%s: driver_sysfs_add(%s) failed\n",
2283 + __func__, dev_name(dev));
2284 + goto probe_failed;
2285 +@@ -602,15 +603,18 @@ re_probe:
2286 + goto probe_failed;
2287 + }
2288 +
2289 +- if (device_add_groups(dev, drv->dev_groups)) {
2290 ++ ret = device_add_groups(dev, drv->dev_groups);
2291 ++ if (ret) {
2292 + dev_err(dev, "device_add_groups() failed\n");
2293 + goto dev_groups_failed;
2294 + }
2295 +
2296 +- if (dev_has_sync_state(dev) &&
2297 +- device_create_file(dev, &dev_attr_state_synced)) {
2298 +- dev_err(dev, "state_synced sysfs add failed\n");
2299 +- goto dev_sysfs_state_synced_failed;
2300 ++ if (dev_has_sync_state(dev)) {
2301 ++ ret = device_create_file(dev, &dev_attr_state_synced);
2302 ++ if (ret) {
2303 ++ dev_err(dev, "state_synced sysfs add failed\n");
2304 ++ goto dev_sysfs_state_synced_failed;
2305 ++ }
2306 + }
2307 +
2308 + if (test_remove) {
2309 +diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
2310 +index 68c549d712304..bdbedc6660a87 100644
2311 +--- a/drivers/base/firmware_loader/main.c
2312 ++++ b/drivers/base/firmware_loader/main.c
2313 +@@ -165,7 +165,7 @@ static inline int fw_state_wait(struct fw_priv *fw_priv)
2314 + return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
2315 + }
2316 +
2317 +-static int fw_cache_piggyback_on_request(const char *name);
2318 ++static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
2319 +
2320 + static struct fw_priv *__allocate_fw_priv(const char *fw_name,
2321 + struct firmware_cache *fwc,
2322 +@@ -707,10 +707,8 @@ int assign_fw(struct firmware *fw, struct device *device)
2323 + * on request firmware.
2324 + */
2325 + if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
2326 +- fw_priv->fwc->state == FW_LOADER_START_CACHE) {
2327 +- if (fw_cache_piggyback_on_request(fw_priv->fw_name))
2328 +- kref_get(&fw_priv->ref);
2329 +- }
2330 ++ fw_priv->fwc->state == FW_LOADER_START_CACHE)
2331 ++ fw_cache_piggyback_on_request(fw_priv);
2332 +
2333 + /* pass the pages buffer to driver at the last minute */
2334 + fw_set_page_data(fw_priv, fw);
2335 +@@ -1259,11 +1257,11 @@ static int __fw_entry_found(const char *name)
2336 + return 0;
2337 + }
2338 +
2339 +-static int fw_cache_piggyback_on_request(const char *name)
2340 ++static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
2341 + {
2342 +- struct firmware_cache *fwc = &fw_cache;
2343 ++ const char *name = fw_priv->fw_name;
2344 ++ struct firmware_cache *fwc = fw_priv->fwc;
2345 + struct fw_cache_entry *fce;
2346 +- int ret = 0;
2347 +
2348 + spin_lock(&fwc->name_lock);
2349 + if (__fw_entry_found(name))
2350 +@@ -1271,13 +1269,12 @@ static int fw_cache_piggyback_on_request(const char *name)
2351 +
2352 + fce = alloc_fw_cache_entry(name);
2353 + if (fce) {
2354 +- ret = 1;
2355 + list_add(&fce->list, &fwc->fw_names);
2356 ++ kref_get(&fw_priv->ref);
2357 + pr_debug("%s: fw: %s\n", __func__, name);
2358 + }
2359 + found:
2360 + spin_unlock(&fwc->name_lock);
2361 +- return ret;
2362 + }
2363 +
2364 + static void free_fw_cache_entry(struct fw_cache_entry *fce)
2365 +@@ -1508,9 +1505,8 @@ static inline void unregister_fw_pm_ops(void)
2366 + unregister_pm_notifier(&fw_cache.pm_notify);
2367 + }
2368 + #else
2369 +-static int fw_cache_piggyback_on_request(const char *name)
2370 ++static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
2371 + {
2372 +- return 0;
2373 + }
2374 + static inline int register_fw_pm_ops(void)
2375 + {
2376 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
2377 +index fe3e38dd5324f..2fc826e97591e 100644
2378 +--- a/drivers/base/regmap/regmap.c
2379 ++++ b/drivers/base/regmap/regmap.c
2380 +@@ -1667,7 +1667,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
2381 + if (ret) {
2382 + dev_err(map->dev,
2383 + "Error in caching of register: %x ret: %d\n",
2384 +- reg + i, ret);
2385 ++ reg + regmap_get_offset(map, i), ret);
2386 + return ret;
2387 + }
2388 + }
2389 +diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
2390 +index 6535614a7dc13..1df2b5801c3bc 100644
2391 +--- a/drivers/bcma/main.c
2392 ++++ b/drivers/bcma/main.c
2393 +@@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq);
2394 +
2395 + void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
2396 + {
2397 ++ device_initialize(&core->dev);
2398 + core->dev.release = bcma_release_core_dev;
2399 + core->dev.bus = &bcma_bus_type;
2400 + dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
2401 +@@ -277,11 +278,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
2402 + {
2403 + int err;
2404 +
2405 +- err = device_register(&core->dev);
2406 ++ err = device_add(&core->dev);
2407 + if (err) {
2408 + bcma_err(bus, "Could not register dev for core 0x%03X\n",
2409 + core->id.id);
2410 +- put_device(&core->dev);
2411 + return;
2412 + }
2413 + core->dev_registered = true;
2414 +@@ -372,7 +372,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
2415 + /* Now no one uses internally-handled cores; we can free them */
2416 + list_for_each_entry_safe(core, tmp, &bus->cores, list) {
2417 + list_del(&core->list);
2418 +- kfree(core);
2419 ++ put_device(&core->dev);
2420 + }
2421 + }
2422 +
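The switch from device_register() to device_initialize() plus device_add() is the canonical split-registration pattern: once device_initialize() has run, the embedded refcount is live, so every failure and teardown path must use put_device(), which ends in the ->release callback, never kfree(). A sketch of the generic lifecycle (not the exact bcma flow):

	device_initialize(dev);		/* refcount live; ->release armed */
	err = device_add(dev);		/* device_register == these two steps */
	if (err)
		put_device(dev);	/* never kfree() after initialize */
	/* ... device in use ... */
	device_del(dev);		/* tear down the sysfs presence */
	put_device(dev);		/* final ref drop calls ->release */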
2423 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
2424 +index 19f5d5a8b16a3..93708b1938e80 100644
2425 +--- a/drivers/block/nbd.c
2426 ++++ b/drivers/block/nbd.c
2427 +@@ -49,6 +49,7 @@
2428 +
2429 + static DEFINE_IDR(nbd_index_idr);
2430 + static DEFINE_MUTEX(nbd_index_mutex);
2431 ++static struct workqueue_struct *nbd_del_wq;
2432 + static int nbd_total_devices = 0;
2433 +
2434 + struct nbd_sock {
2435 +@@ -113,6 +114,7 @@ struct nbd_device {
2436 + struct mutex config_lock;
2437 + struct gendisk *disk;
2438 + struct workqueue_struct *recv_workq;
2439 ++ struct work_struct remove_work;
2440 +
2441 + struct list_head list;
2442 + struct task_struct *task_recv;
2443 +@@ -233,7 +235,7 @@ static const struct device_attribute backend_attr = {
2444 + .show = backend_show,
2445 + };
2446 +
2447 +-static void nbd_dev_remove(struct nbd_device *nbd)
2448 ++static void nbd_del_disk(struct nbd_device *nbd)
2449 + {
2450 + struct gendisk *disk = nbd->disk;
2451 +
2452 +@@ -242,24 +244,60 @@ static void nbd_dev_remove(struct nbd_device *nbd)
2453 + blk_cleanup_disk(disk);
2454 + blk_mq_free_tag_set(&nbd->tag_set);
2455 + }
2456 ++}
2457 ++
2458 ++/*
2459 ++ * Place this last, just before the nbd is freed, to make
2460 ++ * sure that the disk and the related kobject are also
2461 ++ * completely removed, avoiding duplicate creation of the
2462 ++ * same one.
2463 ++ */
2464 ++static void nbd_notify_destroy_completion(struct nbd_device *nbd)
2465 ++{
2466 ++ if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
2467 ++ nbd->destroy_complete)
2468 ++ complete(nbd->destroy_complete);
2469 ++}
2470 +
2471 ++static void nbd_dev_remove_work(struct work_struct *work)
2472 ++{
2473 ++ struct nbd_device *nbd =
2474 ++ container_of(work, struct nbd_device, remove_work);
2475 ++
2476 ++ nbd_del_disk(nbd);
2477 ++
2478 ++ mutex_lock(&nbd_index_mutex);
2479 + /*
2480 +- * Place this in the last just before the nbd is freed to
2481 +- * make sure that the disk and the related kobject are also
2482 +- * totally removed to avoid duplicate creation of the same
2483 +- * one.
2484 ++ * Remove from idr after del_gendisk() completes,
2485 ++ * so if the same id is reused, the following
2486 ++ * add_disk() will succeed.
2487 + */
2488 +- if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
2489 +- complete(nbd->destroy_complete);
2490 ++ idr_remove(&nbd_index_idr, nbd->index);
2491 ++
2492 ++ nbd_notify_destroy_completion(nbd);
2493 ++ mutex_unlock(&nbd_index_mutex);
2494 +
2495 + kfree(nbd);
2496 + }
2497 +
2498 ++static void nbd_dev_remove(struct nbd_device *nbd)
2499 ++{
2500 ++ /* Call del_gendisk() asynchronously to prevent deadlock */
2501 ++ if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) {
2502 ++ queue_work(nbd_del_wq, &nbd->remove_work);
2503 ++ return;
2504 ++ }
2505 ++
2506 ++ nbd_del_disk(nbd);
2507 ++ idr_remove(&nbd_index_idr, nbd->index);
2508 ++ nbd_notify_destroy_completion(nbd);
2509 ++ kfree(nbd);
2510 ++}
2511 ++
2512 + static void nbd_put(struct nbd_device *nbd)
2513 + {
2514 + if (refcount_dec_and_mutex_lock(&nbd->refs,
2515 + &nbd_index_mutex)) {
2516 +- idr_remove(&nbd_index_idr, nbd->index);
2517 + nbd_dev_remove(nbd);
2518 + mutex_unlock(&nbd_index_mutex);
2519 + }
2520 +@@ -1388,6 +1426,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
2521 + unsigned int cmd, unsigned long arg)
2522 + {
2523 + struct nbd_config *config = nbd->config;
2524 ++ loff_t bytesize;
2525 +
2526 + switch (cmd) {
2527 + case NBD_DISCONNECT:
2528 +@@ -1402,8 +1441,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
2529 + case NBD_SET_SIZE:
2530 + return nbd_set_size(nbd, arg, config->blksize);
2531 + case NBD_SET_SIZE_BLOCKS:
2532 +- return nbd_set_size(nbd, arg * config->blksize,
2533 +- config->blksize);
2534 ++ if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
2535 ++ return -EINVAL;
2536 ++ return nbd_set_size(nbd, bytesize, config->blksize);
2537 + case NBD_SET_TIMEOUT:
2538 + nbd_set_cmd_timeout(nbd, arg);
2539 + return 0;
2540 +@@ -1683,6 +1723,7 @@ static int nbd_dev_add(int index)
2541 + nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
2542 + BLK_MQ_F_BLOCKING;
2543 + nbd->tag_set.driver_data = nbd;
2544 ++ INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
2545 + nbd->destroy_complete = NULL;
2546 + nbd->backend = NULL;
2547 +
2548 +@@ -1729,7 +1770,17 @@ static int nbd_dev_add(int index)
2549 + refcount_set(&nbd->refs, 1);
2550 + INIT_LIST_HEAD(&nbd->list);
2551 + disk->major = NBD_MAJOR;
2552 ++
2553 ++ /* A too-large first_minor can cause duplicate creation of
2554 ++ * sysfs files/links, since first_minor will be truncated to
2555 ++ * a byte in __device_add_disk().
2556 ++ */
2557 + disk->first_minor = index << part_shift;
2558 ++ if (disk->first_minor > 0xff) {
2559 ++ err = -EINVAL;
2560 ++ goto out_free_idr;
2561 ++ }
2562 ++
2563 + disk->minors = 1 << part_shift;
2564 + disk->fops = &nbd_fops;
2565 + disk->private_data = nbd;
2566 +@@ -2424,7 +2475,14 @@ static int __init nbd_init(void)
2567 + if (register_blkdev(NBD_MAJOR, "nbd"))
2568 + return -EIO;
2569 +
2570 ++ nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
2571 ++ if (!nbd_del_wq) {
2572 ++ unregister_blkdev(NBD_MAJOR, "nbd");
2573 ++ return -ENOMEM;
2574 ++ }
2575 ++
2576 + if (genl_register_family(&nbd_genl_family)) {
2577 ++ destroy_workqueue(nbd_del_wq);
2578 + unregister_blkdev(NBD_MAJOR, "nbd");
2579 + return -EINVAL;
2580 + }
2581 +@@ -2442,7 +2500,10 @@ static int nbd_exit_cb(int id, void *ptr, void *data)
2582 + struct list_head *list = (struct list_head *)data;
2583 + struct nbd_device *nbd = ptr;
2584 +
2585 +- list_add_tail(&nbd->list, list);
2586 ++ /* Skip nbd that is being removed asynchronously */
2587 ++ if (refcount_read(&nbd->refs))
2588 ++ list_add_tail(&nbd->list, list);
2589 ++
2590 + return 0;
2591 + }
2592 +
2593 +@@ -2465,6 +2526,9 @@ static void __exit nbd_cleanup(void)
2594 + nbd_put(nbd);
2595 + }
2596 +
2597 ++ /* Also wait until nbd_dev_remove_work() completes */
2598 ++ destroy_workqueue(nbd_del_wq);
2599 ++
2600 + idr_destroy(&nbd_index_idr);
2601 + genl_unregister_family(&nbd_genl_family);
2602 + unregister_blkdev(NBD_MAJOR, "nbd");
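Deleting the disk from inside the disconnect path can deadlock, so DESTROY_ON_DISCONNECT devices are now torn down from a dedicated unbound workqueue; destroy_workqueue() at module exit doubles as the flush that waits for in-flight removals. The moving parts, in order (a sketch using the names above):

	/* setup (module init / device add) */
	nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
	INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);

	/* trigger: defer the teardown instead of running it inline */
	queue_work(nbd_del_wq, &nbd->remove_work);

	/* module exit: destroy_workqueue() flushes pending work first */
	destroy_workqueue(nbd_del_wq);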
2603 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2604 +index 0255bf243ce55..bd37d6fb88c26 100644
2605 +--- a/drivers/bluetooth/btusb.c
2606 ++++ b/drivers/bluetooth/btusb.c
2607 +@@ -2921,10 +2921,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
2608 + /* Read the Intel supported features and if new exception formats
2609 + * supported, need to load the additional DDC config to enable.
2610 + */
2611 +- btintel_read_debug_features(hdev, &features);
2612 +-
2613 +- /* Set DDC mask for available debug features */
2614 +- btintel_set_debug_features(hdev, &features);
2615 ++ err = btintel_read_debug_features(hdev, &features);
2616 ++ if (!err) {
2617 ++ /* Set DDC mask for available debug features */
2618 ++ btintel_set_debug_features(hdev, &features);
2619 ++ }
2620 +
2621 + /* Read the Intel version information after loading the FW */
2622 + err = btintel_read_version(hdev, &ver);
2623 +@@ -3017,10 +3018,11 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev)
2624 + /* Read the Intel supported features and if new exception formats
2625 + * supported, need to load the additional DDC config to enable.
2626 + */
2627 +- btintel_read_debug_features(hdev, &features);
2628 +-
2629 +- /* Set DDC mask for available debug features */
2630 +- btintel_set_debug_features(hdev, &features);
2631 ++ err = btintel_read_debug_features(hdev, &features);
2632 ++ if (!err) {
2633 ++ /* Set DDC mask for available debug features */
2634 ++ btintel_set_debug_features(hdev, &features);
2635 ++ }
2636 +
2637 + /* Read the Intel version information after loading the FW */
2638 + err = btintel_read_version_tlv(hdev, &version);
2639 +diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
2640 +index 4308f9ca7a43d..d6ba644f6b00a 100644
2641 +--- a/drivers/char/tpm/Kconfig
2642 ++++ b/drivers/char/tpm/Kconfig
2643 +@@ -89,7 +89,6 @@ config TCG_TIS_SYNQUACER
2644 + config TCG_TIS_I2C_CR50
2645 + tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
2646 + depends on I2C
2647 +- select TCG_CR50
2648 + help
2649 + This is a driver for the Google cr50 I2C TPM interface which is a
2650 + custom microcontroller and requires a custom i2c protocol interface
2651 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
2652 +index 903604769de99..3af4c07a9342f 100644
2653 +--- a/drivers/char/tpm/tpm_ibmvtpm.c
2654 ++++ b/drivers/char/tpm/tpm_ibmvtpm.c
2655 +@@ -106,17 +106,12 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
2656 + {
2657 + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
2658 + u16 len;
2659 +- int sig;
2660 +
2661 + if (!ibmvtpm->rtce_buf) {
2662 + dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
2663 + return 0;
2664 + }
2665 +
2666 +- sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
2667 +- if (sig)
2668 +- return -EINTR;
2669 +-
2670 + len = ibmvtpm->res_len;
2671 +
2672 + if (count < len) {
2673 +@@ -237,7 +232,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
2674 + * set the processing flag before the Hcall, since we may get the
2675 + * result (interrupt) before even being able to check rc.
2676 + */
2677 +- ibmvtpm->tpm_processing_cmd = true;
2678 ++ ibmvtpm->tpm_processing_cmd = 1;
2679 +
2680 + again:
2681 + rc = ibmvtpm_send_crq(ibmvtpm->vdev,
2682 +@@ -255,7 +250,7 @@ again:
2683 + goto again;
2684 + }
2685 + dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
2686 +- ibmvtpm->tpm_processing_cmd = false;
2687 ++ ibmvtpm->tpm_processing_cmd = 0;
2688 + }
2689 +
2690 + spin_unlock(&ibmvtpm->rtce_lock);
2691 +@@ -269,7 +264,9 @@ static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
2692 +
2693 + static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
2694 + {
2695 +- return 0;
2696 ++ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
2697 ++
2698 ++ return ibmvtpm->tpm_processing_cmd;
2699 + }
2700 +
2701 + /**
2702 +@@ -457,7 +454,7 @@ static const struct tpm_class_ops tpm_ibmvtpm = {
2703 + .send = tpm_ibmvtpm_send,
2704 + .cancel = tpm_ibmvtpm_cancel,
2705 + .status = tpm_ibmvtpm_status,
2706 +- .req_complete_mask = 0,
2707 ++ .req_complete_mask = 1,
2708 + .req_complete_val = 0,
2709 + .req_canceled = tpm_ibmvtpm_req_canceled,
2710 + };
2711 +@@ -550,7 +547,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
2712 + case VTPM_TPM_COMMAND_RES:
2713 + /* len of the data in rtce buffer */
2714 + ibmvtpm->res_len = be16_to_cpu(crq->len);
2715 +- ibmvtpm->tpm_processing_cmd = false;
2716 ++ ibmvtpm->tpm_processing_cmd = 0;
2717 + wake_up_interruptible(&ibmvtpm->wq);
2718 + return;
2719 + default:
2720 +@@ -688,8 +685,15 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
2721 + goto init_irq_cleanup;
2722 + }
2723 +
2724 +- if (!strcmp(id->compat, "IBM,vtpm20")) {
2725 ++
2726 ++ if (!strcmp(id->compat, "IBM,vtpm20"))
2727 + chip->flags |= TPM_CHIP_FLAG_TPM2;
2728 ++
2729 ++ rc = tpm_get_timeouts(chip);
2730 ++ if (rc)
2731 ++ goto init_irq_cleanup;
2732 ++
2733 ++ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
2734 + rc = tpm2_get_cc_attrs_tbl(chip);
2735 + if (rc)
2736 + goto init_irq_cleanup;
2737 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
2738 +index b92aa7d3e93e7..51198b137461e 100644
2739 +--- a/drivers/char/tpm/tpm_ibmvtpm.h
2740 ++++ b/drivers/char/tpm/tpm_ibmvtpm.h
2741 +@@ -41,7 +41,7 @@ struct ibmvtpm_dev {
2742 + wait_queue_head_t wq;
2743 + u16 res_len;
2744 + u32 vtpm_version;
2745 +- bool tpm_processing_cmd;
2746 ++ u8 tpm_processing_cmd;
2747 + };
2748 +
2749 + #define CRQ_RES_BUF_SIZE PAGE_SIZE
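With tpm_processing_cmd now a u8 exposed through ->status, and req_complete_mask/req_complete_val set to 1/0, the TPM core's completion poll does the work the removed wait_event_interruptible() used to do in ->recv. The core-side test is essentially (a simplified sketch of the generic TPM core logic; the real loop also handles timeouts and cancellation):

	for (;;) {
		u8 status = chip->ops->status(chip);

		if ((status & chip->ops->req_complete_mask) ==
		    chip->ops->req_complete_val)
			break;	/* command finished: ->recv may run */
		/* otherwise sleep briefly and poll again */
	}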
2750 +diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
2751 +index 47680237d0beb..8bc893df47364 100644
2752 +--- a/drivers/clk/mvebu/kirkwood.c
2753 ++++ b/drivers/clk/mvebu/kirkwood.c
2754 +@@ -265,6 +265,7 @@ static const char *powersave_parents[] = {
2755 + static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = {
2756 + { "powersave", powersave_parents, ARRAY_SIZE(powersave_parents),
2757 + 11, 1, 0 },
2758 ++ { }
2759 + };
2760 +
2761 + static struct clk *clk_muxing_get_src(
2762 +diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
2763 +index d7ed99f0001f8..dd0956ad969c1 100644
2764 +--- a/drivers/clocksource/sh_cmt.c
2765 ++++ b/drivers/clocksource/sh_cmt.c
2766 +@@ -579,7 +579,8 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
2767 + ch->flags |= flag;
2768 +
2769 + /* setup timeout if no clockevent */
2770 +- if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
2771 ++ if (ch->cmt->num_channels == 1 &&
2772 ++ flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
2773 + __sh_cmt_set_next(ch, ch->max_match_value);
2774 + out:
2775 + raw_spin_unlock_irqrestore(&ch->lock, flags);
2776 +@@ -621,20 +622,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
2777 + static u64 sh_cmt_clocksource_read(struct clocksource *cs)
2778 + {
2779 + struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
2780 +- unsigned long flags;
2781 + u32 has_wrapped;
2782 +- u64 value;
2783 +- u32 raw;
2784 +
2785 +- raw_spin_lock_irqsave(&ch->lock, flags);
2786 +- value = ch->total_cycles;
2787 +- raw = sh_cmt_get_counter(ch, &has_wrapped);
2788 ++ if (ch->cmt->num_channels == 1) {
2789 ++ unsigned long flags;
2790 ++ u64 value;
2791 ++ u32 raw;
2792 +
2793 +- if (unlikely(has_wrapped))
2794 +- raw += ch->match_value + 1;
2795 +- raw_spin_unlock_irqrestore(&ch->lock, flags);
2796 ++ raw_spin_lock_irqsave(&ch->lock, flags);
2797 ++ value = ch->total_cycles;
2798 ++ raw = sh_cmt_get_counter(ch, &has_wrapped);
2799 ++
2800 ++ if (unlikely(has_wrapped))
2801 ++ raw += ch->match_value + 1;
2802 ++ raw_spin_unlock_irqrestore(&ch->lock, flags);
2803 ++
2804 ++ return value + raw;
2805 ++ }
2806 +
2807 +- return value + raw;
2808 ++ return sh_cmt_get_counter(ch, &has_wrapped);
2809 + }
2810 +
2811 + static int sh_cmt_clocksource_enable(struct clocksource *cs)
2812 +@@ -697,7 +703,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
2813 + cs->disable = sh_cmt_clocksource_disable;
2814 + cs->suspend = sh_cmt_clocksource_suspend;
2815 + cs->resume = sh_cmt_clocksource_resume;
2816 +- cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
2817 ++ cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
2818 + cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
2819 +
2820 + dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
2821 +diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
2822 +index 09a9a77cce06b..81f9642777fb8 100644
2823 +--- a/drivers/counter/104-quad-8.c
2824 ++++ b/drivers/counter/104-quad-8.c
2825 +@@ -715,12 +715,13 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
2826 + case 1:
2827 + case 3:
2828 + quad8_preset_register_set(priv, count->id, ceiling);
2829 +- break;
2830 ++ mutex_unlock(&priv->lock);
2831 ++ return len;
2832 + }
2833 +
2834 + mutex_unlock(&priv->lock);
2835 +
2836 +- return len;
2837 ++ return -EINVAL;
2838 + }
2839 +
2840 + static ssize_t quad8_count_preset_enable_read(struct counter_device *counter,
2841 +diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
2842 +index 018415b9840a9..d97cf02b1df75 100644
2843 +--- a/drivers/crypto/hisilicon/sec2/sec.h
2844 ++++ b/drivers/crypto/hisilicon/sec2/sec.h
2845 +@@ -157,11 +157,6 @@ struct sec_ctx {
2846 + struct device *dev;
2847 + };
2848 +
2849 +-enum sec_endian {
2850 +- SEC_LE = 0,
2851 +- SEC_32BE,
2852 +- SEC_64BE
2853 +-};
2854 +
2855 + enum sec_debug_file_index {
2856 + SEC_CLEAR_ENABLE,
2857 +diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
2858 +index 490db7bccf619..a0cc46b649a39 100644
2859 +--- a/drivers/crypto/hisilicon/sec2/sec_main.c
2860 ++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
2861 +@@ -312,31 +312,20 @@ static const struct pci_device_id sec_dev_ids[] = {
2862 + };
2863 + MODULE_DEVICE_TABLE(pci, sec_dev_ids);
2864 +
2865 +-static u8 sec_get_endian(struct hisi_qm *qm)
2866 ++static void sec_set_endian(struct hisi_qm *qm)
2867 + {
2868 + u32 reg;
2869 +
2870 +- /*
2871 +- * As for VF, it is a wrong way to get endian setting by
2872 +- * reading a register of the engine
2873 +- */
2874 +- if (qm->pdev->is_virtfn) {
2875 +- dev_err_ratelimited(&qm->pdev->dev,
2876 +- "cannot access a register in VF!\n");
2877 +- return SEC_LE;
2878 +- }
2879 + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
2880 +- /* BD little endian mode */
2881 +- if (!(reg & BIT(0)))
2882 +- return SEC_LE;
2883 ++ reg &= ~(BIT(1) | BIT(0));
2884 ++ if (!IS_ENABLED(CONFIG_64BIT))
2885 ++ reg |= BIT(1);
2886 +
2887 +- /* BD 32-bits big endian mode */
2888 +- else if (!(reg & BIT(1)))
2889 +- return SEC_32BE;
2890 +
2891 +- /* BD 64-bits big endian mode */
2892 +- else
2893 +- return SEC_64BE;
2894 ++ if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
2895 ++ reg |= BIT(0);
2896 ++
2897 ++ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
2898 + }
2899 +
2900 + static void sec_open_sva_prefetch(struct hisi_qm *qm)
2901 +@@ -429,9 +418,7 @@ static int sec_engine_init(struct hisi_qm *qm)
2902 + qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
2903 +
2904 + /* config endian */
2905 +- reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
2906 +- reg |= sec_get_endian(qm);
2907 +- writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
2908 ++ sec_set_endian(qm);
2909 +
2910 + return 0;
2911 + }
2912 +@@ -984,7 +971,8 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2913 + return 0;
2914 +
2915 + err_alg_unregister:
2916 +- hisi_qm_alg_unregister(qm, &sec_devices);
2917 ++ if (qm->qp_num >= ctx_q_num)
2918 ++ hisi_qm_alg_unregister(qm, &sec_devices);
2919 + err_qm_stop:
2920 + sec_debugfs_exit(qm);
2921 + hisi_qm_stop(qm, QM_NORMAL);
2922 +diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
2923 +index d6a7784d29888..f397cc5bf1021 100644
2924 +--- a/drivers/crypto/mxs-dcp.c
2925 ++++ b/drivers/crypto/mxs-dcp.c
2926 +@@ -170,15 +170,19 @@ static struct dcp *global_sdcp;
2927 +
2928 + static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
2929 + {
2930 ++ int dma_err;
2931 + struct dcp *sdcp = global_sdcp;
2932 + const int chan = actx->chan;
2933 + uint32_t stat;
2934 + unsigned long ret;
2935 + struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
2936 +-
2937 + dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
2938 + DMA_TO_DEVICE);
2939 +
2940 ++ dma_err = dma_mapping_error(sdcp->dev, desc_phys);
2941 ++ if (dma_err)
2942 ++ return dma_err;
2943 ++
2944 + reinit_completion(&sdcp->completion[chan]);
2945 +
2946 + /* Clear status register. */
2947 +@@ -216,18 +220,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
2948 + static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
2949 + struct skcipher_request *req, int init)
2950 + {
2951 ++ dma_addr_t key_phys, src_phys, dst_phys;
2952 + struct dcp *sdcp = global_sdcp;
2953 + struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
2954 + struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
2955 + int ret;
2956 +
2957 +- dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2958 +- 2 * AES_KEYSIZE_128,
2959 +- DMA_TO_DEVICE);
2960 +- dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
2961 +- DCP_BUF_SZ, DMA_TO_DEVICE);
2962 +- dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
2963 +- DCP_BUF_SZ, DMA_FROM_DEVICE);
2964 ++ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2965 ++ 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
2966 ++ ret = dma_mapping_error(sdcp->dev, key_phys);
2967 ++ if (ret)
2968 ++ return ret;
2969 ++
2970 ++ src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
2971 ++ DCP_BUF_SZ, DMA_TO_DEVICE);
2972 ++ ret = dma_mapping_error(sdcp->dev, src_phys);
2973 ++ if (ret)
2974 ++ goto err_src;
2975 ++
2976 ++ dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
2977 ++ DCP_BUF_SZ, DMA_FROM_DEVICE);
2978 ++ ret = dma_mapping_error(sdcp->dev, dst_phys);
2979 ++ if (ret)
2980 ++ goto err_dst;
2981 +
2982 + if (actx->fill % AES_BLOCK_SIZE) {
2983 + dev_err(sdcp->dev, "Invalid block size!\n");
2984 +@@ -265,10 +280,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
2985 + ret = mxs_dcp_start_dma(actx);
2986 +
2987 + aes_done_run:
2988 ++ dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
2989 ++err_dst:
2990 ++ dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
2991 ++err_src:
2992 + dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
2993 + DMA_TO_DEVICE);
2994 +- dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
2995 +- dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
2996 +
2997 + return ret;
2998 + }
2999 +@@ -557,6 +574,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
3000 + dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
3001 + DCP_BUF_SZ, DMA_TO_DEVICE);
3002 +
3003 ++ ret = dma_mapping_error(sdcp->dev, buf_phys);
3004 ++ if (ret)
3005 ++ return ret;
3006 ++
3007 + /* Fill in the DMA descriptor. */
3008 + desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
3009 + MXS_DCP_CONTROL0_INTERRUPT |
3010 +@@ -589,6 +610,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
3011 + if (rctx->fini) {
3012 + digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
3013 + DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
3014 ++ ret = dma_mapping_error(sdcp->dev, digest_phys);
3015 ++ if (ret)
3016 ++ goto done_run;
3017 ++
3018 + desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
3019 + desc->payload = digest_phys;
3020 + }
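Every dma_map_single() can fail (IOMMU exhaustion, bounce-buffer pressure), and the returned handle must be validated with dma_mapping_error() before use; the unwind then unmaps only what was actually mapped, in reverse order. The idiom the hunks above apply, isolated as a sketch:

	dma_addr_t phys;
	int ret;

	phys = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, phys);	/* 0 or -ENOMEM */
	if (ret)
		return ret;			/* nothing mapped yet */

	/* use 'phys' in the DMA descriptor, run the operation */

	dma_unmap_single(dev, phys, len, DMA_TO_DEVICE);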
3021 +diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
3022 +index 0dd4c6b157de9..9b968ac4ee7b6 100644
3023 +--- a/drivers/crypto/omap-aes.c
3024 ++++ b/drivers/crypto/omap-aes.c
3025 +@@ -1175,9 +1175,9 @@ static int omap_aes_probe(struct platform_device *pdev)
3026 + spin_lock_init(&dd->lock);
3027 +
3028 + INIT_LIST_HEAD(&dd->list);
3029 +- spin_lock(&list_lock);
3030 ++ spin_lock_bh(&list_lock);
3031 + list_add_tail(&dd->list, &dev_list);
3032 +- spin_unlock(&list_lock);
3033 ++ spin_unlock_bh(&list_lock);
3034 +
3035 + /* Initialize crypto engine */
3036 + dd->engine = crypto_engine_alloc_init(dev, 1);
3037 +@@ -1264,9 +1264,9 @@ static int omap_aes_remove(struct platform_device *pdev)
3038 + if (!dd)
3039 + return -ENODEV;
3040 +
3041 +- spin_lock(&list_lock);
3042 ++ spin_lock_bh(&list_lock);
3043 + list_del(&dd->list);
3044 +- spin_unlock(&list_lock);
3045 ++ spin_unlock_bh(&list_lock);
3046 +
3047 + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
3048 + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
3049 +diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
3050 +index bc8631363d725..be77656864e3f 100644
3051 +--- a/drivers/crypto/omap-des.c
3052 ++++ b/drivers/crypto/omap-des.c
3053 +@@ -1033,9 +1033,9 @@ static int omap_des_probe(struct platform_device *pdev)
3054 +
3055 +
3056 + INIT_LIST_HEAD(&dd->list);
3057 +- spin_lock(&list_lock);
3058 ++ spin_lock_bh(&list_lock);
3059 + list_add_tail(&dd->list, &dev_list);
3060 +- spin_unlock(&list_lock);
3061 ++ spin_unlock_bh(&list_lock);
3062 +
3063 + /* Initialize des crypto engine */
3064 + dd->engine = crypto_engine_alloc_init(dev, 1);
3065 +@@ -1094,9 +1094,9 @@ static int omap_des_remove(struct platform_device *pdev)
3066 + if (!dd)
3067 + return -ENODEV;
3068 +
3069 +- spin_lock(&list_lock);
3070 ++ spin_lock_bh(&list_lock);
3071 + list_del(&dd->list);
3072 +- spin_unlock(&list_lock);
3073 ++ spin_unlock_bh(&list_lock);
3074 +
3075 + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
3076 + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
3077 +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
3078 +index dd53ad9987b0d..63beea7cdba5e 100644
3079 +--- a/drivers/crypto/omap-sham.c
3080 ++++ b/drivers/crypto/omap-sham.c
3081 +@@ -1736,7 +1736,7 @@ static void omap_sham_done_task(unsigned long data)
3082 + if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
3083 + goto finish;
3084 + } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
3085 +- if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
3086 ++ if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
3087 + omap_sham_update_dma_stop(dd);
3088 + if (dd->err) {
3089 + err = dd->err;
3090 +@@ -2144,9 +2144,9 @@ static int omap_sham_probe(struct platform_device *pdev)
3091 + (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
3092 + (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
3093 +
3094 +- spin_lock(&sham.lock);
3095 ++ spin_lock_bh(&sham.lock);
3096 + list_add_tail(&dd->list, &sham.dev_list);
3097 +- spin_unlock(&sham.lock);
3098 ++ spin_unlock_bh(&sham.lock);
3099 +
3100 + dd->engine = crypto_engine_alloc_init(dev, 1);
3101 + if (!dd->engine) {
3102 +@@ -2194,9 +2194,9 @@ err_algs:
3103 + err_engine_start:
3104 + crypto_engine_exit(dd->engine);
3105 + err_engine:
3106 +- spin_lock(&sham.lock);
3107 ++ spin_lock_bh(&sham.lock);
3108 + list_del(&dd->list);
3109 +- spin_unlock(&sham.lock);
3110 ++ spin_unlock_bh(&sham.lock);
3111 + err_pm:
3112 + pm_runtime_disable(dev);
3113 + if (!dd->polling_mode)
3114 +@@ -2215,9 +2215,9 @@ static int omap_sham_remove(struct platform_device *pdev)
3115 + dd = platform_get_drvdata(pdev);
3116 + if (!dd)
3117 + return -ENODEV;
3118 +- spin_lock(&sham.lock);
3119 ++ spin_lock_bh(&sham.lock);
3120 + list_del(&dd->list);
3121 +- spin_unlock(&sham.lock);
3122 ++ spin_unlock_bh(&sham.lock);
3123 + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
3124 + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
3125 + crypto_unregister_ahash(
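The omap-aes/-des/-sham hunks above all make the same change: the device list is also walked from tasklet (softirq) context, so process-context users must take the lock with bottom halves disabled or risk a same-CPU deadlock. A minimal sketch of the rule, with placeholder names:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static LIST_HEAD(demo_list);
    static DEFINE_SPINLOCK(demo_lock);

    /* Runs in softirq context; BHs are already disabled here. */
    static void demo_tasklet(unsigned long data)
    {
            spin_lock(&demo_lock);
            /* ... walk demo_list ... */
            spin_unlock(&demo_lock);
    }

    /* Runs in process context (probe/remove). A plain spin_lock()
     * here could be preempted by the tasklet on the same CPU while
     * the lock is held, deadlocking; the _bh variant closes that
     * window.
     */
    static void demo_register(struct list_head *entry)
    {
            spin_lock_bh(&demo_lock);
            list_add_tail(entry, &demo_list);
            spin_unlock_bh(&demo_lock);
    }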
3126 +diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
3127 +index 15f6b9bdfb221..ddf42fb326251 100644
3128 +--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
3129 ++++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
3130 +@@ -81,10 +81,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
3131 + hw_data->enable_error_correction = adf_vf_void_noop;
3132 + hw_data->init_admin_comms = adf_vf_int_noop;
3133 + hw_data->exit_admin_comms = adf_vf_void_noop;
3134 +- hw_data->send_admin_init = adf_vf2pf_init;
3135 ++ hw_data->send_admin_init = adf_vf2pf_notify_init;
3136 + hw_data->init_arb = adf_vf_int_noop;
3137 + hw_data->exit_arb = adf_vf_void_noop;
3138 +- hw_data->disable_iov = adf_vf2pf_shutdown;
3139 ++ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
3140 + hw_data->get_accel_mask = get_accel_mask;
3141 + hw_data->get_ae_mask = get_ae_mask;
3142 + hw_data->get_num_accels = get_num_accels;
3143 +diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
3144 +index d231583428c91..7e202ef925231 100644
3145 +--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
3146 ++++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
3147 +@@ -81,10 +81,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
3148 + hw_data->enable_error_correction = adf_vf_void_noop;
3149 + hw_data->init_admin_comms = adf_vf_int_noop;
3150 + hw_data->exit_admin_comms = adf_vf_void_noop;
3151 +- hw_data->send_admin_init = adf_vf2pf_init;
3152 ++ hw_data->send_admin_init = adf_vf2pf_notify_init;
3153 + hw_data->init_arb = adf_vf_int_noop;
3154 + hw_data->exit_arb = adf_vf_void_noop;
3155 +- hw_data->disable_iov = adf_vf2pf_shutdown;
3156 ++ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
3157 + hw_data->get_accel_mask = get_accel_mask;
3158 + hw_data->get_ae_mask = get_ae_mask;
3159 + hw_data->get_num_accels = get_num_accels;
3160 +diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
3161 +index c61476553728d..dd4a811b7e89f 100644
3162 +--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
3163 ++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
3164 +@@ -198,8 +198,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
3165 + void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
3166 + void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
3167 +
3168 +-int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
3169 +-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
3170 ++int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
3171 ++void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
3172 + int adf_init_pf_wq(void);
3173 + void adf_exit_pf_wq(void);
3174 + int adf_init_vf_wq(void);
3175 +@@ -222,12 +222,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
3176 + {
3177 + }
3178 +
3179 +-static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
3180 ++static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
3181 + {
3182 + return 0;
3183 + }
3184 +
3185 +-static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
3186 ++static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
3187 + {
3188 + }
3189 +
3190 +diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
3191 +index 744c40351428d..02864985dbb04 100644
3192 +--- a/drivers/crypto/qat/qat_common/adf_init.c
3193 ++++ b/drivers/crypto/qat/qat_common/adf_init.c
3194 +@@ -61,6 +61,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
3195 + struct service_hndl *service;
3196 + struct list_head *list_itr;
3197 + struct adf_hw_device_data *hw_data = accel_dev->hw_device;
3198 ++ int ret;
3199 +
3200 + if (!hw_data) {
3201 + dev_err(&GET_DEV(accel_dev),
3202 +@@ -127,9 +128,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
3203 + }
3204 +
3205 + hw_data->enable_error_correction(accel_dev);
3206 +- hw_data->enable_vf2pf_comms(accel_dev);
3207 ++ ret = hw_data->enable_vf2pf_comms(accel_dev);
3208 +
3209 +- return 0;
3210 ++ return ret;
3211 + }
3212 + EXPORT_SYMBOL_GPL(adf_dev_init);
3213 +
3214 +diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
3215 +index e3ad5587be49e..daab02011717d 100644
3216 +--- a/drivers/crypto/qat/qat_common/adf_isr.c
3217 ++++ b/drivers/crypto/qat/qat_common/adf_isr.c
3218 +@@ -15,6 +15,8 @@
3219 + #include "adf_transport_access_macros.h"
3220 + #include "adf_transport_internal.h"
3221 +
3222 ++#define ADF_MAX_NUM_VFS 32
3223 ++
3224 + static int adf_enable_msix(struct adf_accel_dev *accel_dev)
3225 + {
3226 + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
3227 +@@ -72,7 +74,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
3228 + struct adf_bar *pmisc =
3229 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
3230 + void __iomem *pmisc_bar_addr = pmisc->virt_addr;
3231 +- u32 vf_mask;
3232 ++ unsigned long vf_mask;
3233 +
3234 + /* Get the interrupt sources triggered by VFs */
3235 + vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
3236 +@@ -93,8 +95,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
3237 + * unless the VF is malicious and is attempting to
3238 + * flood the host OS with VF2PF interrupts.
3239 + */
3240 +- for_each_set_bit(i, (const unsigned long *)&vf_mask,
3241 +- (sizeof(vf_mask) * BITS_PER_BYTE)) {
3242 ++ for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
3243 + vf_info = accel_dev->pf.vf_info + i;
3244 +
3245 + if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
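The adf_isr.c hunk is subtler than a constant rename: vf_mask used to be a u32 cast to (const unsigned long *), so on 64-bit kernels for_each_set_bit() scanned 64 bits and read past the variable. Declaring the mask as a real unsigned long and bounding the walk at the number of meaningful bits avoids that. A placeholder-named sketch:

    #include <linux/types.h>
    #include <linux/bitops.h>
    #include <linux/printk.h>

    #define DEMO_MAX_VFS 32

    /* Illustrative only: the scan is bounded by the bit width that
     * actually carries information, not by sizeof(unsigned long).
     */
    static void demo_handle_vf_sources(u32 sources)
    {
            unsigned long mask = sources;
            int i;

            for_each_set_bit(i, &mask, DEMO_MAX_VFS)
                    pr_debug("VF %d signalled\n", i);
    }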
3246 +diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
3247 +index a1b77bd7a8944..efa4bffb4f601 100644
3248 +--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
3249 ++++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
3250 +@@ -186,7 +186,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
3251 +
3252 + return ret;
3253 + }
3254 +-EXPORT_SYMBOL_GPL(adf_iov_putmsg);
3255 +
3256 + void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
3257 + {
3258 +@@ -316,6 +315,8 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
3259 + msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
3260 + BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
3261 +
3262 ++ reinit_completion(&accel_dev->vf.iov_msg_completion);
3263 ++
3264 + /* Send request from VF to PF */
3265 + ret = adf_iov_putmsg(accel_dev, msg, 0);
3266 + if (ret) {
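The reinit_completion() added above matters when a previous exchange timed out: the PF's late reply would have left the completion in a completed state, and the next wait would return immediately with stale data. The reset-then-send-then-wait shape, under placeholder names:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DECLARE_COMPLETION(demo_done);

    /* Illustrative request/response: reset first, so a complete()
     * left over from an earlier timed-out round cannot satisfy this
     * wait instantly.
     */
    static int demo_transact(void (*send_msg)(void))
    {
            reinit_completion(&demo_done);
            send_msg();

            if (!wait_for_completion_timeout(&demo_done,
                                             msecs_to_jiffies(100)))
                    return -ETIMEDOUT;
            return 0;
    }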
3267 +diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
3268 +index e85bd62d134a4..3e25fac051b25 100644
3269 +--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
3270 ++++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
3271 +@@ -5,14 +5,14 @@
3272 + #include "adf_pf2vf_msg.h"
3273 +
3274 + /**
3275 +- * adf_vf2pf_init() - send init msg to PF
3276 ++ * adf_vf2pf_notify_init() - send init msg to PF
3277 + * @accel_dev: Pointer to acceleration VF device.
3278 + *
3279 + * Function sends an init message from the VF to a PF
3280 + *
3281 + * Return: 0 on success, error code otherwise.
3282 + */
3283 +-int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
3284 ++int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
3285 + {
3286 + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
3287 + (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
3288 +@@ -25,17 +25,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
3289 + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
3290 + return 0;
3291 + }
3292 +-EXPORT_SYMBOL_GPL(adf_vf2pf_init);
3293 ++EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
3294 +
3295 + /**
3296 +- * adf_vf2pf_shutdown() - send shutdown msg to PF
3297 ++ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
3298 + * @accel_dev: Pointer to acceleration VF device.
3299 + *
3300 + * Function sends a shutdown message from the VF to a PF
3301 + *
3302 + * Return: void
3303 + */
3304 +-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
3305 ++void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
3306 + {
3307 + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
3308 + (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
3309 +@@ -45,4 +45,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
3310 + dev_err(&GET_DEV(accel_dev),
3311 + "Failed to send Shutdown event to PF\n");
3312 + }
3313 +-EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
3314 ++EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
3315 +diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
3316 +index 888388acb6bd3..3e4f64d248f9b 100644
3317 +--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
3318 ++++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
3319 +@@ -160,6 +160,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
3320 + struct adf_bar *pmisc =
3321 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
3322 + void __iomem *pmisc_bar_addr = pmisc->virt_addr;
3323 ++ bool handled = false;
3324 + u32 v_int;
3325 +
3326 + /* Read VF INT source CSR to determine the source of VF interrupt */
3327 +@@ -172,7 +173,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
3328 +
3329 + /* Schedule tasklet to handle interrupt BH */
3330 + tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
3331 +- return IRQ_HANDLED;
3332 ++ handled = true;
3333 + }
3334 +
3335 + /* Check bundle interrupt */
3336 +@@ -184,10 +185,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
3337 + csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
3338 + bank->bank_number, 0);
3339 + tasklet_hi_schedule(&bank->resp_handler);
3340 +- return IRQ_HANDLED;
3341 ++ handled = true;
3342 + }
3343 +
3344 +- return IRQ_NONE;
3345 ++ return handled ? IRQ_HANDLED : IRQ_NONE;
3346 + }
3347 +
3348 + static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
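The adf_vf_isr.c change above fixes an ISR that returned IRQ_HANDLED after the first matching source and IRQ_NONE otherwise, so a second source pending in the same invocation could be dropped. The accumulate-then-return shape, with stubbed-out sources:

    #include <linux/interrupt.h>

    static bool demo_src_a_pending(void *ctx) { return false; } /* stub */
    static bool demo_src_b_pending(void *ctx) { return false; } /* stub */

    /* Illustrative only: check every source, claim the IRQ if any fired. */
    static irqreturn_t demo_isr(int irq, void *ctx)
    {
            bool handled = false;

            if (demo_src_a_pending(ctx)) {
                    /* ack + defer to bottom half */
                    handled = true;
            }
            if (demo_src_b_pending(ctx)) {
                    /* ack + defer to bottom half */
                    handled = true;
            }

            return handled ? IRQ_HANDLED : IRQ_NONE;
    }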
3349 +diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
3350 +index f14fb82ed6dfc..744734caaf7b7 100644
3351 +--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
3352 ++++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
3353 +@@ -81,10 +81,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
3354 + hw_data->enable_error_correction = adf_vf_void_noop;
3355 + hw_data->init_admin_comms = adf_vf_int_noop;
3356 + hw_data->exit_admin_comms = adf_vf_void_noop;
3357 +- hw_data->send_admin_init = adf_vf2pf_init;
3358 ++ hw_data->send_admin_init = adf_vf2pf_notify_init;
3359 + hw_data->init_arb = adf_vf_int_noop;
3360 + hw_data->exit_arb = adf_vf_void_noop;
3361 +- hw_data->disable_iov = adf_vf2pf_shutdown;
3362 ++ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
3363 + hw_data->get_accel_mask = get_accel_mask;
3364 + hw_data->get_ae_mask = get_ae_mask;
3365 + hw_data->get_num_accels = get_num_accels;
3366 +diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
3367 +index 6ce0ed2ffaaf1..b4a024cb8b97d 100644
3368 +--- a/drivers/edac/i10nm_base.c
3369 ++++ b/drivers/edac/i10nm_base.c
3370 +@@ -33,9 +33,9 @@
3371 + #define I10NM_GET_DIMMMTR(m, i, j) \
3372 + readl((m)->mbase + ((m)->hbm_mc ? 0x80c : 0x2080c) + \
3373 + (i) * (m)->chan_mmio_sz + (j) * 4)
3374 +-#define I10NM_GET_MCDDRTCFG(m, i, j) \
3375 ++#define I10NM_GET_MCDDRTCFG(m, i) \
3376 + readl((m)->mbase + ((m)->hbm_mc ? 0x970 : 0x20970) + \
3377 +- (i) * (m)->chan_mmio_sz + (j) * 4)
3378 ++ (i) * (m)->chan_mmio_sz)
3379 + #define I10NM_GET_MCMTR(m, i) \
3380 + readl((m)->mbase + ((m)->hbm_mc ? 0xef8 : 0x20ef8) + \
3381 + (i) * (m)->chan_mmio_sz)
3382 +@@ -321,10 +321,10 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
3383 +
3384 + ndimms = 0;
3385 + amap = I10NM_GET_AMAP(imc, i);
3386 ++ mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
3387 + for (j = 0; j < imc->num_dimms; j++) {
3388 + dimm = edac_get_dimm(mci, i, j, 0);
3389 + mtr = I10NM_GET_DIMMMTR(imc, i, j);
3390 +- mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
3391 + edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
3392 + mtr, mcddrtcfg, imc->mc, i, j);
3393 +
3394 +diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
3395 +index 27d56920b4690..67dbf4c312716 100644
3396 +--- a/drivers/edac/mce_amd.c
3397 ++++ b/drivers/edac/mce_amd.c
3398 +@@ -1246,6 +1246,9 @@ static int __init mce_amd_init(void)
3399 + c->x86_vendor != X86_VENDOR_HYGON)
3400 + return -ENODEV;
3401 +
3402 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
3403 ++ return -ENODEV;
3404 ++
3405 + if (boot_cpu_has(X86_FEATURE_SMCA)) {
3406 + xec_mask = 0x3f;
3407 + goto out;
3408 +diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
3409 +index 250e016807422..4b8978b254f9a 100644
3410 +--- a/drivers/firmware/raspberrypi.c
3411 ++++ b/drivers/firmware/raspberrypi.c
3412 +@@ -329,12 +329,18 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
3413 +
3414 + fw = platform_get_drvdata(pdev);
3415 + if (!fw)
3416 +- return NULL;
3417 ++ goto err_put_device;
3418 +
3419 + if (!kref_get_unless_zero(&fw->consumers))
3420 +- return NULL;
3421 ++ goto err_put_device;
3422 ++
3423 ++ put_device(&pdev->dev);
3424 +
3425 + return fw;
3426 ++
3427 ++err_put_device:
3428 ++ put_device(&pdev->dev);
3429 ++ return NULL;
3430 + }
3431 + EXPORT_SYMBOL_GPL(rpi_firmware_get);
3432 +
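The raspberrypi firmware fix is a pure refcount repair: of_find_device_by_node() returns its device with an elevated reference, and the old code never dropped it on any path. A stripped-down sketch of the rule (all names are placeholders):

    #include <linux/of.h>
    #include <linux/platform_device.h>

    struct demo_fw;     /* opaque placeholder */

    static struct demo_fw *demo_fw_get(struct device_node *np)
    {
            struct platform_device *pdev = of_find_device_by_node(np);
            struct demo_fw *fw;

            if (!pdev)
                    return NULL;

            fw = platform_get_drvdata(pdev);
            /* Drop the lookup reference on every path; as in the hunk
             * above, the driver keeps its consumers alive with its own
             * kref, not with the device reference from the lookup.
             */
            put_device(&pdev->dev);
            return fw;
    }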
3433 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
3434 +index b8655ff73a658..cc9c9f8b23b2c 100644
3435 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
3436 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
3437 +@@ -160,17 +160,28 @@ static int acp_poweron(struct generic_pm_domain *genpd)
3438 + return 0;
3439 + }
3440 +
3441 +-static struct device *get_mfd_cell_dev(const char *device_name, int r)
3442 ++static int acp_genpd_add_device(struct device *dev, void *data)
3443 + {
3444 +- char auto_dev_name[25];
3445 +- struct device *dev;
3446 ++ struct generic_pm_domain *gpd = data;
3447 ++ int ret;
3448 +
3449 +- snprintf(auto_dev_name, sizeof(auto_dev_name),
3450 +- "%s.%d.auto", device_name, r);
3451 +- dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
3452 +- dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
3453 ++ ret = pm_genpd_add_device(gpd, dev);
3454 ++ if (ret)
3455 ++ dev_err(dev, "Failed to add dev to genpd %d\n", ret);
3456 +
3457 +- return dev;
3458 ++ return ret;
3459 ++}
3460 ++
3461 ++static int acp_genpd_remove_device(struct device *dev, void *data)
3462 ++{
3463 ++ int ret;
3464 ++
3465 ++ ret = pm_genpd_remove_device(dev);
3466 ++ if (ret)
3467 ++ dev_err(dev, "Failed to remove dev from genpd %d\n", ret);
3468 ++
3469 ++ /* Continue to remove */
3470 ++ return 0;
3471 + }
3472 +
3473 + /**
3474 +@@ -181,11 +192,10 @@ static struct device *get_mfd_cell_dev(const char *device_name, int r)
3475 + */
3476 + static int acp_hw_init(void *handle)
3477 + {
3478 +- int r, i;
3479 ++ int r;
3480 + uint64_t acp_base;
3481 + u32 val = 0;
3482 + u32 count = 0;
3483 +- struct device *dev;
3484 + struct i2s_platform_data *i2s_pdata = NULL;
3485 +
3486 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3487 +@@ -341,15 +351,10 @@ static int acp_hw_init(void *handle)
3488 + if (r)
3489 + goto failure;
3490 +
3491 +- for (i = 0; i < ACP_DEVS ; i++) {
3492 +- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
3493 +- r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
3494 +- if (r) {
3495 +- dev_err(dev, "Failed to add dev to genpd\n");
3496 +- goto failure;
3497 +- }
3498 +- }
3499 +-
3500 ++ r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
3501 ++ acp_genpd_add_device);
3502 ++ if (r)
3503 ++ goto failure;
3504 +
3505 + /* Assert Soft reset of ACP */
3506 + val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
3507 +@@ -410,10 +415,8 @@ failure:
3508 + */
3509 + static int acp_hw_fini(void *handle)
3510 + {
3511 +- int i, ret;
3512 + u32 val = 0;
3513 + u32 count = 0;
3514 +- struct device *dev;
3515 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3516 +
3517 + /* return early if no ACP */
3518 +@@ -458,13 +461,8 @@ static int acp_hw_fini(void *handle)
3519 + udelay(100);
3520 + }
3521 +
3522 +- for (i = 0; i < ACP_DEVS ; i++) {
3523 +- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
3524 +- ret = pm_genpd_remove_device(dev);
3525 +- /* If removal fails, dont giveup and try rest */
3526 +- if (ret)
3527 +- dev_err(dev, "remove dev from genpd failed\n");
3528 +- }
3529 ++ device_for_each_child(adev->acp.parent, NULL,
3530 ++ acp_genpd_remove_device);
3531 +
3532 + mfd_remove_devices(adev->acp.parent);
3533 + kfree(adev->acp.acp_res);
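The amdgpu_acp rework replaces name-guessing ("%s.%d.auto" plus bus_find_device_by_name(), which also leaked the returned device reference) with iteration over the children the MFD core actually registered under the parent. The shape of that idiom, reduced to placeholders:

    #include <linux/device.h>
    #include <linux/pm_domain.h>

    static int demo_attach_one(struct device *dev, void *data)
    {
            struct generic_pm_domain *gpd = data;

            /* A non-zero return stops the iteration early, which is
             * why the add path above propagates the error while the
             * remove path deliberately returns 0 to keep going.
             */
            return pm_genpd_add_device(gpd, dev);
    }

    static int demo_attach_children(struct device *parent,
                                    struct generic_pm_domain *gpd)
    {
            return device_for_each_child(parent, gpd, demo_attach_one);
    }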
3534 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
3535 +index e802f9a95f087..415be74df28c7 100644
3536 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
3537 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
3538 +@@ -55,7 +55,7 @@
3539 +
3540 + #undef __SMU_DUMMY_MAP
3541 + #define __SMU_DUMMY_MAP(type) #type
3542 +-static const char* __smu_message_names[] = {
3543 ++static const char * const __smu_message_names[] = {
3544 + SMU_MESSAGE_TYPES
3545 + };
3546 +
3547 +@@ -76,55 +76,256 @@ static void smu_cmn_read_arg(struct smu_context *smu,
3548 + *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
3549 + }
3550 +
3551 +-int smu_cmn_wait_for_response(struct smu_context *smu)
3552 ++/* Redefine the SMU error codes here.
3553 ++ *
3554 ++ * Note that these definitions are redundant and should be removed
3555 ++ * when the SMU has exported a unified header file containing these
3556 ++ * macros, which header file we can just include and use the SMU's
3557 ++ * macros. At the moment, these error codes are defined by the SMU
3558 ++ * per-ASIC unfortunately, yet we're a one driver for all ASICs.
3559 ++ */
3560 ++#define SMU_RESP_NONE 0
3561 ++#define SMU_RESP_OK 1
3562 ++#define SMU_RESP_CMD_FAIL 0xFF
3563 ++#define SMU_RESP_CMD_UNKNOWN 0xFE
3564 ++#define SMU_RESP_CMD_BAD_PREREQ 0xFD
3565 ++#define SMU_RESP_BUSY_OTHER 0xFC
3566 ++#define SMU_RESP_DEBUG_END 0xFB
3567 ++
3568 ++/**
3569 ++ * __smu_cmn_poll_stat -- poll for a status from the SMU
3570 ++ * smu: a pointer to SMU context
3571 ++ *
3572 ++ * Returns the status of the SMU, which could be,
3573 ++ * 0, the SMU is busy with your previous command;
3574 ++ * 1, execution status: success, execution result: success;
3575 ++ * 0xFF, execution status: success, execution result: failure;
3576 ++ * 0xFE, unknown command;
3577 ++ * 0xFD, valid command, but bad (command) prerequisites;
3578 ++ * 0xFC, the command was rejected as the SMU is busy;
3579 ++ * 0xFB, "SMC_Result_DebugDataDumpEnd".
3580 ++ *
3581 ++ * The values here are not defined by macros, because I'd rather we
3582 ++ * include a single header file which defines them, which is
3583 ++ * maintained by the SMU FW team, so that we're impervious to firmware
3584 ++ * changes. At the moment those values are defined in various header
3585 ++ * files, one for each ASIC, yet here we're a single ASIC-agnostic
3586 ++ * interface. Such a change can be followed-up by a subsequent patch.
3587 ++ */
3588 ++static u32 __smu_cmn_poll_stat(struct smu_context *smu)
3589 + {
3590 + struct amdgpu_device *adev = smu->adev;
3591 +- uint32_t cur_value, i, timeout = adev->usec_timeout * 20;
3592 ++ int timeout = adev->usec_timeout * 20;
3593 ++ u32 reg;
3594 +
3595 +- for (i = 0; i < timeout; i++) {
3596 +- cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
3597 +- if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
3598 +- return cur_value;
3599 ++ for ( ; timeout > 0; timeout--) {
3600 ++ reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
3601 ++ if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
3602 ++ break;
3603 +
3604 + udelay(1);
3605 + }
3606 +
3607 +- /* timeout means wrong logic */
3608 +- if (i == timeout)
3609 +- return -ETIME;
3610 +-
3611 +- return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
3612 ++ return reg;
3613 + }
3614 +
3615 +-int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
3616 +- uint16_t msg, uint32_t param)
3617 ++static void __smu_cmn_reg_print_error(struct smu_context *smu,
3618 ++ u32 reg_c2pmsg_90,
3619 ++ int msg_index,
3620 ++ u32 param,
3621 ++ enum smu_message_type msg)
3622 + {
3623 + struct amdgpu_device *adev = smu->adev;
3624 +- int ret;
3625 ++ const char *message = smu_get_message_name(smu, msg);
3626 +
3627 +- ret = smu_cmn_wait_for_response(smu);
3628 +- if (ret != 0x1) {
3629 +- dev_err(adev->dev, "Msg issuing pre-check failed(0x%x) and "
3630 +- "SMU may be not in the right state!\n", ret);
3631 +- if (ret != -ETIME)
3632 +- ret = -EIO;
3633 +- return ret;
3634 ++ switch (reg_c2pmsg_90) {
3635 ++ case SMU_RESP_NONE:
3636 ++ dev_err_ratelimited(adev->dev,
3637 ++ "SMU: I'm not done with your previous command!");
3638 ++ break;
3639 ++ case SMU_RESP_OK:
3640 ++ /* The SMU executed the command. It completed with a
3641 ++ * successful result.
3642 ++ */
3643 ++ break;
3644 ++ case SMU_RESP_CMD_FAIL:
3645 ++ /* The SMU executed the command. It completed with an
3646 ++ * unsuccessful result.
3647 ++ */
3648 ++ break;
3649 ++ case SMU_RESP_CMD_UNKNOWN:
3650 ++ dev_err_ratelimited(adev->dev,
3651 ++ "SMU: unknown command: index:%d param:0x%08X message:%s",
3652 ++ msg_index, param, message);
3653 ++ break;
3654 ++ case SMU_RESP_CMD_BAD_PREREQ:
3655 ++ dev_err_ratelimited(adev->dev,
3656 ++ "SMU: valid command, bad prerequisites: index:%d param:0x%08X message:%s",
3657 ++ msg_index, param, message);
3658 ++ break;
3659 ++ case SMU_RESP_BUSY_OTHER:
3660 ++ dev_err_ratelimited(adev->dev,
3661 ++ "SMU: I'm very busy for your command: index:%d param:0x%08X message:%s",
3662 ++ msg_index, param, message);
3663 ++ break;
3664 ++ case SMU_RESP_DEBUG_END:
3665 ++ dev_err_ratelimited(adev->dev,
3666 ++ "SMU: I'm debugging!");
3667 ++ break;
3668 ++ default:
3669 ++ dev_err_ratelimited(adev->dev,
3670 ++ "SMU: response:0x%08X for index:%d param:0x%08X message:%s?",
3671 ++ reg_c2pmsg_90, msg_index, param, message);
3672 ++ break;
3673 + }
3674 ++}
3675 ++
3676 ++static int __smu_cmn_reg2errno(struct smu_context *smu, u32 reg_c2pmsg_90)
3677 ++{
3678 ++ int res;
3679 ++
3680 ++ switch (reg_c2pmsg_90) {
3681 ++ case SMU_RESP_NONE:
3682 ++ /* The SMU is busy--still executing your command.
3683 ++ */
3684 ++ res = -ETIME;
3685 ++ break;
3686 ++ case SMU_RESP_OK:
3687 ++ res = 0;
3688 ++ break;
3689 ++ case SMU_RESP_CMD_FAIL:
3690 ++ /* Command completed successfully, but the command
3691 ++ * status was failure.
3692 ++ */
3693 ++ res = -EIO;
3694 ++ break;
3695 ++ case SMU_RESP_CMD_UNKNOWN:
3696 ++ /* Unknown command--ignored by the SMU.
3697 ++ */
3698 ++ res = -EOPNOTSUPP;
3699 ++ break;
3700 ++ case SMU_RESP_CMD_BAD_PREREQ:
3701 ++ /* Valid command--bad prerequisites.
3702 ++ */
3703 ++ res = -EINVAL;
3704 ++ break;
3705 ++ case SMU_RESP_BUSY_OTHER:
3706 ++ /* The SMU is busy with other commands. The client
3707 ++ * should retry in 10 us.
3708 ++ */
3709 ++ res = -EBUSY;
3710 ++ break;
3711 ++ default:
3712 ++ /* Unknown or debug response from the SMU.
3713 ++ */
3714 ++ res = -EREMOTEIO;
3715 ++ break;
3716 ++ }
3717 ++
3718 ++ return res;
3719 ++}
3720 ++
3721 ++static void __smu_cmn_send_msg(struct smu_context *smu,
3722 ++ u16 msg,
3723 ++ u32 param)
3724 ++{
3725 ++ struct amdgpu_device *adev = smu->adev;
3726 +
3727 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
3728 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
3729 + WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
3730 ++}
3731 +
3732 +- return 0;
3733 ++/**
3734 ++ * smu_cmn_send_msg_without_waiting -- send the message; don't wait for status
3735 ++ * @smu: pointer to an SMU context
3736 ++ * @msg_index: message index
3737 ++ * @param: message parameter to send to the SMU
3738 ++ *
3739 ++ * Send a message to the SMU with the parameter passed. Do not wait
3740 ++ * for status/result of the message, thus the "without_waiting".
3741 ++ *
3742 ++ * Return 0 on success, -errno on error if we weren't able to _send_
3743 ++ * the message for some reason. See __smu_cmn_reg2errno() for details
3744 ++ * of the -errno.
3745 ++ */
3746 ++int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
3747 ++ uint16_t msg_index,
3748 ++ uint32_t param)
3749 ++{
3750 ++ u32 reg;
3751 ++ int res;
3752 ++
3753 ++ if (smu->adev->no_hw_access)
3754 ++ return 0;
3755 ++
3756 ++ reg = __smu_cmn_poll_stat(smu);
3757 ++ res = __smu_cmn_reg2errno(smu, reg);
3758 ++ if (reg == SMU_RESP_NONE ||
3759 ++ reg == SMU_RESP_BUSY_OTHER ||
3760 ++ res == -EREMOTEIO)
3761 ++ goto Out;
3762 ++ __smu_cmn_send_msg(smu, msg_index, param);
3763 ++ res = 0;
3764 ++Out:
3765 ++ return res;
3766 ++}
3767 ++
3768 ++/**
3769 ++ * smu_cmn_wait_for_response -- wait for response from the SMU
3770 ++ * @smu: pointer to an SMU context
3771 ++ *
3772 ++ * Wait for status from the SMU.
3773 ++ *
3774 ++ * Return 0 on success, -errno on error, indicating the execution
3775 ++ * status and result of the message being waited for. See
3776 ++ * __smu_cmn_reg2errno() for details of the -errno.
3777 ++ */
3778 ++int smu_cmn_wait_for_response(struct smu_context *smu)
3779 ++{
3780 ++ u32 reg;
3781 ++
3782 ++ reg = __smu_cmn_poll_stat(smu);
3783 ++ return __smu_cmn_reg2errno(smu, reg);
3784 + }
3785 +
3786 ++/**
3787 ++ * smu_cmn_send_smc_msg_with_param -- send a message with parameter
3788 ++ * @smu: pointer to an SMU context
3789 ++ * @msg: message to send
3790 ++ * @param: parameter to send to the SMU
3791 ++ * @read_arg: pointer to u32 to return a value from the SMU back
3792 ++ * to the caller
3793 ++ *
3794 ++ * Send the message @msg with parameter @param to the SMU, wait for
3795 ++ * completion of the command, and return back a value from the SMU in
3796 ++ * @read_arg pointer.
3797 ++ *
3798 ++ * Return 0 on success, -errno on error, if we weren't able to send
3799 ++ * the message or if the message completed with some kind of
3800 ++ * error. See __smu_cmn_reg2errno() for details of the -errno.
3801 ++ *
3802 ++ * If we weren't able to send the message to the SMU, we also print
3803 ++ * the error to the standard log.
3804 ++ *
3805 ++ * Command completion status is printed only if the -errno is
3806 ++ * -EREMOTEIO, indicating that the SMU returned back an
3807 ++ * undefined/unknown/unspecified result. All other cases are
3808 ++ * well-defined, not printed, but instead given back to the client to
3809 ++ * decide what further to do.
3810 ++ *
3811 ++ * The return value, @read_arg is read back regardless, to give back
3812 ++ * more information to the client, which on error would most likely be
3813 ++ * @param, but we can't assume that. This also eliminates more
3814 ++ * conditionals.
3815 ++ */
3816 + int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
3817 + enum smu_message_type msg,
3818 + uint32_t param,
3819 + uint32_t *read_arg)
3820 + {
3821 +- struct amdgpu_device *adev = smu->adev;
3822 +- int ret = 0, index = 0;
3823 ++ int res, index;
3824 ++ u32 reg;
3825 +
3826 + if (smu->adev->no_hw_access)
3827 + return 0;
3828 +@@ -136,31 +337,24 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
3829 + return index == -EACCES ? 0 : index;
3830 +
3831 + mutex_lock(&smu->message_lock);
3832 +- ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, param);
3833 +- if (ret)
3834 +- goto out;
3835 +-
3836 +- ret = smu_cmn_wait_for_response(smu);
3837 +- if (ret != 0x1) {
3838 +- if (ret == -ETIME) {
3839 +- dev_err(adev->dev, "message: %15s (%d) \tparam: 0x%08x is timeout (no response)\n",
3840 +- smu_get_message_name(smu, msg), index, param);
3841 +- } else {
3842 +- dev_err(adev->dev, "failed send message: %15s (%d) \tparam: 0x%08x response %#x\n",
3843 +- smu_get_message_name(smu, msg), index, param,
3844 +- ret);
3845 +- ret = -EIO;
3846 +- }
3847 +- goto out;
3848 ++ reg = __smu_cmn_poll_stat(smu);
3849 ++ res = __smu_cmn_reg2errno(smu, reg);
3850 ++ if (reg == SMU_RESP_NONE ||
3851 ++ reg == SMU_RESP_BUSY_OTHER ||
3852 ++ res == -EREMOTEIO) {
3853 ++ __smu_cmn_reg_print_error(smu, reg, index, param, msg);
3854 ++ goto Out;
3855 + }
3856 +-
3857 ++ __smu_cmn_send_msg(smu, (uint16_t) index, param);
3858 ++ reg = __smu_cmn_poll_stat(smu);
3859 ++ res = __smu_cmn_reg2errno(smu, reg);
3860 ++ if (res == -EREMOTEIO)
3861 ++ __smu_cmn_reg_print_error(smu, reg, index, param, msg);
3862 + if (read_arg)
3863 + smu_cmn_read_arg(smu, read_arg);
3864 +-
3865 +- ret = 0; /* 0 as driver return value */
3866 +-out:
3867 ++Out:
3868 + mutex_unlock(&smu->message_lock);
3869 +- return ret;
3870 ++ return res;
3871 + }
3872 +
3873 + int smu_cmn_send_smc_msg(struct smu_context *smu,
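With the rework above, smu_cmn_send_smc_msg_with_param() returns a differentiated -errno instead of collapsing everything to -EIO/-ETIME. One plausible way a caller could use that -- purely illustrative, the retry policy is not part of the patch -- is to retry only on -EBUSY, which per the SMU_RESP_BUSY_OTHER comment above may be retried after 10 us:

    #include <linux/delay.h>
    #include <linux/errno.h>

    /* Sketch only: demo wrapper around the API declared in smu_cmn.h. */
    static int demo_send_retry(struct smu_context *smu,
                               enum smu_message_type msg, uint32_t param)
    {
            int tries = 3;
            int ret;

            do {
                    ret = smu_cmn_send_smc_msg_with_param(smu, msg,
                                                          param, NULL);
                    if (ret != -EBUSY)
                            break;
                    udelay(10);     /* SMU busy with another client */
            } while (--tries);

            return ret;
    }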
3874 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
3875 +index 9add5f16ff562..16993daa2ae04 100644
3876 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
3877 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
3878 +@@ -27,7 +27,8 @@
3879 +
3880 + #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
3881 + int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
3882 +- uint16_t msg, uint32_t param);
3883 ++ uint16_t msg_index,
3884 ++ uint32_t param);
3885 + int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
3886 + enum smu_message_type msg,
3887 + uint32_t param,
3888 +diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
3889 +index 7149ed40af83c..2f2a09adb4bc8 100644
3890 +--- a/drivers/gpu/drm/bridge/ite-it66121.c
3891 ++++ b/drivers/gpu/drm/bridge/ite-it66121.c
3892 +@@ -536,6 +536,8 @@ static int it66121_bridge_attach(struct drm_bridge *bridge,
3893 + return -EINVAL;
3894 +
3895 + ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
3896 ++ if (ret)
3897 ++ return ret;
3898 +
3899 + ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
3900 + IT66121_CLK_BANK_PWROFF_RCLK, 0);
3901 +diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
3902 +index ca04c34e82518..997b8827fed27 100644
3903 +--- a/drivers/gpu/drm/drm_of.c
3904 ++++ b/drivers/gpu/drm/drm_of.c
3905 +@@ -315,7 +315,7 @@ static int drm_of_lvds_get_remote_pixels_type(
3906 +
3907 + remote_port = of_graph_get_remote_port(endpoint);
3908 + if (!remote_port) {
3909 +- of_node_put(remote_port);
3910 ++ of_node_put(endpoint);
3911 + return -EPIPE;
3912 + }
3913 +
3914 +@@ -331,8 +331,10 @@ static int drm_of_lvds_get_remote_pixels_type(
3915 + * configurations by passing the endpoints explicitly to
3916 + * drm_of_lvds_get_dual_link_pixel_order().
3917 + */
3918 +- if (!current_pt || pixels_type != current_pt)
3919 ++ if (!current_pt || pixels_type != current_pt) {
3920 ++ of_node_put(endpoint);
3921 + return -EINVAL;
3922 ++ }
3923 + }
3924 +
3925 + return pixels_type;
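Both drm_of.c hunks are the same leak class: of_graph getters return nodes with their refcount raised, and each early return must drop the node it is actually holding -- here the endpoint, not the already-NULL remote port. Reduced to a sketch with placeholder names:

    #include <linux/of.h>
    #include <linux/of_graph.h>
    #include <linux/errno.h>

    /* Illustrative only: 'endpoint' is held by the caller's iterator
     * and must be released on every early exit from the loop body.
     */
    static int demo_probe_endpoint(struct device_node *endpoint)
    {
            struct device_node *remote = of_graph_get_remote_port(endpoint);

            if (!remote) {
                    of_node_put(endpoint);
                    return -EPIPE;
            }

            of_node_put(remote);
            return 0;
    }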
3926 +diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
3927 +index cab4d2c370a71..0ed665501ac48 100644
3928 +--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
3929 ++++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
3930 +@@ -897,13 +897,14 @@ static void g2d_runqueue_worker(struct work_struct *work)
3931 + ret = pm_runtime_resume_and_get(g2d->dev);
3932 + if (ret < 0) {
3933 + dev_err(g2d->dev, "failed to enable G2D device.\n");
3934 +- return;
3935 ++ goto out;
3936 + }
3937 +
3938 + g2d_dma_start(g2d, g2d->runqueue_node);
3939 + }
3940 + }
3941 +
3942 ++out:
3943 + mutex_unlock(&g2d->runqueue_mutex);
3944 + }
3945 +
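The exynos g2d fix is the classic locked-region early return: the bare 'return' skipped mutex_unlock(), leaving runqueue_mutex held forever. The single-exit shape it restores, under placeholder names:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_mutex);

    static void demo_run(int (*resume_hw)(void))
    {
            mutex_lock(&demo_mutex);

            if (resume_hw() < 0)
                    goto out;  /* a bare 'return' here would leak the mutex */

            /* ... kick off the hardware ... */
    out:
            mutex_unlock(&demo_mutex);
    }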
3946 +diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
3947 +index 432bdcc57ac9e..a1332878857b2 100644
3948 +--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
3949 ++++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
3950 +@@ -117,7 +117,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
3951 + continue;
3952 + }
3953 +
3954 +- if (!connector) {
3955 ++ if (list_entry_is_head(connector, &mode_config->connector_list, head)) {
3956 + DRM_ERROR("Couldn't find connector when setting mode");
3957 + gma_power_end(dev);
3958 + return;
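The gma500 fix targets a common list_for_each_entry() misconception: when the loop runs to completion the cursor is not NULL but a bogus pointer computed from the list head, so "if (!connector)" can never fire. list_entry_is_head() is the correct not-found test. A self-contained sketch:

    #include <linux/list.h>

    struct demo_item {
            struct list_head head;
            int id;
    };

    /* Illustrative only: detect "no match" after a full traversal. */
    static struct demo_item *demo_find(struct list_head *list, int id)
    {
            struct demo_item *it;

            list_for_each_entry(it, list, head)
                    if (it->id == id)
                            break;

            if (list_entry_is_head(it, list, head))
                    return NULL;    /* walked off the end, no match */

            return it;
    }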
3959 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
3960 +index f8a74f6cdc4cb..64740ddb983ea 100644
3961 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
3962 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
3963 +@@ -345,10 +345,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
3964 + int i;
3965 +
3966 + for (i = 0; i < ctx->mixer_count; i++) {
3967 +- DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
3968 +- DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
3969 +- DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
3970 +- DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
3971 ++ enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
3972 ++
3973 ++ DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
3974 ++ DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
3975 ++ DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
3976 ++ DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
3977 + }
3978 +
3979 + DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
3980 +diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
3981 +index 4a5b518288b06..0712752742f4f 100644
3982 +--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
3983 ++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
3984 +@@ -19,30 +19,12 @@ static int mdp4_hw_init(struct msm_kms *kms)
3985 + {
3986 + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
3987 + struct drm_device *dev = mdp4_kms->dev;
3988 +- uint32_t version, major, minor, dmap_cfg, vg_cfg;
3989 ++ u32 dmap_cfg, vg_cfg;
3990 + unsigned long clk;
3991 + int ret = 0;
3992 +
3993 + pm_runtime_get_sync(dev->dev);
3994 +
3995 +- mdp4_enable(mdp4_kms);
3996 +- version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
3997 +- mdp4_disable(mdp4_kms);
3998 +-
3999 +- major = FIELD(version, MDP4_VERSION_MAJOR);
4000 +- minor = FIELD(version, MDP4_VERSION_MINOR);
4001 +-
4002 +- DBG("found MDP4 version v%d.%d", major, minor);
4003 +-
4004 +- if (major != 4) {
4005 +- DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
4006 +- major, minor);
4007 +- ret = -ENXIO;
4008 +- goto out;
4009 +- }
4010 +-
4011 +- mdp4_kms->rev = minor;
4012 +-
4013 + if (mdp4_kms->rev > 1) {
4014 + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
4015 + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
4016 +@@ -88,7 +70,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
4017 + if (mdp4_kms->rev > 1)
4018 + mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
4019 +
4020 +-out:
4021 + pm_runtime_put_sync(dev->dev);
4022 +
4023 + return ret;
4024 +@@ -411,6 +392,22 @@ fail:
4025 + return ret;
4026 + }
4027 +
4028 ++static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
4029 ++ u32 *major, u32 *minor)
4030 ++{
4031 ++ struct drm_device *dev = mdp4_kms->dev;
4032 ++ u32 version;
4033 ++
4034 ++ mdp4_enable(mdp4_kms);
4035 ++ version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
4036 ++ mdp4_disable(mdp4_kms);
4037 ++
4038 ++ *major = FIELD(version, MDP4_VERSION_MAJOR);
4039 ++ *minor = FIELD(version, MDP4_VERSION_MINOR);
4040 ++
4041 ++ DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
4042 ++}
4043 ++
4044 + struct msm_kms *mdp4_kms_init(struct drm_device *dev)
4045 + {
4046 + struct platform_device *pdev = to_platform_device(dev->dev);
4047 +@@ -419,6 +416,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
4048 + struct msm_kms *kms = NULL;
4049 + struct msm_gem_address_space *aspace;
4050 + int irq, ret;
4051 ++ u32 major, minor;
4052 +
4053 + mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
4054 + if (!mdp4_kms) {
4055 +@@ -479,15 +477,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
4056 + if (IS_ERR(mdp4_kms->pclk))
4057 + mdp4_kms->pclk = NULL;
4058 +
4059 +- if (mdp4_kms->rev >= 2) {
4060 +- mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
4061 +- if (IS_ERR(mdp4_kms->lut_clk)) {
4062 +- DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
4063 +- ret = PTR_ERR(mdp4_kms->lut_clk);
4064 +- goto fail;
4065 +- }
4066 +- }
4067 +-
4068 + mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
4069 + if (IS_ERR(mdp4_kms->axi_clk)) {
4070 + DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
4071 +@@ -496,8 +485,27 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
4072 + }
4073 +
4074 + clk_set_rate(mdp4_kms->clk, config->max_clk);
4075 +- if (mdp4_kms->lut_clk)
4076 ++
4077 ++ read_mdp_hw_revision(mdp4_kms, &major, &minor);
4078 ++
4079 ++ if (major != 4) {
4080 ++ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
4081 ++ major, minor);
4082 ++ ret = -ENXIO;
4083 ++ goto fail;
4084 ++ }
4085 ++
4086 ++ mdp4_kms->rev = minor;
4087 ++
4088 ++ if (mdp4_kms->rev >= 2) {
4089 ++ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
4090 ++ if (IS_ERR(mdp4_kms->lut_clk)) {
4091 ++ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
4092 ++ ret = PTR_ERR(mdp4_kms->lut_clk);
4093 ++ goto fail;
4094 ++ }
4095 + clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
4096 ++ }
4097 +
4098 + pm_runtime_enable(dev->dev);
4099 + mdp4_kms->rpm_enabled = true;
4100 +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
4101 +index 867388a399adf..997fd67f73799 100644
4102 +--- a/drivers/gpu/drm/msm/dp/dp_display.c
4103 ++++ b/drivers/gpu/drm/msm/dp/dp_display.c
4104 +@@ -55,7 +55,6 @@ enum {
4105 + EV_HPD_INIT_SETUP,
4106 + EV_HPD_PLUG_INT,
4107 + EV_IRQ_HPD_INT,
4108 +- EV_HPD_REPLUG_INT,
4109 + EV_HPD_UNPLUG_INT,
4110 + EV_USER_NOTIFICATION,
4111 + EV_CONNECT_PENDING_TIMEOUT,
4112 +@@ -1119,9 +1118,6 @@ static int hpd_event_thread(void *data)
4113 + case EV_IRQ_HPD_INT:
4114 + dp_irq_hpd_handle(dp_priv, todo->data);
4115 + break;
4116 +- case EV_HPD_REPLUG_INT:
4117 +- /* do nothing */
4118 +- break;
4119 + case EV_USER_NOTIFICATION:
4120 + dp_display_send_hpd_notification(dp_priv,
4121 + todo->data);
4122 +@@ -1165,10 +1161,8 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
4123 +
4124 + if (hpd_isr_status & 0x0F) {
4125 + /* hpd related interrupts */
4126 +- if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK ||
4127 +- hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
4128 ++ if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
4129 + dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
4130 +- }
4131 +
4132 + if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
4133 + /* stop sentinel connect pending checking */
4134 +@@ -1176,8 +1170,10 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
4135 + dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
4136 + }
4137 +
4138 +- if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK)
4139 +- dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0);
4140 ++ if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
4141 ++ dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
4142 ++ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
4143 ++ }
4144 +
4145 + if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
4146 + dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
4147 +@@ -1286,7 +1282,7 @@ static int dp_pm_resume(struct device *dev)
4148 + struct platform_device *pdev = to_platform_device(dev);
4149 + struct msm_dp *dp_display = platform_get_drvdata(pdev);
4150 + struct dp_display_private *dp;
4151 +- u32 status;
4152 ++ int sink_count = 0;
4153 +
4154 + dp = container_of(dp_display, struct dp_display_private, dp_display);
4155 +
4156 +@@ -1300,14 +1296,25 @@ static int dp_pm_resume(struct device *dev)
4157 +
4158 + dp_catalog_ctrl_hpd_config(dp->catalog);
4159 +
4160 +- status = dp_catalog_link_is_connected(dp->catalog);
4161 ++ /*
4162 ++ * set sink to normal operation mode -- D0
4163 ++ * before dpcd read
4164 ++ */
4165 ++ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
4166 ++
4167 ++ if (dp_catalog_link_is_connected(dp->catalog)) {
4168 ++ sink_count = drm_dp_read_sink_count(dp->aux);
4169 ++ if (sink_count < 0)
4170 ++ sink_count = 0;
4171 ++ }
4172 +
4173 ++ dp->link->sink_count = sink_count;
4174 + /*
4175 + * can not declared display is connected unless
4176 + * HDMI cable is plugged in and sink_count of
4177 + * dongle become 1
4178 + */
4179 +- if (status && dp->link->sink_count)
4180 ++ if (dp->link->sink_count)
4181 + dp->dp_display.is_connected = true;
4182 + else
4183 + dp->dp_display.is_connected = false;
4184 +diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
4185 +index 75afc12a7b25a..29d11f1cb79b0 100644
4186 +--- a/drivers/gpu/drm/msm/dsi/dsi.c
4187 ++++ b/drivers/gpu/drm/msm/dsi/dsi.c
4188 +@@ -26,8 +26,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
4189 + }
4190 +
4191 + phy_pdev = of_find_device_by_node(phy_node);
4192 +- if (phy_pdev)
4193 ++ if (phy_pdev) {
4194 + msm_dsi->phy = platform_get_drvdata(phy_pdev);
4195 ++ msm_dsi->phy_dev = &phy_pdev->dev;
4196 ++ }
4197 +
4198 + of_node_put(phy_node);
4199 +
4200 +@@ -36,8 +38,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
4201 + return -EPROBE_DEFER;
4202 + }
4203 +
4204 +- msm_dsi->phy_dev = get_device(&phy_pdev->dev);
4205 +-
4206 + return 0;
4207 + }
4208 +
4209 +diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
4210 +index 9b8fa2ad0d840..729ab68d02034 100644
4211 +--- a/drivers/gpu/drm/msm/msm_drv.c
4212 ++++ b/drivers/gpu/drm/msm/msm_drv.c
4213 +@@ -539,6 +539,7 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
4214 + if (IS_ERR(priv->event_thread[i].worker)) {
4215 + ret = PTR_ERR(priv->event_thread[i].worker);
4216 + DRM_DEV_ERROR(dev, "failed to create crtc_event kthread\n");
4217 ++ ret = PTR_ERR(priv->event_thread[i].worker);
4218 + goto err_msm_uninit;
4219 + }
4220 +
4221 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
4222 +index 6da93551e2e5f..c277d3f61a5ef 100644
4223 +--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
4224 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
4225 +@@ -51,6 +51,7 @@ static const struct mxsfb_devdata mxsfb_devdata[] = {
4226 + .hs_wdth_mask = 0xff,
4227 + .hs_wdth_shift = 24,
4228 + .has_overlay = false,
4229 ++ .has_ctrl2 = false,
4230 + },
4231 + [MXSFB_V4] = {
4232 + .transfer_count = LCDC_V4_TRANSFER_COUNT,
4233 +@@ -59,6 +60,7 @@ static const struct mxsfb_devdata mxsfb_devdata[] = {
4234 + .hs_wdth_mask = 0x3fff,
4235 + .hs_wdth_shift = 18,
4236 + .has_overlay = false,
4237 ++ .has_ctrl2 = true,
4238 + },
4239 + [MXSFB_V6] = {
4240 + .transfer_count = LCDC_V4_TRANSFER_COUNT,
4241 +@@ -67,6 +69,7 @@ static const struct mxsfb_devdata mxsfb_devdata[] = {
4242 + .hs_wdth_mask = 0x3fff,
4243 + .hs_wdth_shift = 18,
4244 + .has_overlay = true,
4245 ++ .has_ctrl2 = true,
4246 + },
4247 + };
4248 +
4249 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
4250 +index 399d23e91ed10..7c720e226fdfd 100644
4251 +--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h
4252 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
4253 +@@ -22,6 +22,7 @@ struct mxsfb_devdata {
4254 + unsigned int hs_wdth_mask;
4255 + unsigned int hs_wdth_shift;
4256 + bool has_overlay;
4257 ++ bool has_ctrl2;
4258 + };
4259 +
4260 + struct mxsfb_drm_private {
4261 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
4262 +index 300e7bab0f431..54f905ac75c07 100644
4263 +--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
4264 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
4265 +@@ -107,6 +107,14 @@ static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
4266 + clk_prepare_enable(mxsfb->clk_disp_axi);
4267 + clk_prepare_enable(mxsfb->clk);
4268 +
4269 ++ /* Increase number of outstanding requests on all supported IPs */
4270 ++ if (mxsfb->devdata->has_ctrl2) {
4271 ++ reg = readl(mxsfb->base + LCDC_V4_CTRL2);
4272 ++ reg &= ~CTRL2_SET_OUTSTANDING_REQS_MASK;
4273 ++ reg |= CTRL2_SET_OUTSTANDING_REQS_16;
4274 ++ writel(reg, mxsfb->base + LCDC_V4_CTRL2);
4275 ++ }
4276 ++
4277 + /* If it was disabled, re-enable the mode again */
4278 + writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_SET);
4279 +
4280 +@@ -115,6 +123,35 @@ static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
4281 + reg |= VDCTRL4_SYNC_SIGNALS_ON;
4282 + writel(reg, mxsfb->base + LCDC_VDCTRL4);
4283 +
4284 ++ /*
4285 ++ * Enable recovery on underflow.
4286 ++ *
4287 ++ * There is some sort of corner case behavior of the controller,
4288 ++ * which could rarely be triggered at least on i.MX6SX connected
4289 ++ * to 800x480 DPI panel and i.MX8MM connected to DPI->DSI->LVDS
4290 ++ * bridged 1920x1080 panel (and likely on other setups too), where
4291 ++ * the image on the panel shifts to the right and wraps around.
4292 ++ * This happens either when the controller is enabled on boot or
4293 ++ * even later during run time. The condition does not correct
4294 ++ * itself automatically, i.e. the display image remains shifted.
4295 ++ *
4296 ++ * It seems this problem is known and is due to sporadic underflows
4297 ++ * of the LCDIF FIFO. While the LCDIF IP does have underflow/overflow
4298 ++ * IRQs, neither of the IRQs trigger and neither IRQ status bit is
4299 ++ * asserted when this condition occurs.
4300 ++ *
4301 ++ * All known revisions of the LCDIF IP have CTRL1 RECOVER_ON_UNDERFLOW
4302 ++ * bit, which is described in the reference manual since i.MX23 as
4303 ++ * "
4304 ++ * Set this bit to enable the LCDIF block to recover in the next
4305 ++ * field/frame if there was an underflow in the current field/frame.
4306 ++ * "
4307 ++ * Enable this bit to mitigate the sporadic underflows.
4308 ++ */
4309 ++ reg = readl(mxsfb->base + LCDC_CTRL1);
4310 ++ reg |= CTRL1_RECOVER_ON_UNDERFLOW;
4311 ++ writel(reg, mxsfb->base + LCDC_CTRL1);
4312 ++
4313 + writel(CTRL_RUN, mxsfb->base + LCDC_CTRL + REG_SET);
4314 + }
4315 +
4316 +@@ -206,6 +243,9 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
4317 +
4318 + /* Clear the FIFOs */
4319 + writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
4320 ++ readl(mxsfb->base + LCDC_CTRL1);
4321 ++ writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_CLR);
4322 ++ readl(mxsfb->base + LCDC_CTRL1);
4323 +
4324 + if (mxsfb->devdata->has_overlay)
4325 + writel(0, mxsfb->base + LCDC_AS_CTRL);
4326 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
4327 +index 55d28a27f9124..694fea13e893e 100644
4328 +--- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h
4329 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
4330 +@@ -15,6 +15,7 @@
4331 + #define LCDC_CTRL 0x00
4332 + #define LCDC_CTRL1 0x10
4333 + #define LCDC_V3_TRANSFER_COUNT 0x20
4334 ++#define LCDC_V4_CTRL2 0x20
4335 + #define LCDC_V4_TRANSFER_COUNT 0x30
4336 + #define LCDC_V4_CUR_BUF 0x40
4337 + #define LCDC_V4_NEXT_BUF 0x50
4338 +@@ -54,12 +55,20 @@
4339 + #define CTRL_DF24 BIT(1)
4340 + #define CTRL_RUN BIT(0)
4341 +
4342 ++#define CTRL1_RECOVER_ON_UNDERFLOW BIT(24)
4343 + #define CTRL1_FIFO_CLEAR BIT(21)
4344 + #define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
4345 + #define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
4346 + #define CTRL1_CUR_FRAME_DONE_IRQ_EN BIT(13)
4347 + #define CTRL1_CUR_FRAME_DONE_IRQ BIT(9)
4348 +
4349 ++#define CTRL2_SET_OUTSTANDING_REQS_1 0
4350 ++#define CTRL2_SET_OUTSTANDING_REQS_2 (0x1 << 21)
4351 ++#define CTRL2_SET_OUTSTANDING_REQS_4 (0x2 << 21)
4352 ++#define CTRL2_SET_OUTSTANDING_REQS_8 (0x3 << 21)
4353 ++#define CTRL2_SET_OUTSTANDING_REQS_16 (0x4 << 21)
4354 ++#define CTRL2_SET_OUTSTANDING_REQS_MASK (0x7 << 21)
4355 ++
4356 + #define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
4357 + #define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
4358 + #define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
4359 +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
4360 +index 125ed973feaad..a2a09c51eed7b 100644
4361 +--- a/drivers/gpu/drm/panfrost/panfrost_device.c
4362 ++++ b/drivers/gpu/drm/panfrost/panfrost_device.c
4363 +@@ -54,7 +54,8 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
4364 + if (IS_ERR(pfdev->bus_clock)) {
4365 + dev_err(pfdev->dev, "get bus_clock failed %ld\n",
4366 + PTR_ERR(pfdev->bus_clock));
4367 +- return PTR_ERR(pfdev->bus_clock);
4368 ++ err = PTR_ERR(pfdev->bus_clock);
4369 ++ goto disable_clock;
4370 + }
4371 +
4372 + if (pfdev->bus_clock) {
4373 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
4374 +index bfbff90588cbf..c22551c2facb1 100644
4375 +--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
4376 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
4377 +@@ -556,8 +556,6 @@ static int rcar_du_remove(struct platform_device *pdev)
4378 +
4379 + drm_kms_helper_poll_fini(ddev);
4380 +
4381 +- drm_dev_put(ddev);
4382 +-
4383 + return 0;
4384 + }
4385 +
4386 +diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
4387 +index 2267bd4c34725..6018b9d1b1fbe 100644
4388 +--- a/drivers/hv/hv_snapshot.c
4389 ++++ b/drivers/hv/hv_snapshot.c
4390 +@@ -375,6 +375,7 @@ hv_vss_init(struct hv_util_service *srv)
4391 + }
4392 + recv_buffer = srv->recv_buffer;
4393 + vss_transaction.recv_channel = srv->channel;
4394 ++ vss_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
4395 +
4396 + /*
4397 + * When this driver loads, the user level daemon that
4398 +diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
4399 +index d712c61c1f5e9..0241ed84b692f 100644
4400 +--- a/drivers/hwmon/Makefile
4401 ++++ b/drivers/hwmon/Makefile
4402 +@@ -45,7 +45,6 @@ obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
4403 + obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
4404 + obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
4405 + obj-$(CONFIG_SENSORS_AHT10) += aht10.o
4406 +-obj-$(CONFIG_SENSORS_AMD_ENERGY) += amd_energy.o
4407 + obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
4408 + obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o
4409 + obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o
4410 +diff --git a/drivers/hwmon/pmbus/bpa-rs600.c b/drivers/hwmon/pmbus/bpa-rs600.c
4411 +index 2be69fedfa361..be76efe67d83f 100644
4412 +--- a/drivers/hwmon/pmbus/bpa-rs600.c
4413 ++++ b/drivers/hwmon/pmbus/bpa-rs600.c
4414 +@@ -12,15 +12,6 @@
4415 + #include <linux/pmbus.h>
4416 + #include "pmbus.h"
4417 +
4418 +-#define BPARS600_MFR_VIN_MIN 0xa0
4419 +-#define BPARS600_MFR_VIN_MAX 0xa1
4420 +-#define BPARS600_MFR_IIN_MAX 0xa2
4421 +-#define BPARS600_MFR_PIN_MAX 0xa3
4422 +-#define BPARS600_MFR_VOUT_MIN 0xa4
4423 +-#define BPARS600_MFR_VOUT_MAX 0xa5
4424 +-#define BPARS600_MFR_IOUT_MAX 0xa6
4425 +-#define BPARS600_MFR_POUT_MAX 0xa7
4426 +-
4427 + static int bpa_rs600_read_byte_data(struct i2c_client *client, int page, int reg)
4428 + {
4429 + int ret;
4430 +@@ -81,29 +72,13 @@ static int bpa_rs600_read_word_data(struct i2c_client *client, int page, int pha
4431 +
4432 + switch (reg) {
4433 + case PMBUS_VIN_UV_WARN_LIMIT:
4434 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_VIN_MIN);
4435 +- break;
4436 + case PMBUS_VIN_OV_WARN_LIMIT:
4437 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_VIN_MAX);
4438 +- break;
4439 + case PMBUS_VOUT_UV_WARN_LIMIT:
4440 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_VOUT_MIN);
4441 +- break;
4442 + case PMBUS_VOUT_OV_WARN_LIMIT:
4443 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_VOUT_MAX);
4444 +- break;
4445 + case PMBUS_IIN_OC_WARN_LIMIT:
4446 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_IIN_MAX);
4447 +- break;
4448 + case PMBUS_IOUT_OC_WARN_LIMIT:
4449 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_IOUT_MAX);
4450 +- break;
4451 + case PMBUS_PIN_OP_WARN_LIMIT:
4452 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_PIN_MAX);
4453 +- break;
4454 + case PMBUS_POUT_OP_WARN_LIMIT:
4455 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_POUT_MAX);
4456 +- break;
4457 + case PMBUS_VIN_UV_FAULT_LIMIT:
4458 + case PMBUS_VIN_OV_FAULT_LIMIT:
4459 + case PMBUS_VOUT_UV_FAULT_LIMIT:
4460 +diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
4461 +index 803dad70e2a71..a2add128d0843 100644
4462 +--- a/drivers/i2c/busses/i2c-highlander.c
4463 ++++ b/drivers/i2c/busses/i2c-highlander.c
4464 +@@ -379,7 +379,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
4465 + platform_set_drvdata(pdev, dev);
4466 +
4467 + dev->irq = platform_get_irq(pdev, 0);
4468 +- if (iic_force_poll)
4469 ++ if (dev->irq < 0 || iic_force_poll)
4470 + dev->irq = 0;
4471 +
4472 + if (dev->irq) {
4473 +diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
4474 +index aa00ba8bcb70f..61ae58f570475 100644
4475 +--- a/drivers/i2c/busses/i2c-hix5hd2.c
4476 ++++ b/drivers/i2c/busses/i2c-hix5hd2.c
4477 +@@ -413,7 +413,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
4478 + return PTR_ERR(priv->regs);
4479 +
4480 + irq = platform_get_irq(pdev, 0);
4481 +- if (irq <= 0)
4482 ++ if (irq < 0)
4483 + return irq;
4484 +
4485 + priv->clk = devm_clk_get(&pdev->dev, NULL);
4486 +diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
4487 +index cfecaf18ccbb7..4a6ff54d87fe8 100644
4488 +--- a/drivers/i2c/busses/i2c-iop3xx.c
4489 ++++ b/drivers/i2c/busses/i2c-iop3xx.c
4490 +@@ -469,16 +469,14 @@ iop3xx_i2c_probe(struct platform_device *pdev)
4491 +
4492 + irq = platform_get_irq(pdev, 0);
4493 + if (irq < 0) {
4494 +- ret = -ENXIO;
4495 ++ ret = irq;
4496 + goto unmap;
4497 + }
4498 + ret = request_irq(irq, iop3xx_i2c_irq_handler, 0,
4499 + pdev->name, adapter_data);
4500 +
4501 +- if (ret) {
4502 +- ret = -EIO;
4503 ++ if (ret)
4504 + goto unmap;
4505 +- }
4506 +
4507 + memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
4508 + new_adapter->owner = THIS_MODULE;
4509 +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
4510 +index 4ca716e091495..477480d1de6bd 100644
4511 +--- a/drivers/i2c/busses/i2c-mt65xx.c
4512 ++++ b/drivers/i2c/busses/i2c-mt65xx.c
4513 +@@ -1211,7 +1211,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
4514 + return PTR_ERR(i2c->pdmabase);
4515 +
4516 + irq = platform_get_irq(pdev, 0);
4517 +- if (irq <= 0)
4518 ++ if (irq < 0)
4519 + return irq;
4520 +
4521 + init_completion(&i2c->msg_complete);
4522 +diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
4523 +index 4d82761e1585e..b49a1b170bb2f 100644
4524 +--- a/drivers/i2c/busses/i2c-s3c2410.c
4525 ++++ b/drivers/i2c/busses/i2c-s3c2410.c
4526 +@@ -1137,7 +1137,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
4527 + */
4528 + if (!(i2c->quirks & QUIRK_POLL)) {
4529 + i2c->irq = ret = platform_get_irq(pdev, 0);
4530 +- if (ret <= 0) {
4531 ++ if (ret < 0) {
4532 + dev_err(&pdev->dev, "cannot find IRQ\n");
4533 + clk_unprepare(i2c->clk);
4534 + return ret;
4535 +diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
4536 +index 31be1811d5e66..e4026c5416b15 100644
4537 +--- a/drivers/i2c/busses/i2c-synquacer.c
4538 ++++ b/drivers/i2c/busses/i2c-synquacer.c
4539 +@@ -578,7 +578,7 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
4540 +
4541 + i2c->irq = platform_get_irq(pdev, 0);
4542 + if (i2c->irq < 0)
4543 +- return -ENODEV;
4544 ++ return i2c->irq;
4545 +
4546 + ret = devm_request_irq(&pdev->dev, i2c->irq, synquacer_i2c_isr,
4547 + 0, dev_name(&pdev->dev), i2c);
4548 +diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
4549 +index f2241cedf5d3f..6d24dc3855229 100644
4550 +--- a/drivers/i2c/busses/i2c-xlp9xx.c
4551 ++++ b/drivers/i2c/busses/i2c-xlp9xx.c
4552 +@@ -517,7 +517,7 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
4553 + return PTR_ERR(priv->base);
4554 +
4555 + priv->irq = platform_get_irq(pdev, 0);
4556 +- if (priv->irq <= 0)
4557 ++ if (priv->irq < 0)
4558 + return priv->irq;
4559 + /* SMBAlert irq */
4560 + priv->alert_data.irq = platform_get_irq(pdev, 1);
4561 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
4562 +index 3f1c5a4f158bf..19713cdd7b789 100644
4563 +--- a/drivers/infiniband/hw/mlx5/mr.c
4564 ++++ b/drivers/infiniband/hw/mlx5/mr.c
4565 +@@ -1024,7 +1024,7 @@ static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
4566 +
4567 + if (size > MLX5_SPARE_UMR_CHUNK) {
4568 + size = MLX5_SPARE_UMR_CHUNK;
4569 +- *nents = get_order(size) / ent_size;
4570 ++ *nents = size / ent_size;
4571 + res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
4572 + get_order(size));
4573 + if (res)
4574 +diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
4575 +index b8c06bd8659e9..6fc145aacaf02 100644
4576 +--- a/drivers/irqchip/irq-apple-aic.c
4577 ++++ b/drivers/irqchip/irq-apple-aic.c
4578 +@@ -226,7 +226,7 @@ static void aic_irq_eoi(struct irq_data *d)
4579 + * Reading the interrupt reason automatically acknowledges and masks
4580 + * the IRQ, so we just unmask it here if needed.
4581 + */
4582 +- if (!irqd_irq_disabled(d) && !irqd_irq_masked(d))
4583 ++ if (!irqd_irq_masked(d))
4584 + aic_irq_unmask(d);
4585 + }
4586 +
4587 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
4588 +index e0f4debe64e13..3e61210da04be 100644
4589 +--- a/drivers/irqchip/irq-gic-v3.c
4590 ++++ b/drivers/irqchip/irq-gic-v3.c
4591 +@@ -100,6 +100,27 @@ EXPORT_SYMBOL(gic_pmr_sync);
4592 + DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
4593 + EXPORT_SYMBOL(gic_nonsecure_priorities);
4594 +
4595 ++/*
4596 ++ * When the Non-secure world has access to group 0 interrupts (as a
4597 ++ * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
4598 ++ * return the Distributor's view of the interrupt priority.
4599 ++ *
4600 ++ * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
4601 ++ * written by software is moved to the Non-secure range by the Distributor.
4602 ++ *
4603 ++ * If both are true (which is when gic_nonsecure_priorities gets enabled),
4604 ++ * we need to shift down the priority programmed by software to match it
4605 ++ * against the value returned by ICC_RPR_EL1.
4606 ++ */
4607 ++#define GICD_INT_RPR_PRI(priority) \
4608 ++ ({ \
4609 ++ u32 __priority = (priority); \
4610 ++ if (static_branch_unlikely(&gic_nonsecure_priorities)) \
4611 ++ __priority = 0x80 | (__priority >> 1); \
4612 ++ \
4613 ++ __priority; \
4614 ++ })
4615 ++
4616 + /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
4617 + static refcount_t *ppi_nmi_refs;
4618 +
4619 +@@ -687,7 +708,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
4620 + return;
4621 +
4622 + if (gic_supports_nmi() &&
4623 +- unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
4624 ++ unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
4625 + gic_handle_nmi(irqnr, regs);
4626 + return;
4627 + }
4628 +diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
4629 +index f790ca6d78aa4..a4eb8a2181c7f 100644
4630 +--- a/drivers/irqchip/irq-loongson-pch-pic.c
4631 ++++ b/drivers/irqchip/irq-loongson-pch-pic.c
4632 +@@ -92,18 +92,22 @@ static int pch_pic_set_type(struct irq_data *d, unsigned int type)
4633 + case IRQ_TYPE_EDGE_RISING:
4634 + pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
4635 + pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
4636 ++ irq_set_handler_locked(d, handle_edge_irq);
4637 + break;
4638 + case IRQ_TYPE_EDGE_FALLING:
4639 + pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
4640 + pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
4641 ++ irq_set_handler_locked(d, handle_edge_irq);
4642 + break;
4643 + case IRQ_TYPE_LEVEL_HIGH:
4644 + pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
4645 + pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
4646 ++ irq_set_handler_locked(d, handle_level_irq);
4647 + break;
4648 + case IRQ_TYPE_LEVEL_LOW:
4649 + pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
4650 + pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
4651 ++ irq_set_handler_locked(d, handle_level_irq);
4652 + break;
4653 + default:
4654 + ret = -EINVAL;
4655 +@@ -113,11 +117,24 @@ static int pch_pic_set_type(struct irq_data *d, unsigned int type)
4656 + return ret;
4657 + }
4658 +
4659 ++static void pch_pic_ack_irq(struct irq_data *d)
4660 ++{
4661 ++ unsigned int reg;
4662 ++ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
4663 ++
4664 ++ reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4);
4665 ++ if (reg & BIT(PIC_REG_BIT(d->hwirq))) {
4666 ++ writel(BIT(PIC_REG_BIT(d->hwirq)),
4667 ++ priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
4668 ++ }
4669 ++ irq_chip_ack_parent(d);
4670 ++}
4671 ++
4672 + static struct irq_chip pch_pic_irq_chip = {
4673 + .name = "PCH PIC",
4674 + .irq_mask = pch_pic_mask_irq,
4675 + .irq_unmask = pch_pic_unmask_irq,
4676 +- .irq_ack = irq_chip_ack_parent,
4677 ++ .irq_ack = pch_pic_ack_irq,
4678 + .irq_set_affinity = irq_chip_set_affinity_parent,
4679 + .irq_set_type = pch_pic_set_type,
4680 + };
4681 +diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
4682 +index 7eb2f44f16be5..aa14f0ebe7a02 100644
4683 +--- a/drivers/leds/blink/leds-lgm-sso.c
4684 ++++ b/drivers/leds/blink/leds-lgm-sso.c
4685 +@@ -631,8 +631,10 @@ __sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled)
4686 +
4687 + fwnode_for_each_child_node(fw_ssoled, fwnode_child) {
4688 + led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
4689 +- if (!led)
4690 +- return -ENOMEM;
4691 ++ if (!led) {
4692 ++ ret = -ENOMEM;
4693 ++ goto __dt_err;
4694 ++ }
4695 +
4696 + INIT_LIST_HEAD(&led->list);
4697 + led->priv = priv;
4698 +@@ -642,7 +644,7 @@ __sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled)
4699 + fwnode_child,
4700 + GPIOD_ASIS, NULL);
4701 + if (IS_ERR(led->gpiod)) {
4702 +- dev_err(dev, "led: get gpio fail!\n");
4703 ++ ret = dev_err_probe(dev, PTR_ERR(led->gpiod), "led: get gpio fail!\n");
4704 + goto __dt_err;
4705 + }
4706 +
4707 +@@ -662,8 +664,11 @@ __sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled)
4708 + desc->panic_indicator = 1;
4709 +
4710 + ret = fwnode_property_read_u32(fwnode_child, "reg", &prop);
4711 +- if (ret != 0 || prop >= SSO_LED_MAX_NUM) {
4712 ++ if (ret)
4713 ++ goto __dt_err;
4714 ++ if (prop >= SSO_LED_MAX_NUM) {
4715 + dev_err(dev, "invalid LED pin:%u\n", prop);
4716 ++ ret = -EINVAL;
4717 + goto __dt_err;
4718 + }
4719 + desc->pin = prop;
4720 +@@ -699,21 +704,22 @@ __sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled)
4721 + desc->brightness = LED_FULL;
4722 + }
4723 +
4724 +- if (sso_create_led(priv, led, fwnode_child))
4725 ++ ret = sso_create_led(priv, led, fwnode_child);
4726 ++ if (ret)
4727 + goto __dt_err;
4728 + }
4729 +- fwnode_handle_put(fw_ssoled);
4730 +
4731 + return 0;
4732 ++
4733 + __dt_err:
4734 +- fwnode_handle_put(fw_ssoled);
4735 ++ fwnode_handle_put(fwnode_child);
4736 + /* unregister leds */
4737 + list_for_each(p, &priv->led_list) {
4738 + led = list_entry(p, struct sso_led, list);
4739 + sso_led_shutdown(led);
4740 + }
4741 +
4742 +- return -EINVAL;
4743 ++ return ret;
4744 + }
4745 +
4746 + static int sso_led_dt_parse(struct sso_led_priv *priv)
4747 +@@ -731,6 +737,7 @@ static int sso_led_dt_parse(struct sso_led_priv *priv)
4748 + fw_ssoled = fwnode_get_named_child_node(fwnode, "ssoled");
4749 + if (fw_ssoled) {
4750 + ret = __sso_led_dt_parse(priv, fw_ssoled);
4751 ++ fwnode_handle_put(fw_ssoled);
4752 + if (ret)
4753 + return ret;
4754 + }
4755 +diff --git a/drivers/leds/flash/leds-rt8515.c b/drivers/leds/flash/leds-rt8515.c
4756 +index 590bfa180d104..44904fdee3cc0 100644
4757 +--- a/drivers/leds/flash/leds-rt8515.c
4758 ++++ b/drivers/leds/flash/leds-rt8515.c
4759 +@@ -343,8 +343,9 @@ static int rt8515_probe(struct platform_device *pdev)
4760 +
4761 + ret = devm_led_classdev_flash_register_ext(dev, fled, &init_data);
4762 + if (ret) {
4763 +- dev_err(dev, "can't register LED %s\n", led->name);
4764 ++ fwnode_handle_put(child);
4765 + mutex_destroy(&rt->lock);
4766 ++ dev_err(dev, "can't register LED %s\n", led->name);
4767 + return ret;
4768 + }
4769 +
4770 +@@ -362,6 +363,7 @@ static int rt8515_probe(struct platform_device *pdev)
4771 + */
4772 + }
4773 +
4774 ++ fwnode_handle_put(child);
4775 + return 0;
4776 + }
4777 +
4778 +diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
4779 +index 3b55af9a8c585..22c092a4394ab 100644
4780 +--- a/drivers/leds/leds-is31fl32xx.c
4781 ++++ b/drivers/leds/leds-is31fl32xx.c
4782 +@@ -386,6 +386,7 @@ static int is31fl32xx_parse_dt(struct device *dev,
4783 + dev_err(dev,
4784 + "Node %pOF 'reg' conflicts with another LED\n",
4785 + child);
4786 ++ ret = -EINVAL;
4787 + goto err;
4788 + }
4789 +
4790 +diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
4791 +index 3bb52d3165d90..d0160fde0f94c 100644
4792 +--- a/drivers/leds/leds-lt3593.c
4793 ++++ b/drivers/leds/leds-lt3593.c
4794 +@@ -97,10 +97,9 @@ static int lt3593_led_probe(struct platform_device *pdev)
4795 + init_data.default_label = ":";
4796 +
4797 + ret = devm_led_classdev_register_ext(dev, &led_data->cdev, &init_data);
4798 +- if (ret < 0) {
4799 +- fwnode_handle_put(child);
4800 ++ fwnode_handle_put(child);
4801 ++ if (ret < 0)
4802 + return ret;
4803 +- }
4804 +
4805 + platform_set_drvdata(pdev, led_data);
4806 +
4807 +diff --git a/drivers/leds/trigger/ledtrig-audio.c b/drivers/leds/trigger/ledtrig-audio.c
4808 +index f76621e88482d..c6b437e6369b8 100644
4809 +--- a/drivers/leds/trigger/ledtrig-audio.c
4810 ++++ b/drivers/leds/trigger/ledtrig-audio.c
4811 +@@ -6,10 +6,33 @@
4812 + #include <linux/kernel.h>
4813 + #include <linux/leds.h>
4814 + #include <linux/module.h>
4815 ++#include "../leds.h"
4816 +
4817 +-static struct led_trigger *ledtrig_audio[NUM_AUDIO_LEDS];
4818 + static enum led_brightness audio_state[NUM_AUDIO_LEDS];
4819 +
4820 ++static int ledtrig_audio_mute_activate(struct led_classdev *led_cdev)
4821 ++{
4822 ++ led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MUTE]);
4823 ++ return 0;
4824 ++}
4825 ++
4826 ++static int ledtrig_audio_micmute_activate(struct led_classdev *led_cdev)
4827 ++{
4828 ++ led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MICMUTE]);
4829 ++ return 0;
4830 ++}
4831 ++
4832 ++static struct led_trigger ledtrig_audio[NUM_AUDIO_LEDS] = {
4833 ++ [LED_AUDIO_MUTE] = {
4834 ++ .name = "audio-mute",
4835 ++ .activate = ledtrig_audio_mute_activate,
4836 ++ },
4837 ++ [LED_AUDIO_MICMUTE] = {
4838 ++ .name = "audio-micmute",
4839 ++ .activate = ledtrig_audio_micmute_activate,
4840 ++ },
4841 ++};
4842 ++
4843 + enum led_brightness ledtrig_audio_get(enum led_audio type)
4844 + {
4845 + return audio_state[type];
4846 +@@ -19,24 +42,22 @@ EXPORT_SYMBOL_GPL(ledtrig_audio_get);
4847 + void ledtrig_audio_set(enum led_audio type, enum led_brightness state)
4848 + {
4849 + audio_state[type] = state;
4850 +- led_trigger_event(ledtrig_audio[type], state);
4851 ++ led_trigger_event(&ledtrig_audio[type], state);
4852 + }
4853 + EXPORT_SYMBOL_GPL(ledtrig_audio_set);
4854 +
4855 + static int __init ledtrig_audio_init(void)
4856 + {
4857 +- led_trigger_register_simple("audio-mute",
4858 +- &ledtrig_audio[LED_AUDIO_MUTE]);
4859 +- led_trigger_register_simple("audio-micmute",
4860 +- &ledtrig_audio[LED_AUDIO_MICMUTE]);
4861 ++ led_trigger_register(&ledtrig_audio[LED_AUDIO_MUTE]);
4862 ++ led_trigger_register(&ledtrig_audio[LED_AUDIO_MICMUTE]);
4863 + return 0;
4864 + }
4865 + module_init(ledtrig_audio_init);
4866 +
4867 + static void __exit ledtrig_audio_exit(void)
4868 + {
4869 +- led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MUTE]);
4870 +- led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MICMUTE]);
4871 ++ led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MUTE]);
4872 ++ led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MICMUTE]);
4873 + }
4874 + module_exit(ledtrig_audio_exit);
4875 +
4876 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
4877 +index 185246a0d8551..d0f08e946453c 100644
4878 +--- a/drivers/md/bcache/super.c
4879 ++++ b/drivers/md/bcache/super.c
4880 +@@ -931,20 +931,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
4881 + n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
4882 + d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
4883 + if (!d->full_dirty_stripes)
4884 +- return -ENOMEM;
4885 ++ goto out_free_stripe_sectors_dirty;
4886 +
4887 + idx = ida_simple_get(&bcache_device_idx, 0,
4888 + BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
4889 + if (idx < 0)
4890 +- return idx;
4891 ++ goto out_free_full_dirty_stripes;
4892 +
4893 + if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
4894 + BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
4895 +- goto err;
4896 ++ goto out_ida_remove;
4897 +
4898 + d->disk = blk_alloc_disk(NUMA_NO_NODE);
4899 + if (!d->disk)
4900 +- goto err;
4901 ++ goto out_bioset_exit;
4902 +
4903 + set_capacity(d->disk, sectors);
4904 + snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
4905 +@@ -987,8 +987,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
4906 +
4907 + return 0;
4908 +
4909 +-err:
4910 ++out_bioset_exit:
4911 ++ bioset_exit(&d->bio_split);
4912 ++out_ida_remove:
4913 + ida_simple_remove(&bcache_device_idx, idx);
4914 ++out_free_full_dirty_stripes:
4915 ++ kvfree(d->full_dirty_stripes);
4916 ++out_free_stripe_sectors_dirty:
4917 ++ kvfree(d->stripe_sectors_dirty);
4918 + return -ENOMEM;
4919 +
4920 + }
4921 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
4922 +index 3c44c4bb40fc5..19598bd38939d 100644
4923 +--- a/drivers/md/raid1.c
4924 ++++ b/drivers/md/raid1.c
4925 +@@ -1329,6 +1329,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
4926 + struct raid1_plug_cb *plug = NULL;
4927 + int first_clone;
4928 + int max_sectors;
4929 ++ bool write_behind = false;
4930 +
4931 + if (mddev_is_clustered(mddev) &&
4932 + md_cluster_ops->area_resyncing(mddev, WRITE,
4933 +@@ -1381,6 +1382,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
4934 + max_sectors = r1_bio->sectors;
4935 + for (i = 0; i < disks; i++) {
4936 + struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
4937 ++
4938 ++ /*
4939 ++ * The write-behind io is only attempted on drives marked as
4940 ++ * write-mostly, which means we may need to allocate a write-behind
4941 ++ * bio later.
4942 ++ */
4943 ++ if (rdev && test_bit(WriteMostly, &rdev->flags))
4944 ++ write_behind = true;
4945 ++
4946 + if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
4947 + atomic_inc(&rdev->nr_pending);
4948 + blocked_rdev = rdev;
4949 +@@ -1454,6 +1464,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
4950 + goto retry_write;
4951 + }
4952 +
4953 ++ /*
4954 ++ * When using a bitmap, we may call alloc_behind_master_bio below.
4955 ++ * alloc_behind_master_bio allocates a copy of the data payload a page
4956 ++ * at a time and thus needs a new bio that can fit the whole payload
4957 ++ * of this bio in page-sized chunks.
4958 ++ */
4959 ++ if (write_behind && bitmap)
4960 ++ max_sectors = min_t(int, max_sectors,
4961 ++ BIO_MAX_VECS * (PAGE_SIZE >> 9));
4962 + if (max_sectors < bio_sectors(bio)) {
4963 + struct bio *split = bio_split(bio, max_sectors,
4964 + GFP_NOIO, &conf->bio_split);
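The clamp just above bounds a write-behind request to what one bio can carry as page-sized vectors. As a standalone check of the arithmetic (assuming 4 KiB pages and BIO_MAX_VECS = 256, the kernel's usual values), the cap works out to 2048 sectors, i.e. 1 MiB per bio:

    /* Sketch of the clamp arithmetic only; not kernel code. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int page_size = 4096;                   /* assumed PAGE_SIZE */
            unsigned int bio_max_vecs = 256;                 /* assumed BIO_MAX_VECS */
            unsigned int sectors_per_page = page_size >> 9;  /* 512-byte sectors */
            unsigned int max_sectors = bio_max_vecs * sectors_per_page;

            printf("cap = %u sectors (%u KiB)\n", max_sectors, max_sectors / 2);
            return 0;
    }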
4965 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
4966 +index 07119d7e0fdf9..aa2636582841e 100644
4967 +--- a/drivers/md/raid10.c
4968 ++++ b/drivers/md/raid10.c
4969 +@@ -1712,6 +1712,11 @@ retry_discard:
4970 + } else
4971 + r10_bio->master_bio = (struct bio *)first_r10bio;
4972 +
4973 ++ /*
4974 ++ * First select the target devices under the rcu lock and
4975 ++ * increment the refcount on their rdev. Record them by setting
4976 ++ * bios[x] to bio
4977 ++ */
4978 + rcu_read_lock();
4979 + for (disk = 0; disk < geo->raid_disks; disk++) {
4980 + struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
4981 +@@ -1743,9 +1748,6 @@ retry_discard:
4982 + for (disk = 0; disk < geo->raid_disks; disk++) {
4983 + sector_t dev_start, dev_end;
4984 + struct bio *mbio, *rbio = NULL;
4985 +- struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
4986 +- struct md_rdev *rrdev = rcu_dereference(
4987 +- conf->mirrors[disk].replacement);
4988 +
4989 + /*
4990 + * Now start to calculate the start and end address for each disk.
4991 +@@ -1775,9 +1777,12 @@ retry_discard:
4992 +
4993 + /*
4994 + * It only handles discard bio which size is >= stripe size, so
4995 +- * dev_end > dev_start all the time
4996 ++ * dev_end > dev_start all the time.
4997 ++ * There is no need to take the rcu lock to get rdev here; we
4998 ++ * already incremented rdev->nr_pending in the first loop.
4999 + */
5000 + if (r10_bio->devs[disk].bio) {
5001 ++ struct md_rdev *rdev = conf->mirrors[disk].rdev;
5002 + mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
5003 + mbio->bi_end_io = raid10_end_discard_request;
5004 + mbio->bi_private = r10_bio;
5005 +@@ -1790,6 +1795,7 @@ retry_discard:
5006 + bio_endio(mbio);
5007 + }
5008 + if (r10_bio->devs[disk].repl_bio) {
5009 ++ struct md_rdev *rrdev = conf->mirrors[disk].replacement;
5010 + rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
5011 + rbio->bi_end_io = raid10_end_discard_request;
5012 + rbio->bi_private = r10_bio;
5013 +diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
5014 +index 91e6db847bb5a..3a191e257fad0 100644
5015 +--- a/drivers/media/i2c/tda1997x.c
5016 ++++ b/drivers/media/i2c/tda1997x.c
5017 +@@ -2233,6 +2233,7 @@ static int tda1997x_core_init(struct v4l2_subdev *sd)
5018 + /* get initial HDMI status */
5019 + state->hdmi_status = io_read(sd, REG_HDMI_FLAGS);
5020 +
5021 ++ io_write(sd, REG_EDID_ENABLE, EDID_ENABLE_A_EN | EDID_ENABLE_B_EN);
5022 + return 0;
5023 + }
5024 +
5025 +diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
5026 +index 925aa80a139b2..b66f1d174e9d7 100644
5027 +--- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c
5028 ++++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
5029 +@@ -255,6 +255,23 @@ static void isc_sama5d2_config_rlp(struct isc_device *isc)
5030 + struct regmap *regmap = isc->regmap;
5031 + u32 rlp_mode = isc->config.rlp_cfg_mode;
5032 +
5033 ++ /*
5034 ++ * In sama5d2, the YUV planar modes and the YUYV modes are treated
5035 ++ * in the same way in RLP register.
5036 ++ * Normally, YYCC mode should be Luma(n) - Color B(n) - Color R (n)
5037 ++ * and YCYC should be Luma(n + 1) - Color B (n) - Luma (n) - Color R (n)
5038 ++ * but in sama5d2, the YCYC mode does not exist, and YYCC must be
5039 ++ * selected for both planar and interleaved modes, as in fact
5040 ++ * both modes are supported.
5041 ++ *
5042 ++ * Thus, if the YCYC mode is selected, replace it with the
5043 ++ * sama5d2-compliant mode, which is YYCC.
5044 ++ */
5045 ++ if ((rlp_mode & ISC_RLP_CFG_MODE_YCYC) == ISC_RLP_CFG_MODE_YCYC) {
5046 ++ rlp_mode &= ~ISC_RLP_CFG_MODE_MASK;
5047 ++ rlp_mode |= ISC_RLP_CFG_MODE_YYCC;
5048 ++ }
5049 ++
5050 + regmap_update_bits(regmap, ISC_RLP_CFG + isc->offsets.rlp,
5051 + ISC_RLP_CFG_MODE_MASK, rlp_mode);
5052 + }
5053 +diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
5054 +index 2f42808c43a4b..c484c008ab027 100644
5055 +--- a/drivers/media/platform/coda/coda-bit.c
5056 ++++ b/drivers/media/platform/coda/coda-bit.c
5057 +@@ -2053,17 +2053,25 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
5058 + u32 src_fourcc, dst_fourcc;
5059 + int ret;
5060 +
5061 ++ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
5062 ++ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
5063 ++ src_fourcc = q_data_src->fourcc;
5064 ++ dst_fourcc = q_data_dst->fourcc;
5065 ++
5066 + if (!ctx->initialized) {
5067 + ret = __coda_decoder_seq_init(ctx);
5068 + if (ret < 0)
5069 + return ret;
5070 ++ } else {
5071 ++ ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
5072 ++ CODA9_FRAME_TILED2LINEAR);
5073 ++ if (dst_fourcc == V4L2_PIX_FMT_NV12 || dst_fourcc == V4L2_PIX_FMT_YUYV)
5074 ++ ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
5075 ++ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
5076 ++ ctx->frame_mem_ctrl |= (0x3 << 9) |
5077 ++ ((ctx->use_vdoa) ? 0 : CODA9_FRAME_TILED2LINEAR);
5078 + }
5079 +
5080 +- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
5081 +- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
5082 +- src_fourcc = q_data_src->fourcc;
5083 +- dst_fourcc = q_data_dst->fourcc;
5084 +-
5085 + coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
5086 +
5087 + ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
5088 +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
5089 +index 53025c8c75312..20f59c59ff8a2 100644
5090 +--- a/drivers/media/platform/omap3isp/isp.c
5091 ++++ b/drivers/media/platform/omap3isp/isp.c
5092 +@@ -2037,8 +2037,10 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
5093 + mutex_lock(&isp->media_dev.graph_mutex);
5094 +
5095 + ret = media_entity_enum_init(&isp->crashed, &isp->media_dev);
5096 +- if (ret)
5097 ++ if (ret) {
5098 ++ mutex_unlock(&isp->media_dev.graph_mutex);
5099 + return ret;
5100 ++ }
5101 +
5102 + list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
5103 + if (sd->notifier != &isp->notifier)
5104 +diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
5105 +index 1fe6d463dc993..8012f5c7bf344 100644
5106 +--- a/drivers/media/platform/qcom/venus/helpers.c
5107 ++++ b/drivers/media/platform/qcom/venus/helpers.c
5108 +@@ -1137,6 +1137,9 @@ int venus_helper_set_format_constraints(struct venus_inst *inst)
5109 + if (!IS_V6(inst->core))
5110 + return 0;
5111 +
5112 ++ if (inst->opb_fmt == HFI_COLOR_FORMAT_NV12_UBWC)
5113 ++ return 0;
5114 ++
5115 + pconstraint.buffer_type = HFI_BUFFER_OUTPUT2;
5116 + pconstraint.num_planes = 2;
5117 + pconstraint.plane_format[0].stride_multiples = 128;
5118 +diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
5119 +index d9fde66f6fa8c..9a2bdb002edcc 100644
5120 +--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
5121 ++++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
5122 +@@ -261,7 +261,7 @@ sys_get_prop_image_version(struct device *dev,
5123 +
5124 + smem_tbl_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
5125 + SMEM_IMG_VER_TBL, &smem_blk_sz);
5126 +- if (smem_tbl_ptr && smem_blk_sz >= SMEM_IMG_OFFSET_VENUS + VER_STR_SZ)
5127 ++ if (!IS_ERR(smem_tbl_ptr) && smem_blk_sz >= SMEM_IMG_OFFSET_VENUS + VER_STR_SZ)
5128 + memcpy(smem_tbl_ptr + SMEM_IMG_OFFSET_VENUS,
5129 + img_ver, VER_STR_SZ);
5130 + }
5131 +diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
5132 +index 8dd49d4f124cb..1d62e38065d62 100644
5133 +--- a/drivers/media/platform/qcom/venus/venc.c
5134 ++++ b/drivers/media/platform/qcom/venus/venc.c
5135 +@@ -183,6 +183,8 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
5136 + else
5137 + return NULL;
5138 + fmt = find_format(inst, pixmp->pixelformat, f->type);
5139 ++ if (!fmt)
5140 ++ return NULL;
5141 + }
5142 +
5143 + pixmp->width = clamp(pixmp->width, frame_width_min(inst),
5144 +diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
5145 +index cca15a10c0b34..0d141155f0e3e 100644
5146 +--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
5147 ++++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
5148 +@@ -253,8 +253,8 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
5149 + int ret;
5150 +
5151 + sd_state = v4l2_subdev_alloc_state(sd);
5152 +- if (sd_state == NULL)
5153 +- return -ENOMEM;
5154 ++ if (IS_ERR(sd_state))
5155 ++ return PTR_ERR(sd_state);
5156 +
5157 + if (!rvin_format_from_pixel(vin, pix->pixelformat))
5158 + pix->pixelformat = RVIN_DEFAULT_FORMAT;
5159 +diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
5160 +index bf3fd71ec3aff..6759091b15e09 100644
5161 +--- a/drivers/media/platform/rockchip/rga/rga.c
5162 ++++ b/drivers/media/platform/rockchip/rga/rga.c
5163 +@@ -863,12 +863,12 @@ static int rga_probe(struct platform_device *pdev)
5164 + if (IS_ERR(rga->m2m_dev)) {
5165 + v4l2_err(&rga->v4l2_dev, "Failed to init mem2mem device\n");
5166 + ret = PTR_ERR(rga->m2m_dev);
5167 +- goto unreg_video_dev;
5168 ++ goto rel_vdev;
5169 + }
5170 +
5171 + ret = pm_runtime_resume_and_get(rga->dev);
5172 + if (ret < 0)
5173 +- goto unreg_video_dev;
5174 ++ goto rel_vdev;
5175 +
5176 + rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
5177 + rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
5178 +@@ -882,11 +882,23 @@ static int rga_probe(struct platform_device *pdev)
5179 + rga->cmdbuf_virt = dma_alloc_attrs(rga->dev, RGA_CMDBUF_SIZE,
5180 + &rga->cmdbuf_phy, GFP_KERNEL,
5181 + DMA_ATTR_WRITE_COMBINE);
5182 ++ if (!rga->cmdbuf_virt) {
5183 ++ ret = -ENOMEM;
5184 ++ goto rel_vdev;
5185 ++ }
5186 +
5187 + rga->src_mmu_pages =
5188 + (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
5189 ++ if (!rga->src_mmu_pages) {
5190 ++ ret = -ENOMEM;
5191 ++ goto free_dma;
5192 ++ }
5193 + rga->dst_mmu_pages =
5194 + (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
5195 ++ if (!rga->dst_mmu_pages) {
5196 ++ ret = -ENOMEM;
5197 ++ goto free_src_pages;
5198 ++ }
5199 +
5200 + def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
5201 + def_frame.size = def_frame.stride * def_frame.height;
5202 +@@ -894,7 +906,7 @@ static int rga_probe(struct platform_device *pdev)
5203 + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
5204 + if (ret) {
5205 + v4l2_err(&rga->v4l2_dev, "Failed to register video device\n");
5206 +- goto rel_vdev;
5207 ++ goto free_dst_pages;
5208 + }
5209 +
5210 + v4l2_info(&rga->v4l2_dev, "Registered %s as /dev/%s\n",
5211 +@@ -902,10 +914,15 @@ static int rga_probe(struct platform_device *pdev)
5212 +
5213 + return 0;
5214 +
5215 ++free_dst_pages:
5216 ++ free_pages((unsigned long)rga->dst_mmu_pages, 3);
5217 ++free_src_pages:
5218 ++ free_pages((unsigned long)rga->src_mmu_pages, 3);
5219 ++free_dma:
5220 ++ dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
5221 ++ rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
5222 + rel_vdev:
5223 + video_device_release(vfd);
5224 +-unreg_video_dev:
5225 +- video_unregister_device(rga->vfd);
5226 + unreg_v4l2_dev:
5227 + v4l2_device_unregister(&rga->v4l2_dev);
5228 + err_put_clk:
5229 +diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
5230 +index 6f51e5c755432..823c15facd1b4 100644
5231 +--- a/drivers/media/platform/vsp1/vsp1_entity.c
5232 ++++ b/drivers/media/platform/vsp1/vsp1_entity.c
5233 +@@ -676,9 +676,9 @@ int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
5234 + * rectangles.
5235 + */
5236 + entity->config = v4l2_subdev_alloc_state(&entity->subdev);
5237 +- if (entity->config == NULL) {
5238 ++ if (IS_ERR(entity->config)) {
5239 + media_entity_cleanup(&entity->subdev.entity);
5240 +- return -ENOMEM;
5241 ++ return PTR_ERR(entity->config);
5242 + }
5243 +
5244 + return 0;
5245 +diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
5246 +index e5094fff04c5a..b91a1e845b972 100644
5247 +--- a/drivers/media/spi/cxd2880-spi.c
5248 ++++ b/drivers/media/spi/cxd2880-spi.c
5249 +@@ -524,13 +524,13 @@ cxd2880_spi_probe(struct spi_device *spi)
5250 + if (IS_ERR(dvb_spi->vcc_supply)) {
5251 + if (PTR_ERR(dvb_spi->vcc_supply) == -EPROBE_DEFER) {
5252 + ret = -EPROBE_DEFER;
5253 +- goto fail_adapter;
5254 ++ goto fail_regulator;
5255 + }
5256 + dvb_spi->vcc_supply = NULL;
5257 + } else {
5258 + ret = regulator_enable(dvb_spi->vcc_supply);
5259 + if (ret)
5260 +- goto fail_adapter;
5261 ++ goto fail_regulator;
5262 + }
5263 +
5264 + dvb_spi->spi = spi;
5265 +@@ -618,6 +618,9 @@ fail_frontend:
5266 + fail_attach:
5267 + dvb_unregister_adapter(&dvb_spi->adapter);
5268 + fail_adapter:
5269 ++ if (dvb_spi->vcc_supply)
5270 ++ regulator_disable(dvb_spi->vcc_supply);
5271 ++fail_regulator:
5272 + kfree(dvb_spi);
5273 + return ret;
5274 + }
5275 +diff --git a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
5276 +index 2e07106f46803..bc4b2abdde1a4 100644
5277 +--- a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
5278 ++++ b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
5279 +@@ -17,7 +17,8 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
5280 +
5281 + if (d->props.i2c_algo == NULL) {
5282 + err("no i2c algorithm specified");
5283 +- return -EINVAL;
5284 ++ ret = -EINVAL;
5285 ++ goto err;
5286 + }
5287 +
5288 + strscpy(d->i2c_adap.name, d->desc->name, sizeof(d->i2c_adap.name));
5289 +@@ -27,11 +28,15 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
5290 +
5291 + i2c_set_adapdata(&d->i2c_adap, d);
5292 +
5293 +- if ((ret = i2c_add_adapter(&d->i2c_adap)) < 0)
5294 ++ ret = i2c_add_adapter(&d->i2c_adap);
5295 ++ if (ret < 0) {
5296 + err("could not add i2c adapter");
5297 ++ goto err;
5298 ++ }
5299 +
5300 + d->state |= DVB_USB_STATE_I2C;
5301 +
5302 ++err:
5303 + return ret;
5304 + }
5305 +
5306 +diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
5307 +index 28e1fd64dd3c2..61439c8f33cab 100644
5308 +--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
5309 ++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
5310 +@@ -194,8 +194,8 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
5311 +
5312 + err_adapter_init:
5313 + dvb_usb_adapter_exit(d);
5314 +-err_i2c_init:
5315 + dvb_usb_i2c_exit(d);
5316 ++err_i2c_init:
5317 + if (d->priv && d->props.priv_destroy)
5318 + d->props.priv_destroy(d);
5319 + err_priv_init:
5320 +diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
5321 +index e7b290552b663..9c0eb0d40822e 100644
5322 +--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
5323 ++++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
5324 +@@ -130,7 +130,7 @@ ret:
5325 +
5326 + static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
5327 + {
5328 +- int i;
5329 ++ int i, ret;
5330 + u8 b;
5331 +
5332 + mac[0] = 0x00;
5333 +@@ -139,7 +139,9 @@ static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
5334 +
5335 + /* this is a complete guess, but works for my box */
5336 + for (i = 136; i < 139; i++) {
5337 +- dibusb_read_eeprom_byte(d,i, &b);
5338 ++ ret = dibusb_read_eeprom_byte(d, i, &b);
5339 ++ if (ret)
5340 ++ return ret;
5341 +
5342 + mac[5 - (i - 136)] = b;
5343 + }
5344 +diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
5345 +index bf54747e2e01a..a1d9e4801a2ba 100644
5346 +--- a/drivers/media/usb/dvb-usb/vp702x.c
5347 ++++ b/drivers/media/usb/dvb-usb/vp702x.c
5348 +@@ -291,16 +291,22 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
5349 + static int vp702x_read_mac_addr(struct dvb_usb_device *d,u8 mac[6])
5350 + {
5351 + u8 i, *buf;
5352 ++ int ret;
5353 + struct vp702x_device_state *st = d->priv;
5354 +
5355 + mutex_lock(&st->buf_mutex);
5356 + buf = st->buf;
5357 +- for (i = 6; i < 12; i++)
5358 +- vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, &buf[i - 6], 1);
5359 ++ for (i = 6; i < 12; i++) {
5360 ++ ret = vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1,
5361 ++ &buf[i - 6], 1);
5362 ++ if (ret < 0)
5363 ++ goto err;
5364 ++ }
5365 +
5366 + memcpy(mac, buf, 6);
5367 ++err:
5368 + mutex_unlock(&st->buf_mutex);
5369 +- return 0;
5370 ++ return ret;
5371 + }
5372 +
5373 + static int vp702x_frontend_attach(struct dvb_usb_adapter *adap)
5374 +diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
5375 +index 59529cbf9cd0b..0b6d77c3bec86 100644
5376 +--- a/drivers/media/usb/em28xx/em28xx-input.c
5377 ++++ b/drivers/media/usb/em28xx/em28xx-input.c
5378 +@@ -842,7 +842,6 @@ error:
5379 + kfree(ir);
5380 + ref_put:
5381 + em28xx_shutdown_buttons(dev);
5382 +- kref_put(&dev->ref, em28xx_free_device);
5383 + return err;
5384 + }
5385 +
5386 +diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
5387 +index f1767be9d8685..6650eab913d81 100644
5388 +--- a/drivers/media/usb/go7007/go7007-driver.c
5389 ++++ b/drivers/media/usb/go7007/go7007-driver.c
5390 +@@ -691,49 +691,23 @@ struct go7007 *go7007_alloc(const struct go7007_board_info *board,
5391 + struct device *dev)
5392 + {
5393 + struct go7007 *go;
5394 +- int i;
5395 +
5396 + go = kzalloc(sizeof(struct go7007), GFP_KERNEL);
5397 + if (go == NULL)
5398 + return NULL;
5399 + go->dev = dev;
5400 + go->board_info = board;
5401 +- go->board_id = 0;
5402 + go->tuner_type = -1;
5403 +- go->channel_number = 0;
5404 +- go->name[0] = 0;
5405 + mutex_init(&go->hw_lock);
5406 + init_waitqueue_head(&go->frame_waitq);
5407 + spin_lock_init(&go->spinlock);
5408 + go->status = STATUS_INIT;
5409 +- memset(&go->i2c_adapter, 0, sizeof(go->i2c_adapter));
5410 +- go->i2c_adapter_online = 0;
5411 +- go->interrupt_available = 0;
5412 + init_waitqueue_head(&go->interrupt_waitq);
5413 +- go->input = 0;
5414 + go7007_update_board(go);
5415 +- go->encoder_h_halve = 0;
5416 +- go->encoder_v_halve = 0;
5417 +- go->encoder_subsample = 0;
5418 + go->format = V4L2_PIX_FMT_MJPEG;
5419 + go->bitrate = 1500000;
5420 + go->fps_scale = 1;
5421 +- go->pali = 0;
5422 + go->aspect_ratio = GO7007_RATIO_1_1;
5423 +- go->gop_size = 0;
5424 +- go->ipb = 0;
5425 +- go->closed_gop = 0;
5426 +- go->repeat_seqhead = 0;
5427 +- go->seq_header_enable = 0;
5428 +- go->gop_header_enable = 0;
5429 +- go->dvd_mode = 0;
5430 +- go->interlace_coding = 0;
5431 +- for (i = 0; i < 4; ++i)
5432 +- go->modet[i].enable = 0;
5433 +- for (i = 0; i < 1624; ++i)
5434 +- go->modet_map[i] = 0;
5435 +- go->audio_deliver = NULL;
5436 +- go->audio_enabled = 0;
5437 +
5438 + return go;
5439 + }
5440 +diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
5441 +index dbf0455d5d50d..eeb85981e02b6 100644
5442 +--- a/drivers/media/usb/go7007/go7007-usb.c
5443 ++++ b/drivers/media/usb/go7007/go7007-usb.c
5444 +@@ -1134,7 +1134,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
5445 +
5446 + ep = usb->usbdev->ep_in[4];
5447 + if (!ep)
5448 +- return -ENODEV;
5449 ++ goto allocfail;
5450 +
5451 + /* Allocate the URB and buffer for receiving incoming interrupts */
5452 + usb->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
5453 +diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
5454 +index 9dda87c6b54a9..016cb0b150fc7 100644
5455 +--- a/drivers/misc/lkdtm/core.c
5456 ++++ b/drivers/misc/lkdtm/core.c
5457 +@@ -82,7 +82,7 @@ static struct crashpoint crashpoints[] = {
5458 + CRASHPOINT("FS_DEVRW", "ll_rw_block"),
5459 + CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
5460 + CRASHPOINT("TIMERADD", "hrtimer_start"),
5461 +- CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
5462 ++ CRASHPOINT("SCSI_QUEUE_RQ", "scsi_queue_rq"),
5463 + CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
5464 + #endif
5465 + };
5466 +diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
5467 +index 02b807c788c9f..bb7aa63685388 100644
5468 +--- a/drivers/misc/pvpanic/pvpanic.c
5469 ++++ b/drivers/misc/pvpanic/pvpanic.c
5470 +@@ -85,6 +85,8 @@ int devm_pvpanic_probe(struct device *dev, struct pvpanic_instance *pi)
5471 + list_add(&pi->list, &pvpanic_list);
5472 + spin_unlock(&pvpanic_lock);
5473 +
5474 ++ dev_set_drvdata(dev, pi);
5475 ++
5476 + return devm_add_action_or_reset(dev, pvpanic_remove, pi);
5477 + }
5478 + EXPORT_SYMBOL_GPL(devm_pvpanic_probe);
5479 +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
5480 +index c3229d8c7041c..33cb70aa02aa8 100644
5481 +--- a/drivers/mmc/host/dw_mmc.c
5482 ++++ b/drivers/mmc/host/dw_mmc.c
5483 +@@ -782,6 +782,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
5484 + int ret = 0;
5485 +
5486 + /* Set external dma config: burst size, burst width */
5487 ++ memset(&cfg, 0, sizeof(cfg));
5488 + cfg.dst_addr = host->phy_regs + fifo_offset;
5489 + cfg.src_addr = cfg.dst_addr;
5490 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
5491 +diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
5492 +index bde2988875797..6c9d38132f74c 100644
5493 +--- a/drivers/mmc/host/moxart-mmc.c
5494 ++++ b/drivers/mmc/host/moxart-mmc.c
5495 +@@ -628,6 +628,7 @@ static int moxart_probe(struct platform_device *pdev)
5496 + host->dma_chan_tx, host->dma_chan_rx);
5497 + host->have_dma = true;
5498 +
5499 ++ memset(&cfg, 0, sizeof(cfg));
5500 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
5501 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
5502 +
5503 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
5504 +index aba6e10b86054..fff6c39a343e9 100644
5505 +--- a/drivers/mmc/host/sdhci.c
5506 ++++ b/drivers/mmc/host/sdhci.c
5507 +@@ -1222,6 +1222,7 @@ static int sdhci_external_dma_setup(struct sdhci_host *host,
5508 + if (!host->mapbase)
5509 + return -EINVAL;
5510 +
5511 ++ memset(&cfg, 0, sizeof(cfg));
5512 + cfg.src_addr = host->mapbase + SDHCI_BUFFER;
5513 + cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
5514 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
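The three mmc hunks above (dw_mmc, moxart, sdhci) share one fix: struct dma_slave_config is a stack variable, so any member a driver leaves unset used to reach the DMA engine as uninitialized stack data. Zeroing the struct first gives every unset member a defined value of zero. A minimal sketch of the resulting pattern, with a hypothetical device base address and FIFO offset:

    struct dma_slave_config cfg;
    int ret;

    memset(&cfg, 0, sizeof(cfg));            /* unset members are now 0 */
    cfg.direction = DMA_DEV_TO_MEM;
    cfg.src_addr = phys_base + FIFO_OFFSET;  /* hypothetical register layout */
    cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    ret = dmaengine_slave_config(chan, &cfg);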
5515 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
5516 +index b23e3488695ba..bd1417a66cbf2 100644
5517 +--- a/drivers/net/dsa/b53/b53_common.c
5518 ++++ b/drivers/net/dsa/b53/b53_common.c
5519 +@@ -2016,15 +2016,6 @@ int b53_br_flags(struct dsa_switch *ds, int port,
5520 + }
5521 + EXPORT_SYMBOL(b53_br_flags);
5522 +
5523 +-int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
5524 +- struct netlink_ext_ack *extack)
5525 +-{
5526 +- b53_port_set_mcast_flood(ds->priv, port, mrouter);
5527 +-
5528 +- return 0;
5529 +-}
5530 +-EXPORT_SYMBOL(b53_set_mrouter);
5531 +-
5532 + static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
5533 + {
5534 + /* Broadcom switches will accept enabling Broadcom tags on the
5535 +@@ -2268,7 +2259,6 @@ static const struct dsa_switch_ops b53_switch_ops = {
5536 + .port_bridge_leave = b53_br_leave,
5537 + .port_pre_bridge_flags = b53_br_flags_pre,
5538 + .port_bridge_flags = b53_br_flags,
5539 +- .port_set_mrouter = b53_set_mrouter,
5540 + .port_stp_state_set = b53_br_set_stp_state,
5541 + .port_fast_age = b53_br_fast_age,
5542 + .port_vlan_filtering = b53_vlan_filtering,
5543 +diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
5544 +index 82700a5714c10..9bf8319342b0b 100644
5545 +--- a/drivers/net/dsa/b53/b53_priv.h
5546 ++++ b/drivers/net/dsa/b53/b53_priv.h
5547 +@@ -328,8 +328,6 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
5548 + int b53_br_flags(struct dsa_switch *ds, int port,
5549 + struct switchdev_brport_flags flags,
5550 + struct netlink_ext_ack *extack);
5551 +-int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
5552 +- struct netlink_ext_ack *extack);
5553 + int b53_setup_devlink_resources(struct dsa_switch *ds);
5554 + void b53_port_event(struct dsa_switch *ds, int port);
5555 + void b53_phylink_validate(struct dsa_switch *ds, int port,
5556 +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
5557 +index 3b018fcf44124..6ce9ec1283e05 100644
5558 +--- a/drivers/net/dsa/bcm_sf2.c
5559 ++++ b/drivers/net/dsa/bcm_sf2.c
5560 +@@ -1199,7 +1199,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
5561 + .port_pre_bridge_flags = b53_br_flags_pre,
5562 + .port_bridge_flags = b53_br_flags,
5563 + .port_stp_state_set = b53_br_set_stp_state,
5564 +- .port_set_mrouter = b53_set_mrouter,
5565 + .port_fast_age = b53_br_fast_age,
5566 + .port_vlan_filtering = b53_vlan_filtering,
5567 + .port_vlan_add = b53_vlan_add,
5568 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
5569 +index 05bc46634b369..0cea1572f8260 100644
5570 +--- a/drivers/net/dsa/mt7530.c
5571 ++++ b/drivers/net/dsa/mt7530.c
5572 +@@ -1185,18 +1185,6 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
5573 + return 0;
5574 + }
5575 +
5576 +-static int
5577 +-mt7530_port_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
5578 +- struct netlink_ext_ack *extack)
5579 +-{
5580 +- struct mt7530_priv *priv = ds->priv;
5581 +-
5582 +- mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
5583 +- mrouter ? UNM_FFP(BIT(port)) : 0);
5584 +-
5585 +- return 0;
5586 +-}
5587 +-
5588 + static int
5589 + mt7530_port_bridge_join(struct dsa_switch *ds, int port,
5590 + struct net_device *bridge)
5591 +@@ -3058,7 +3046,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
5592 + .port_stp_state_set = mt7530_stp_state_set,
5593 + .port_pre_bridge_flags = mt7530_port_pre_bridge_flags,
5594 + .port_bridge_flags = mt7530_port_bridge_flags,
5595 +- .port_set_mrouter = mt7530_port_set_mrouter,
5596 + .port_bridge_join = mt7530_port_bridge_join,
5597 + .port_bridge_leave = mt7530_port_bridge_leave,
5598 + .port_fdb_add = mt7530_port_fdb_add,
5599 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
5600 +index 272b0535d9461..111a6d5985da6 100644
5601 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
5602 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
5603 +@@ -5781,23 +5781,6 @@ out:
5604 + return err;
5605 + }
5606 +
5607 +-static int mv88e6xxx_port_set_mrouter(struct dsa_switch *ds, int port,
5608 +- bool mrouter,
5609 +- struct netlink_ext_ack *extack)
5610 +-{
5611 +- struct mv88e6xxx_chip *chip = ds->priv;
5612 +- int err;
5613 +-
5614 +- if (!chip->info->ops->port_set_mcast_flood)
5615 +- return -EOPNOTSUPP;
5616 +-
5617 +- mv88e6xxx_reg_lock(chip);
5618 +- err = chip->info->ops->port_set_mcast_flood(chip, port, mrouter);
5619 +- mv88e6xxx_reg_unlock(chip);
5620 +-
5621 +- return err;
5622 +-}
5623 +-
5624 + static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
5625 + struct net_device *lag,
5626 + struct netdev_lag_upper_info *info)
5627 +@@ -6099,7 +6082,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
5628 + .port_bridge_leave = mv88e6xxx_port_bridge_leave,
5629 + .port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
5630 + .port_bridge_flags = mv88e6xxx_port_bridge_flags,
5631 +- .port_set_mrouter = mv88e6xxx_port_set_mrouter,
5632 + .port_stp_state_set = mv88e6xxx_port_stp_state_set,
5633 + .port_fast_age = mv88e6xxx_port_fast_age,
5634 + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
5635 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
5636 +index 59253846e8858..f26d037356191 100644
5637 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
5638 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
5639 +@@ -417,6 +417,9 @@ static int atl_resume_common(struct device *dev, bool deep)
5640 + pci_restore_state(pdev);
5641 +
5642 + if (deep) {
5643 ++ /* Reinitialize Nic/Vecs objects */
5644 ++ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
5645 ++
5646 + ret = aq_nic_init(nic);
5647 + if (ret)
5648 + goto err_exit;
5649 +diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
5650 +index 5bb56b4545415..f089d33dd48e0 100644
5651 +--- a/drivers/net/ethernet/google/gve/gve_adminq.c
5652 ++++ b/drivers/net/ethernet/google/gve/gve_adminq.c
5653 +@@ -322,7 +322,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
5654 + tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
5655 +
5656 + // Check if next command will overflow the buffer.
5657 +- if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
5658 ++ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
5659 ++ (tail & priv->adminq_mask)) {
5660 + int err;
5661 +
5662 + // Flush existing commands to make room.
5663 +@@ -332,7 +333,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
5664 +
5665 + // Retry.
5666 + tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
5667 +- if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
5668 ++ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
5669 ++ (tail & priv->adminq_mask)) {
5670 + // This should never happen. We just flushed the
5671 + // command queue so there should be enough space.
5672 + return -ENOMEM;
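The gve fix above masks both sides of the ring-full comparison. adminq_prod_cnt and the device's event counter are free-running, while the ring only holds adminq_mask + 1 entries, so once the counter grows past the ring size the old comparison of a masked producer index against the raw counter could never match, and a full queue went undetected. A standalone sketch with an assumed 64-entry queue:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mask = 63;               /* 64-entry queue (assumed) */
            unsigned int prod = 127, tail = 64;   /* free-running counters */

            /* old: masked producer vs. raw counter -> 0 == 64, never full */
            printf("old: %d\n", ((prod + 1) & mask) == tail);
            /* new: mask both sides -> 0 == 0, full queue detected */
            printf("new: %d\n", ((prod + 1) & mask) == (tail & mask));
            return 0;
    }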
5673 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5674 +index eff0a30790dd7..472f56b360b8c 100644
5675 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5676 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
5677 +@@ -1160,12 +1160,12 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
5678 + }
5679 +
5680 + /**
5681 +- * i40e_getnum_vf_vsi_vlan_filters
5682 ++ * __i40e_getnum_vf_vsi_vlan_filters
5683 + * @vsi: pointer to the vsi
5684 + *
5685 + * called to get the number of VLANs offloaded on this VF
5686 + **/
5687 +-static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
5688 ++static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
5689 + {
5690 + struct i40e_mac_filter *f;
5691 + u16 num_vlans = 0, bkt;
5692 +@@ -1178,6 +1178,23 @@ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
5693 + return num_vlans;
5694 + }
5695 +
5696 ++/**
5697 ++ * i40e_getnum_vf_vsi_vlan_filters
5698 ++ * @vsi: pointer to the vsi
5699 ++ *
5700 ++ * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
5701 ++ **/
5702 ++static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
5703 ++{
5704 ++ int num_vlans;
5705 ++
5706 ++ spin_lock_bh(&vsi->mac_filter_hash_lock);
5707 ++ num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
5708 ++ spin_unlock_bh(&vsi->mac_filter_hash_lock);
5709 ++
5710 ++ return num_vlans;
5711 ++}
5712 ++
5713 + /**
5714 + * i40e_get_vlan_list_sync
5715 + * @vsi: pointer to the VSI
5716 +@@ -1195,7 +1212,7 @@ static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
5717 + int bkt;
5718 +
5719 + spin_lock_bh(&vsi->mac_filter_hash_lock);
5720 +- *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
5721 ++ *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
5722 + *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
5723 + if (!(*vlan_list))
5724 + goto err;
5725 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
5726 +index fe2ded775f259..a8bd512d5b450 100644
5727 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
5728 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
5729 +@@ -5122,6 +5122,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
5730 + struct ice_hw *hw = &pf->hw;
5731 + struct sockaddr *addr = pi;
5732 + enum ice_status status;
5733 ++ u8 old_mac[ETH_ALEN];
5734 + u8 flags = 0;
5735 + int err = 0;
5736 + u8 *mac;
5737 +@@ -5144,8 +5145,13 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
5738 + }
5739 +
5740 + netif_addr_lock_bh(netdev);
5741 ++ ether_addr_copy(old_mac, netdev->dev_addr);
5742 ++ /* change the netdev's MAC address */
5743 ++ memcpy(netdev->dev_addr, mac, netdev->addr_len);
5744 ++ netif_addr_unlock_bh(netdev);
5745 ++
5746 + /* Clean up old MAC filter. Not an error if old filter doesn't exist */
5747 +- status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
5748 ++ status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
5749 + if (status && status != ICE_ERR_DOES_NOT_EXIST) {
5750 + err = -EADDRNOTAVAIL;
5751 + goto err_update_filters;
5752 +@@ -5168,13 +5174,12 @@ err_update_filters:
5753 + if (err) {
5754 + netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
5755 + mac);
5756 ++ netif_addr_lock_bh(netdev);
5757 ++ ether_addr_copy(netdev->dev_addr, old_mac);
5758 + netif_addr_unlock_bh(netdev);
5759 + return err;
5760 + }
5761 +
5762 +- /* change the netdev's MAC address */
5763 +- memcpy(netdev->dev_addr, mac, netdev->addr_len);
5764 +- netif_addr_unlock_bh(netdev);
5765 + netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
5766 + netdev->dev_addr);
5767 +
5768 +diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
5769 +index 9e3ddb9b8b516..234bc68e79f96 100644
5770 +--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
5771 ++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
5772 +@@ -22,7 +22,7 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
5773 + return;
5774 +
5775 + /* Set the timestamp enable flag for all the Tx rings */
5776 +- ice_for_each_rxq(vsi, i) {
5777 ++ ice_for_each_txq(vsi, i) {
5778 + if (!vsi->tx_rings[i])
5779 + continue;
5780 + vsi->tx_rings[i]->ptp_tx = on;
5781 +@@ -688,6 +688,41 @@ err:
5782 + return -EFAULT;
5783 + }
5784 +
5785 ++/**
5786 ++ * ice_ptp_disable_all_clkout - Disable all currently configured outputs
5787 ++ * @pf: pointer to the PF structure
5788 ++ *
5789 ++ * Disable all currently configured clock outputs. This is necessary before
5790 ++ * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to
5791 ++ * re-enable the clocks.
5792 ++ */
5793 ++static void ice_ptp_disable_all_clkout(struct ice_pf *pf)
5794 ++{
5795 ++ uint i;
5796 ++
5797 ++ for (i = 0; i < pf->ptp.info.n_per_out; i++)
5798 ++ if (pf->ptp.perout_channels[i].ena)
5799 ++ ice_ptp_cfg_clkout(pf, i, NULL, false);
5800 ++}
5801 ++
5802 ++/**
5803 ++ * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs
5804 ++ * @pf: pointer to the PF structure
5805 ++ *
5806 ++ * Enable all currently configured clock outputs. Use this after
5807 ++ * ice_ptp_disable_all_clkout to reconfigure the output signals according to
5808 ++ * their configuration.
5809 ++ */
5810 ++static void ice_ptp_enable_all_clkout(struct ice_pf *pf)
5811 ++{
5812 ++ uint i;
5813 ++
5814 ++ for (i = 0; i < pf->ptp.info.n_per_out; i++)
5815 ++ if (pf->ptp.perout_channels[i].ena)
5816 ++ ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i],
5817 ++ false);
5818 ++}
5819 ++
5820 + /**
5821 + * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC
5822 + * @info: the driver's PTP info structure
5823 +@@ -783,12 +818,17 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
5824 + goto exit;
5825 + }
5826 +
5827 ++ /* Disable periodic outputs */
5828 ++ ice_ptp_disable_all_clkout(pf);
5829 ++
5830 + err = ice_ptp_write_init(pf, &ts64);
5831 + ice_ptp_unlock(hw);
5832 +
5833 + if (!err)
5834 + ice_ptp_update_cached_phctime(pf);
5835 +
5836 ++ /* Reenable periodic outputs */
5837 ++ ice_ptp_enable_all_clkout(pf);
5838 + exit:
5839 + if (err) {
5840 + dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
5841 +@@ -842,8 +882,14 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
5842 + return -EBUSY;
5843 + }
5844 +
5845 ++ /* Disable periodic outputs */
5846 ++ ice_ptp_disable_all_clkout(pf);
5847 ++
5848 + err = ice_ptp_write_adj(pf, delta);
5849 +
5850 ++ /* Reenable periodic outputs */
5851 ++ ice_ptp_enable_all_clkout(pf);
5852 ++
5853 + ice_ptp_unlock(hw);
5854 +
5855 + if (err) {
5856 +@@ -1278,6 +1324,8 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
5857 + {
5858 + u8 idx;
5859 +
5860 ++ spin_lock(&tx->lock);
5861 ++
5862 + for (idx = 0; idx < tx->len; idx++) {
5863 + u8 phy_idx = idx + tx->quad_offset;
5864 +
5865 +@@ -1290,6 +1338,8 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
5866 + tx->tstamps[idx].skb = NULL;
5867 + }
5868 + }
5869 ++
5870 ++ spin_unlock(&tx->lock);
5871 + }
5872 +
5873 + /**
5874 +@@ -1550,6 +1600,9 @@ void ice_ptp_release(struct ice_pf *pf)
5875 + if (!pf->ptp.clock)
5876 + return;
5877 +
5878 ++ /* Disable periodic outputs */
5879 ++ ice_ptp_disable_all_clkout(pf);
5880 ++
5881 + ice_clear_ptp_clock_index(pf);
5882 + ptp_clock_unregister(pf->ptp.clock);
5883 + pf->ptp.clock = NULL;
5884 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
5885 +index 47f5ed006a932..e0b43aad203c1 100644
5886 +--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
5887 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
5888 +@@ -195,8 +195,6 @@ enum nix_scheduler {
5889 + #define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
5890 + #define NIX_CHAN_SDP_CH_START (0x700ull)
5891 +
5892 +-#define SDP_CHANNELS 256
5893 +-
5894 + /* The mask is to extract lower 10-bits of channel number
5895 + * which CPT will pass to X2P.
5896 + */
5897 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
5898 +index 8d48b64485c69..dbe9149a215e8 100644
5899 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
5900 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
5901 +@@ -82,10 +82,10 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
5902 + dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val);
5903 + return -EIO;
5904 + }
5905 +- /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21]
5906 ++ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18]
5907 + * PA[11:0] = IOVA[11:0]
5908 + */
5909 +- pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21;
5910 ++ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18;
5911 + pa &= GENMASK_ULL(39, 0);
5912 + *lmt_addr = (pa << 12) | (iova & 0xFFF);
5913 +
5914 +@@ -212,9 +212,10 @@ void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
5915 +
5916 + int rvu_set_channels_base(struct rvu *rvu)
5917 + {
5918 ++ u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
5919 ++ u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
5920 + struct rvu_hwinfo *hw = rvu->hw;
5921 +- u16 cpt_chan_base;
5922 +- u64 nix_const;
5923 ++ u64 nix_const, nix_const1;
5924 + int blkaddr;
5925 +
5926 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
5927 +@@ -222,6 +223,7 @@ int rvu_set_channels_base(struct rvu *rvu)
5928 + return blkaddr;
5929 +
5930 + nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5931 ++ nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
5932 +
5933 + hw->cgx = (nix_const >> 12) & 0xFULL;
5934 + hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
5935 +@@ -244,14 +246,24 @@ int rvu_set_channels_base(struct rvu *rvu)
5936 + * channels such that all channel numbers are contiguous
5937 + * leaving no holes. This way the new CPT channels can be
5938 + * accommodated. The order of channel numbers assigned is
5939 +- * LBK, SDP, CGX and CPT.
5940 ++ * LBK, SDP, CGX and CPT. Also the base channel number
5941 ++ * of a block must be a multiple of the number of
5942 ++ * channels of the block.
5943 + */
5944 +- hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links *
5945 +- ((nix_const >> 16) & 0xFFULL);
5946 +- hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS;
5947 ++ nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
5948 ++ nr_sdp_chans = nix_const1 & 0xFFFULL;
5949 ++ nr_cgx_chans = nix_const & 0xFFULL;
5950 ++ nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;
5951 +
5952 +- cpt_chan_base = hw->cgx_chan_base + hw->cgx_links *
5953 +- (nix_const & 0xFFULL);
5954 ++ sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
5955 ++ /* Round up base channel to multiple of number of channels */
5956 ++ hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);
5957 ++
5958 ++ cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
5959 ++ hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);
5960 ++
5961 ++ cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
5962 ++ hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
5963 +
5964 + /* Out of 4096 channels start CPT from 2048 so
5965 + * that MSB for CPT channels is always set
5966 +@@ -355,6 +367,7 @@ err_put:
5967 +
5968 + static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
5969 + {
5970 ++ u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
5971 + u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
5972 + u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
5973 + struct rvu_hwinfo *hw = rvu->hw;
5974 +@@ -364,7 +377,7 @@ static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
5975 +
5976 + cgx_chans = nix_const & 0xFFULL;
5977 + lbk_chans = (nix_const >> 16) & 0xFFULL;
5978 +- sdp_chans = SDP_CHANNELS;
5979 ++ sdp_chans = nix_const1 & 0xFFFULL;
5980 + cpt_chans = (nix_const >> 32) & 0xFFFULL;
5981 +
5982 + start = hw->cgx_chan_base;
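
The reworked base assignment reads each block's channel count from NIX_AF_CONST/NIX_AF_CONST1 and rounds every base up to a multiple of that count. The kernel's ALIGN() does this with a mask, which assumes the alignment is a power of two; a compilable sketch with illustrative counts:

        #include <stdio.h>

        /* Kernel-style ALIGN: round x up to the next multiple of a,
         * where a must be a power of two.
         */
        #define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((unsigned)(a) - 1))

        int main(void)
        {
                /* Illustrative counts only; real values come from NIX_AF_CONST*. */
                unsigned lbk_base = 0, lbk_links = 1, nr_lbk = 64;
                unsigned sdp_links = 1, nr_sdp = 256, nr_cgx = 16;

                unsigned sdp_base = ALIGN_POW2(lbk_base + lbk_links * nr_lbk, nr_sdp);
                unsigned cgx_base = ALIGN_POW2(sdp_base + sdp_links * nr_sdp, nr_cgx);

                /* Each base is now a multiple of its block's channel count. */
                printf("sdp_base=%u cgx_base=%u\n", sdp_base, cgx_base);
                return 0;
        }
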
5983 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
5984 +index 4bfbbdf387709..c32195073e8a5 100644
5985 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
5986 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
5987 +@@ -25,7 +25,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc,
5988 + int type, bool add);
5989 + static int nix_setup_ipolicers(struct rvu *rvu,
5990 + struct nix_hw *nix_hw, int blkaddr);
5991 +-static void nix_ipolicer_freemem(struct nix_hw *nix_hw);
5992 ++static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw);
5993 + static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
5994 + struct nix_hw *nix_hw, u16 pcifunc);
5995 + static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
5996 +@@ -3849,7 +3849,7 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr,
5997 + kfree(txsch->schq.bmap);
5998 + }
5999 +
6000 +- nix_ipolicer_freemem(nix_hw);
6001 ++ nix_ipolicer_freemem(rvu, nix_hw);
6002 +
6003 + vlan = &nix_hw->txvlan;
6004 + kfree(vlan->rsrc.bmap);
6005 +@@ -4225,11 +4225,14 @@ static int nix_setup_ipolicers(struct rvu *rvu,
6006 + return 0;
6007 + }
6008 +
6009 +-static void nix_ipolicer_freemem(struct nix_hw *nix_hw)
6010 ++static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw)
6011 + {
6012 + struct nix_ipolicer *ipolicer;
6013 + int layer;
6014 +
6015 ++ if (!rvu->hw->cap.ipolicer)
6016 ++ return;
6017 ++
6018 + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
6019 + ipolicer = &nix_hw->ipolicer[layer];
6020 +
6021 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
6022 +index 52b255426c22a..26a792407c40a 100644
6023 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
6024 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
6025 +@@ -23,7 +23,7 @@
6026 + #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
6027 +
6028 + #define NPC_PARSE_RESULT_DMAC_OFFSET 8
6029 +-#define NPC_HW_TSTAMP_OFFSET 8
6030 ++#define NPC_HW_TSTAMP_OFFSET 8ULL
6031 + #define NPC_KEX_CHAN_MASK 0xFFFULL
6032 + #define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
6033 +
6034 +@@ -938,7 +938,7 @@ void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf,
6035 + static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
6036 + int blkaddr, u16 pcifunc, u64 rx_action)
6037 + {
6038 +- int actindex, index, bank;
6039 ++ int actindex, index, bank, entry;
6040 + bool enable;
6041 +
6042 + if (!(pcifunc & RVU_PFVF_FUNC_MASK))
6043 +@@ -949,7 +949,7 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
6044 + if (mcam->entry2target_pffunc[index] == pcifunc) {
6045 + bank = npc_get_bank(mcam, index);
6046 + actindex = index;
6047 +- index &= (mcam->banksize - 1);
6048 ++ entry = index & (mcam->banksize - 1);
6049 +
6050 + /* read vf flow entry enable status */
6051 + enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
6052 +@@ -959,7 +959,7 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
6053 + false);
6054 + /* update 'action' */
6055 + rvu_write64(rvu, blkaddr,
6056 +- NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
6057 ++ NPC_AF_MCAMEX_BANKX_ACTION(entry, bank),
6058 + rx_action);
6059 + if (enable)
6060 + npc_enable_mcam_entry(rvu, mcam, blkaddr,
6061 +@@ -2030,14 +2030,15 @@ int rvu_npc_init(struct rvu *rvu)
6062 +
6063 + /* Enable below for Rx pkts.
6064 + * - Outer IPv4 header checksum validation.
6065 +- * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
6066 ++ * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B].
6067 ++ * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M].
6068 + * - Inner IPv4 header checksum validation.
6069 + * - Set a non-zero checksum error code value
6070 + */
6071 + rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
6072 + rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
6073 +- BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
6074 +- BIT_ULL(2) | BIT_ULL(1));
6075 ++ ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) |
6076 ++ BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1));
6077 +
6078 + rvu_npc_setup_interfaces(rvu, blkaddr);
6079 +
6080 +@@ -2166,7 +2167,7 @@ static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
6081 + int blkaddr, u16 entry, u16 cntr)
6082 + {
6083 + u16 index = entry & (mcam->banksize - 1);
6084 +- u16 bank = npc_get_bank(mcam, entry);
6085 ++ u32 bank = npc_get_bank(mcam, entry);
6086 +
6087 + /* Remove mapping and reduce counter's refcnt */
6088 + mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
6089 +@@ -2788,8 +2789,8 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
6090 + struct npc_mcam *mcam = &rvu->hw->mcam;
6091 + u16 pcifunc = req->hdr.pcifunc;
6092 + u16 old_entry, new_entry;
6093 ++ int blkaddr, rc = 0;
6094 + u16 index, cntr;
6095 +- int blkaddr, rc;
6096 +
6097 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
6098 + if (blkaddr < 0)
6099 +@@ -2990,10 +2991,11 @@ int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
6100 + index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
6101 + if (index >= mcam->bmap_entries)
6102 + break;
6103 ++ entry = index + 1;
6104 ++
6105 + if (mcam->entry2cntr_map[index] != req->cntr)
6106 + continue;
6107 +
6108 +- entry = index + 1;
6109 + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
6110 + index, req->cntr);
6111 + }
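
Several rvu_npc.c fixes come down to MCAM addressing: a global entry index encodes a bank plus an offset within the bank, and the VF flow-entry fix keeps the global index intact while deriving the per-bank offset into a separate variable instead of clobbering `index`. Assuming a power-of-two bank size, the decomposition looks like this sketch (helper names are made up):

        #include <stdio.h>

        #define BANKSIZE 256u   /* illustrative; a power of two here */

        /* Split a global MCAM index into (bank, offset-within-bank). */
        static unsigned mcam_bank(unsigned index)  { return index / BANKSIZE; }
        static unsigned mcam_entry(unsigned index) { return index & (BANKSIZE - 1); }

        int main(void)
        {
                unsigned actindex = 1234;              /* global index, kept as-is */
                unsigned bank  = mcam_bank(actindex);  /* 4 */
                unsigned entry = mcam_entry(actindex); /* 210 */

                printf("global=%u -> bank=%u entry=%u\n", actindex, bank, entry);
                return 0;
        }
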
6112 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
6113 +index 8b01ef6e2c997..4215841c9f86e 100644
6114 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
6115 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
6116 +@@ -53,7 +53,7 @@
6117 + #define RVU_AF_SMMU_TXN_REQ (0x6008)
6118 + #define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
6119 + #define RVU_AF_SMMU_ADDR_TLN (0x6018)
6120 +-#define RVU_AF_SMMU_TLN_FLIT1 (0x6030)
6121 ++#define RVU_AF_SMMU_TLN_FLIT0 (0x6020)
6122 +
6123 + /* Admin function's privileged PF/VF registers */
6124 + #define RVU_PRIV_CONST (0x8000000)
6125 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
6126 +index 70fcc1fd962fc..94dfd64f526fa 100644
6127 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
6128 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
6129 +@@ -208,7 +208,8 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
6130 + if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
6131 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
6132 + /* update dmac field in vlan offload rule */
6133 +- if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
6134 ++ if (netif_running(netdev) &&
6135 ++ pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
6136 + otx2_install_rxvlan_offload_flow(pfvf);
6137 + /* update dmac address in ntuple and DMAC filter list */
6138 + if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
6139 +@@ -268,6 +269,7 @@ unlock:
6140 + int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
6141 + {
6142 + struct otx2_rss_info *rss = &pfvf->hw.rss_info;
6143 ++ struct nix_rss_flowkey_cfg_rsp *rsp;
6144 + struct nix_rss_flowkey_cfg *req;
6145 + int err;
6146 +
6147 +@@ -282,6 +284,18 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
6148 + req->group = DEFAULT_RSS_CONTEXT_GROUP;
6149 +
6150 + err = otx2_sync_mbox_msg(&pfvf->mbox);
6151 ++ if (err)
6152 ++ goto fail;
6153 ++
6154 ++ rsp = (struct nix_rss_flowkey_cfg_rsp *)
6155 ++ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
6156 ++ if (IS_ERR(rsp)) {
6157 ++ err = PTR_ERR(rsp);
6158 ++ goto fail;
6159 ++ }
6160 ++
6161 ++ pfvf->hw.flowkey_alg_idx = rsp->alg_idx;
6162 ++fail:
6163 + mutex_unlock(&pfvf->mbox.lock);
6164 + return err;
6165 + }
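
The flowkey change also leans on the kernel's error-pointer idiom: otx2_mbox_get_rsp() hands back either a valid response pointer or a negative errno encoded in the pointer value, decoded with IS_ERR()/PTR_ERR(). A freestanding re-creation of that idiom for illustration (the real macros live in include/linux/err.h):

        #include <stdio.h>
        #include <errno.h>

        #define MAX_ERRNO 4095
        #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

        static void *ERR_PTR(long error)     { return (void *)error; }
        static long PTR_ERR(const void *ptr) { return (long)ptr; }
        static int IS_ERR(const void *ptr)   { return IS_ERR_VALUE((unsigned long)ptr); }

        /* Hypothetical stand-in for otx2_mbox_get_rsp(): fail with -ENOMEM. */
        static void *get_rsp(int fail)
        {
                static int real_rsp = 42;
                return fail ? ERR_PTR(-ENOMEM) : (void *)&real_rsp;
        }

        int main(void)
        {
                void *rsp = get_rsp(1);

                if (IS_ERR(rsp)) {
                        printf("error %ld\n", PTR_ERR(rsp)); /* -12 */
                        return 1;
                }
                printf("rsp=%d\n", *(int *)rsp);
                return 0;
        }
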
6166 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
6167 +index 8fd58cd07f50b..8c602d27108a7 100644
6168 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
6169 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
6170 +@@ -196,6 +196,9 @@ struct otx2_hw {
6171 + u8 lso_udpv4_idx;
6172 + u8 lso_udpv6_idx;
6173 +
6174 ++ /* RSS */
6175 ++ u8 flowkey_alg_idx;
6176 ++
6177 + /* MSI-X */
6178 + u8 cint_cnt; /* CQ interrupt count */
6179 + u16 npa_msixoff; /* Offset of NPA vectors */
6180 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
6181 +index 4d9de525802d0..fdd27c4fea86d 100644
6182 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
6183 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
6184 +@@ -858,6 +858,7 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
6185 + if (flow->flow_spec.flow_type & FLOW_RSS) {
6186 + req->op = NIX_RX_ACTIONOP_RSS;
6187 + req->index = flow->rss_ctx_id;
6188 ++ req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
6189 + } else {
6190 + req->op = NIX_RX_ACTIONOP_UCAST;
6191 + req->index = ethtool_get_flow_spec_ring(ring_cookie);
6192 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
6193 +index 972b202b9884d..32d5c623fdfaf 100644
6194 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
6195 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
6196 +@@ -485,8 +485,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
6197 + match.key->vlan_priority << 13;
6198 +
6199 + vlan_tci_mask = match.mask->vlan_id |
6200 +- match.key->vlan_dei << 12 |
6201 +- match.key->vlan_priority << 13;
6202 ++ match.mask->vlan_dei << 12 |
6203 ++ match.mask->vlan_priority << 13;
6204 +
6205 + flow_spec->vlan_tci = htons(vlan_tci);
6206 + flow_mask->vlan_tci = htons(vlan_tci_mask);
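
The otx2_tc.c fix is purely about which fields feed the mask: the 16-bit TCI packs the VLAN ID into bits 0-11, DEI into bit 12 and priority into bits 13-15, and the key and the mask must each be packed from their own fields with the same layout. A small sketch of the packing:

        #include <stdio.h>
        #include <stdint.h>

        /* Pack VLAN ID (12 bits), DEI (1 bit) and PCP (3 bits) into a TCI. */
        static uint16_t pack_tci(uint16_t id, uint8_t dei, uint8_t prio)
        {
                return (uint16_t)(id | dei << 12 | prio << 13);
        }

        int main(void)
        {
                /* key: VLAN 100, prio 5; mask: match ID exactly, ignore DEI/PCP */
                uint16_t tci      = pack_tci(100, 0, 5);
                uint16_t tci_mask = pack_tci(0xFFF, 0, 0);

                /* Building the mask from key fields (the old bug) would leak
                 * the key's DEI/PCP values into the mask bits.
                 */
                printf("tci=0x%04x mask=0x%04x\n", tci, tci_mask);
                return 0;
        }
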
6207 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
6208 +index def2156e50eeb..20bb372662541 100644
6209 +--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
6210 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
6211 +@@ -397,7 +397,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
6212 + void mlx5_unregister_device(struct mlx5_core_dev *dev)
6213 + {
6214 + mutex_lock(&mlx5_intf_mutex);
6215 +- dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
6216 ++ dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
6217 + mlx5_rescan_drivers_locked(dev);
6218 + mutex_unlock(&mlx5_intf_mutex);
6219 + }
6220 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
6221 +index d791d351b489d..be6b75bd10f1e 100644
6222 +--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
6223 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
6224 +@@ -670,6 +670,7 @@ params_reg_err:
6225 + void mlx5_devlink_unregister(struct devlink *devlink)
6226 + {
6227 + mlx5_devlink_traps_unregister(devlink);
6228 ++ devlink_params_unpublish(devlink);
6229 + devlink_params_unregister(devlink, mlx5_devlink_params,
6230 + ARRAY_SIZE(mlx5_devlink_params));
6231 + devlink_unregister(devlink);
6232 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
6233 +index 1d5ce07b83f45..43b092f5565af 100644
6234 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
6235 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
6236 +@@ -248,18 +248,12 @@ struct ttc_params {
6237 +
6238 + void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
6239 + void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
6240 +-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
6241 +
6242 + int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
6243 + struct mlx5e_ttc_table *ttc);
6244 + void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
6245 + struct mlx5e_ttc_table *ttc);
6246 +
6247 +-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
6248 +- struct mlx5e_ttc_table *ttc);
6249 +-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
6250 +- struct mlx5e_ttc_table *ttc);
6251 +-
6252 + void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
6253 + int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
6254 + struct mlx5_flow_destination *new_dest);
6255 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
6256 +index 5efe3278b0f64..1fd8baf198296 100644
6257 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
6258 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
6259 +@@ -733,8 +733,8 @@ static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
6260 + spin_unlock_bh(qdisc_lock(qdisc));
6261 + }
6262 +
6263 +-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
6264 +- u16 *new_qid, struct netlink_ext_ack *extack)
6265 ++int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
6266 ++ struct netlink_ext_ack *extack)
6267 + {
6268 + struct mlx5e_qos_node *node;
6269 + struct netdev_queue *txq;
6270 +@@ -742,11 +742,9 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
6271 + bool opened;
6272 + int err;
6273 +
6274 +- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid);
6275 +-
6276 +- *old_qid = *new_qid = 0;
6277 ++ qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
6278 +
6279 +- node = mlx5e_sw_node_find(priv, classid);
6280 ++ node = mlx5e_sw_node_find(priv, *classid);
6281 + if (!node)
6282 + return -ENOENT;
6283 +
6284 +@@ -764,7 +762,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
6285 + err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
6286 + if (err) /* Not fatal. */
6287 + qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
6288 +- node->hw_id, classid, err);
6289 ++ node->hw_id, *classid, err);
6290 +
6291 + mlx5e_sw_node_delete(priv, node);
6292 +
6293 +@@ -826,8 +824,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
6294 + if (opened)
6295 + mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
6296 +
6297 +- *old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid);
6298 +- *new_qid = mlx5e_qid_from_qos(&priv->channels, qid);
6299 ++ *classid = node->classid;
6300 + return 0;
6301 + }
6302 +
6303 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
6304 +index 5af7991fcd194..757682b7c0e04 100644
6305 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
6306 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
6307 +@@ -34,8 +34,8 @@ int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
6308 + struct netlink_ext_ack *extack);
6309 + int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
6310 + u64 rate, u64 ceil, struct netlink_ext_ack *extack);
6311 +-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
6312 +- u16 *new_qid, struct netlink_ext_ack *extack);
6313 ++int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
6314 ++ struct netlink_ext_ack *extack);
6315 + int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
6316 + struct netlink_ext_ack *extack);
6317 + int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
6318 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
6319 +index 2e846b7412806..1c44c6c345f5d 100644
6320 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
6321 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
6322 +@@ -147,7 +147,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
6323 + mlx5e_rep_queue_neigh_stats_work(priv);
6324 +
6325 + list_for_each_entry(flow, flow_list, tmp_list) {
6326 +- if (!mlx5e_is_offloaded_flow(flow))
6327 ++ if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
6328 + continue;
6329 + attr = flow->attr;
6330 + esw_attr = attr->esw_attr;
6331 +@@ -188,7 +188,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
6332 + int err;
6333 +
6334 + list_for_each_entry(flow, flow_list, tmp_list) {
6335 +- if (!mlx5e_is_offloaded_flow(flow))
6336 ++ if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
6337 + continue;
6338 + attr = flow->attr;
6339 + esw_attr = attr->esw_attr;
6340 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
6341 +index 0b75fab41ae8f..6464ac3f294e7 100644
6342 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
6343 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
6344 +@@ -1324,7 +1324,7 @@ void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
6345 + ttc_params->inner_ttc = &priv->fs.inner_ttc;
6346 + }
6347 +
6348 +-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
6349 ++static void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
6350 + {
6351 + struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
6352 +
6353 +@@ -1343,8 +1343,8 @@ void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
6354 + ft_attr->prio = MLX5E_NIC_PRIO;
6355 + }
6356 +
6357 +-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
6358 +- struct mlx5e_ttc_table *ttc)
6359 ++static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
6360 ++ struct mlx5e_ttc_table *ttc)
6361 + {
6362 + struct mlx5e_flow_table *ft = &ttc->ft;
6363 + int err;
6364 +@@ -1374,8 +1374,8 @@ err:
6365 + return err;
6366 + }
6367 +
6368 +-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
6369 +- struct mlx5e_ttc_table *ttc)
6370 ++static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
6371 ++ struct mlx5e_ttc_table *ttc)
6372 + {
6373 + if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
6374 + return;
6375 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6376 +index 24f919ef9b8e4..2d53eaf3b9241 100644
6377 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6378 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
6379 +@@ -2567,6 +2567,14 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
6380 + err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
6381 + if (err)
6382 + goto free_in;
6383 ++
6384 ++ /* Verify that inner TIR resources were allocated */
6385 ++ if (!priv->inner_indir_tir[0].tirn)
6386 ++ continue;
6387 ++
6388 ++ err = mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
6389 ++ if (err)
6390 ++ goto free_in;
6391 + }
6392 +
6393 + for (ix = 0; ix < priv->max_nch; ix++) {
6394 +@@ -3445,8 +3453,7 @@ static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offloa
6395 + return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
6396 + htb->rate, htb->ceil, htb->extack);
6397 + case TC_HTB_LEAF_DEL:
6398 +- return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid,
6399 +- htb->extack);
6400 ++ return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack);
6401 + case TC_HTB_LEAF_DEL_LAST:
6402 + case TC_HTB_LEAF_DEL_LAST_FORCE:
6403 + return mlx5e_htb_leaf_del_last(priv, htb->classid,
6404 +@@ -4812,7 +4819,14 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
6405 + netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
6406 + netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
6407 +
6408 ++ /* Tunneled LRO is not supported in the driver, and the same RQs are
6409 ++ * shared between inner and outer TIRs, so the driver can't disable LRO
6410 ++ * for inner TIRs while having it enabled for outer TIRs. Due to this,
6411 ++ * block LRO altogether if the firmware declares tunneled LRO support.
6412 ++ */
6413 + if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
6414 ++ !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
6415 ++ !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
6416 + mlx5e_check_fragmented_striding_rq_cap(mdev))
6417 + netdev->vlan_features |= NETIF_F_LRO;
6418 +
6419 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
6420 +index d273758255c3a..6eba574c5a364 100644
6421 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
6422 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
6423 +@@ -1338,6 +1338,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
6424 + int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
6425 + {
6426 + struct mlx5e_priv *out_priv, *route_priv;
6427 ++ struct mlx5_devcom *devcom = NULL;
6428 + struct mlx5_core_dev *route_mdev;
6429 + struct mlx5_eswitch *esw;
6430 + u16 vhca_id;
6431 +@@ -1349,7 +1350,24 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
6432 + route_mdev = route_priv->mdev;
6433 +
6434 + vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
6435 ++ if (mlx5_lag_is_active(out_priv->mdev)) {
6436 ++ /* In the lag case we may get devices from different eswitch instances.
6437 ++ * If we failed to get the vport number, it most likely means we are
6438 ++ * on the wrong eswitch.
6439 ++ */
6440 ++ err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
6441 ++ if (err != -ENOENT)
6442 ++ return err;
6443 ++
6444 ++ devcom = out_priv->mdev->priv.devcom;
6445 ++ esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
6446 ++ if (!esw)
6447 ++ return -ENODEV;
6448 ++ }
6449 ++
6450 + err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
6451 ++ if (devcom)
6452 ++ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
6453 + return err;
6454 + }
6455 +
6456 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
6457 +index 3da7becc1069f..425c91814b34f 100644
6458 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
6459 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
6460 +@@ -364,6 +364,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
6461 + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
6462 + dest.vport.num = e->vport;
6463 + dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
6464 ++ dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
6465 + e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
6466 + if (IS_ERR(e->fwd_rule)) {
6467 + mlx5_destroy_flow_group(e->fwd_grp);
6468 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
6469 +index 3bb71a1860042..fc945945ae33e 100644
6470 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
6471 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
6472 +@@ -3091,8 +3091,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
6473 +
6474 + switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
6475 + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
6476 +- if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
6477 ++ if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
6478 ++ err = 0;
6479 + goto out;
6480 ++ }
6481 ++
6482 + fallthrough;
6483 + case MLX5_CAP_INLINE_MODE_L2:
6484 + NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
6485 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
6486 +index 7d7ed025db0da..620d638e1e8ff 100644
6487 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
6488 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
6489 +@@ -331,17 +331,6 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
6490 + }
6491 +
6492 + mlx5e_set_ttc_basic_params(priv, &ttc_params);
6493 +- mlx5e_set_inner_ttc_ft_params(&ttc_params);
6494 +- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
6495 +- ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
6496 +-
6497 +- err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
6498 +- if (err) {
6499 +- netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
6500 +- err);
6501 +- goto err_destroy_arfs_tables;
6502 +- }
6503 +-
6504 + mlx5e_set_ttc_ft_params(&ttc_params);
6505 + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
6506 + ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
6507 +@@ -350,13 +339,11 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
6508 + if (err) {
6509 + netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
6510 + err);
6511 +- goto err_destroy_inner_ttc_table;
6512 ++ goto err_destroy_arfs_tables;
6513 + }
6514 +
6515 + return 0;
6516 +
6517 +-err_destroy_inner_ttc_table:
6518 +- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
6519 + err_destroy_arfs_tables:
6520 + mlx5e_arfs_destroy_tables(priv);
6521 +
6522 +@@ -366,7 +353,6 @@ err_destroy_arfs_tables:
6523 + static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
6524 + {
6525 + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
6526 +- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
6527 + mlx5e_arfs_destroy_tables(priv);
6528 + }
6529 +
6530 +@@ -392,7 +378,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
6531 + if (err)
6532 + goto err_destroy_indirect_rqts;
6533 +
6534 +- err = mlx5e_create_indirect_tirs(priv, true);
6535 ++ err = mlx5e_create_indirect_tirs(priv, false);
6536 + if (err)
6537 + goto err_destroy_direct_rqts;
6538 +
6539 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
6540 +index 5c043c5cc4035..40ef60f562b42 100644
6541 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
6542 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
6543 +@@ -277,6 +277,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
6544 + int err;
6545 +
6546 + ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
6547 ++ mlx5_lag_mp_reset(ldev);
6548 +
6549 + MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG);
6550 + err = mlx5_cmd_exec_in(dev0, destroy_lag, in);
6551 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
6552 +index c4bf8b679541e..516bfc2bd797b 100644
6553 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
6554 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
6555 +@@ -302,6 +302,14 @@ static int mlx5_lag_fib_event(struct notifier_block *nb,
6556 + return NOTIFY_DONE;
6557 + }
6558 +
6559 ++void mlx5_lag_mp_reset(struct mlx5_lag *ldev)
6560 ++{
6561 ++ /* Clear mfi, as it might become stale when a route delete event
6562 ++ * has been missed, see mlx5_lag_fib_route_event().
6563 ++ */
6564 ++ ldev->lag_mp.mfi = NULL;
6565 ++}
6566 ++
6567 + int mlx5_lag_mp_init(struct mlx5_lag *ldev)
6568 + {
6569 + struct lag_mp *mp = &ldev->lag_mp;
6570 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
6571 +index 258ac7b2964e8..729c839397a89 100644
6572 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
6573 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
6574 +@@ -21,11 +21,13 @@ struct lag_mp {
6575 +
6576 + #ifdef CONFIG_MLX5_ESWITCH
6577 +
6578 ++void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
6579 + int mlx5_lag_mp_init(struct mlx5_lag *ldev);
6580 + void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
6581 +
6582 + #else /* CONFIG_MLX5_ESWITCH */
6583 +
6584 ++static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
6585 + static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
6586 + static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
6587 +
6588 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
6589 +index b41301a5b0df8..cd520e4c5522f 100644
6590 +--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
6591 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
6592 +@@ -91,20 +91,20 @@ int ionic_devlink_register(struct ionic *ionic)
6593 + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
6594 + devlink_port_attrs_set(&ionic->dl_port, &attrs);
6595 + err = devlink_port_register(dl, &ionic->dl_port, 0);
6596 +- if (err)
6597 ++ if (err) {
6598 + dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
6599 +- else
6600 +- devlink_port_type_eth_set(&ionic->dl_port,
6601 +- ionic->lif->netdev);
6602 ++ devlink_unregister(dl);
6603 ++ return err;
6604 ++ }
6605 +
6606 +- return err;
6607 ++ devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
6608 ++ return 0;
6609 + }
6610 +
6611 + void ionic_devlink_unregister(struct ionic *ionic)
6612 + {
6613 + struct devlink *dl = priv_to_devlink(ionic);
6614 +
6615 +- if (ionic->dl_port.registered)
6616 +- devlink_port_unregister(&ionic->dl_port);
6617 ++ devlink_port_unregister(&ionic->dl_port);
6618 + devlink_unregister(dl);
6619 + }
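
The ionic change makes probe and teardown symmetric: if port registration fails, the already-registered devlink instance is unwound before returning, and the unregister path can drop the `registered` test because a successful probe now guarantees the port exists. The general unwind-on-failure shape, sketched with hypothetical names:

        #include <stdio.h>

        static int register_core(void)    { puts("core registered");   return 0; }
        static void unregister_core(void) { puts("core unregistered"); }
        static int register_port(int fail){ return fail ? -1 : 0; }

        /* Register in order; on failure, unwind what already succeeded. */
        static int probe(int fail_port)
        {
                int err = register_core();
                if (err)
                        return err;

                err = register_port(fail_port);
                if (err) {
                        unregister_core();   /* keep teardown symmetric */
                        return err;
                }
                return 0;
        }

        int main(void)
        {
                printf("probe -> %d\n", probe(1));
                return 0;
        }
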
6620 +diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
6621 +index b64c254e00ba1..8427fe1b8fd1c 100644
6622 +--- a/drivers/net/ethernet/qualcomm/qca_spi.c
6623 ++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
6624 +@@ -434,7 +434,7 @@ qcaspi_receive(struct qcaspi *qca)
6625 + skb_put(qca->rx_skb, retcode);
6626 + qca->rx_skb->protocol = eth_type_trans(
6627 + qca->rx_skb, qca->rx_skb->dev);
6628 +- qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
6629 ++ skb_checksum_none_assert(qca->rx_skb);
6630 + netif_rx_ni(qca->rx_skb);
6631 + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
6632 + net_dev->mtu + VLAN_ETH_HLEN);
6633 +diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
6634 +index bcdeca7b33664..ce3f7ce31adc1 100644
6635 +--- a/drivers/net/ethernet/qualcomm/qca_uart.c
6636 ++++ b/drivers/net/ethernet/qualcomm/qca_uart.c
6637 +@@ -107,7 +107,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
6638 + skb_put(qca->rx_skb, retcode);
6639 + qca->rx_skb->protocol = eth_type_trans(
6640 + qca->rx_skb, qca->rx_skb->dev);
6641 +- qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
6642 ++ skb_checksum_none_assert(qca->rx_skb);
6643 + netif_rx_ni(qca->rx_skb);
6644 + qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
6645 + netdev->mtu +
6646 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
6647 +index e632702675787..f83db62938dd1 100644
6648 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
6649 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
6650 +@@ -172,11 +172,12 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
6651 + x->rx_normal_irq_n++;
6652 + ret |= handle_rx;
6653 + }
6654 +- if (likely(intr_status & (DMA_CHAN_STATUS_TI |
6655 +- DMA_CHAN_STATUS_TBU))) {
6656 ++ if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
6657 + x->tx_normal_irq_n++;
6658 + ret |= handle_tx;
6659 + }
6660 ++ if (unlikely(intr_status & DMA_CHAN_STATUS_TBU))
6661 ++ ret |= handle_tx;
6662 + if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
6663 + x->rx_early_irq++;
6664 +
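
The dwmac4 change splits transmit-interrupt accounting: a Transmit Interrupt (TI) both bumps tx_normal_irq_n and schedules TX processing, while Transmit Buffer Unavailable (TBU) still triggers TX cleanup but is no longer counted as a normal TX interrupt. A sketch of the dispatch with stand-in bit definitions:

        #include <stdio.h>

        #define STATUS_TI  (1u << 0)   /* illustrative bit positions only */
        #define STATUS_TBU (1u << 1)

        struct stats { unsigned tx_normal_irq_n; };

        /* Returns nonzero when TX processing should run. */
        static int dma_interrupt(unsigned status, struct stats *x)
        {
                int handle_tx = 0;

                if (status & STATUS_TI) {        /* counted and handled */
                        x->tx_normal_irq_n++;
                        handle_tx = 1;
                }
                if (status & STATUS_TBU)         /* handled but not counted */
                        handle_tx = 1;

                return handle_tx;
        }

        int main(void)
        {
                struct stats x = { 0 };
                printf("TBU only: handle_tx=%d count=%u\n",
                       dma_interrupt(STATUS_TBU, &x), x.tx_normal_irq_n);
                return 0;
        }
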
6665 +diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
6666 +index 67a08cbba859d..e967cd1ade36b 100644
6667 +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
6668 ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
6669 +@@ -518,6 +518,10 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
6670 + }
6671 +
6672 + napi_enable(&common->napi_rx);
6673 ++ if (common->rx_irq_disabled) {
6674 ++ common->rx_irq_disabled = false;
6675 ++ enable_irq(common->rx_chns.irq);
6676 ++ }
6677 +
6678 + dev_dbg(common->dev, "cpsw_nuss started\n");
6679 + return 0;
6680 +@@ -871,8 +875,12 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
6681 +
6682 + dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
6683 +
6684 +- if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
6685 +- enable_irq(common->rx_chns.irq);
6686 ++ if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
6687 ++ if (common->rx_irq_disabled) {
6688 ++ common->rx_irq_disabled = false;
6689 ++ enable_irq(common->rx_chns.irq);
6690 ++ }
6691 ++ }
6692 +
6693 + return num_rx;
6694 + }
6695 +@@ -1090,6 +1098,7 @@ static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
6696 + {
6697 + struct am65_cpsw_common *common = dev_id;
6698 +
6699 ++ common->rx_irq_disabled = true;
6700 + disable_irq_nosync(irq);
6701 + napi_schedule(&common->napi_rx);
6702 +
6703 +@@ -2388,21 +2397,6 @@ static const struct devlink_param am65_cpsw_devlink_params[] = {
6704 + am65_cpsw_dl_switch_mode_set, NULL),
6705 + };
6706 +
6707 +-static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common)
6708 +-{
6709 +- struct devlink_port *dl_port;
6710 +- struct am65_cpsw_port *port;
6711 +- int i;
6712 +-
6713 +- for (i = 1; i <= common->port_num; i++) {
6714 +- port = am65_common_get_port(common, i);
6715 +- dl_port = &port->devlink_port;
6716 +-
6717 +- if (dl_port->registered)
6718 +- devlink_port_unregister(dl_port);
6719 +- }
6720 +-}
6721 +-
6722 + static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
6723 + {
6724 + struct devlink_port_attrs attrs = {};
6725 +@@ -2464,7 +2458,12 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
6726 + return ret;
6727 +
6728 + dl_port_unreg:
6729 +- am65_cpsw_unregister_devlink_ports(common);
6730 ++ for (i = i - 1; i >= 1; i--) {
6731 ++ port = am65_common_get_port(common, i);
6732 ++ dl_port = &port->devlink_port;
6733 ++
6734 ++ devlink_port_unregister(dl_port);
6735 ++ }
6736 + dl_unreg:
6737 + devlink_unregister(common->devlink);
6738 + dl_free:
6739 +@@ -2475,6 +2474,17 @@ dl_free:
6740 +
6741 + static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
6742 + {
6743 ++ struct devlink_port *dl_port;
6744 ++ struct am65_cpsw_port *port;
6745 ++ int i;
6746 ++
6747 ++ for (i = 1; i <= common->port_num; i++) {
6748 ++ port = am65_common_get_port(common, i);
6749 ++ dl_port = &port->devlink_port;
6750 ++
6751 ++ devlink_port_unregister(dl_port);
6752 ++ }
6753 ++
6754 + if (!AM65_CPSW_IS_CPSW2G(common) &&
6755 + IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
6756 + devlink_params_unpublish(common->devlink);
6757 +@@ -2482,7 +2492,6 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
6758 + ARRAY_SIZE(am65_cpsw_devlink_params));
6759 + }
6760 +
6761 +- am65_cpsw_unregister_devlink_ports(common);
6762 + devlink_unregister(common->devlink);
6763 + devlink_free(common->devlink);
6764 + }
6765 +diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
6766 +index 5d93e346f05eb..048ed10143c17 100644
6767 +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
6768 ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
6769 +@@ -126,6 +126,8 @@ struct am65_cpsw_common {
6770 + struct am65_cpsw_rx_chn rx_chns;
6771 + struct napi_struct napi_rx;
6772 +
6773 ++ bool rx_irq_disabled;
6774 ++
6775 + u32 nuss_ver;
6776 + u32 cpsw_ver;
6777 + unsigned long bus_freq;
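
The am65-cpsw fix pairs disable_irq_nosync() in the hard-IRQ handler with the rx_irq_disabled flag added above, so enable_irq() only runs when the line really is disabled; otherwise re-opening the interface after NAPI already re-enabled the IRQ would unbalance the enable/disable depth. The flag-guarded pattern, sketched with stand-ins for the IRQ core:

        #include <stdio.h>

        static int irq_depth;   /* stand-in for the IRQ core's disable depth */

        static void disable_irq_nosync(void) { irq_depth++; }
        static void enable_irq(void)         { irq_depth--; }

        static int rx_irq_disabled;

        static void rx_hardirq(void)            /* top half */
        {
                rx_irq_disabled = 1;
                disable_irq_nosync();
        }

        static void maybe_enable(void)          /* NAPI completion or re-open */
        {
                if (rx_irq_disabled) {           /* only undo a real disable */
                        rx_irq_disabled = 0;
                        enable_irq();
                }
        }

        int main(void)
        {
                rx_hardirq();
                maybe_enable();  /* balances */
                maybe_enable();  /* no-op instead of an underflow */
                printf("depth=%d\n", irq_depth);  /* 0 */
                return 0;
        }
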
6778 +diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
6779 +index 53a433442803a..f4d758f8a1ee1 100644
6780 +--- a/drivers/net/phy/marvell10g.c
6781 ++++ b/drivers/net/phy/marvell10g.c
6782 +@@ -987,11 +987,19 @@ static int mv3310_get_number_of_ports(struct phy_device *phydev)
6783 +
6784 + static int mv3310_match_phy_device(struct phy_device *phydev)
6785 + {
6786 ++ if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
6787 ++ MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
6788 ++ return 0;
6789 ++
6790 + return mv3310_get_number_of_ports(phydev) == 1;
6791 + }
6792 +
6793 + static int mv3340_match_phy_device(struct phy_device *phydev)
6794 + {
6795 ++ if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
6796 ++ MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
6797 ++ return 0;
6798 ++
6799 + return mv3310_get_number_of_ports(phydev) == 4;
6800 + }
6801 +
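
The marvell10g fix has the custom match functions check the PHY ID under the Marvell mask before touching port-count registers, so the driver never probes another vendor's hardware. Masked-ID matching in a sketch (the ID and mask constants below are illustrative, not quoted from marvell_phy.h):

        #include <stdio.h>
        #include <stdint.h>

        #define PHY_ID_MASK   0xfffffff0u   /* illustrative */
        #define PHY_ID_X3310  0x002b09a0u   /* illustrative */

        /* Match only devices whose masked ID equals the expected family ID. */
        static int id_matches(uint32_t dev_id)
        {
                return (dev_id & PHY_ID_MASK) == PHY_ID_X3310;
        }

        int main(void)
        {
                printf("%d %d\n",
                       id_matches(0x002b09a5),   /* same family, revision differs */
                       id_matches(0x004dd072));  /* different vendor -> no match */
                return 0;
        }
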
6802 +diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
6803 +index dc87e8caf954a..53c3c680c0832 100644
6804 +--- a/drivers/net/usb/asix_devices.c
6805 ++++ b/drivers/net/usb/asix_devices.c
6806 +@@ -1220,6 +1220,7 @@ static const struct driver_info ax88772b_info = {
6807 + .unbind = ax88772_unbind,
6808 + .status = asix_status,
6809 + .reset = ax88772_reset,
6810 ++ .stop = ax88772_stop,
6811 + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
6812 + FLAG_MULTI_PACKET,
6813 + .rx_fixup = asix_rx_fixup_common,
6814 +diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
6815 +index b137e7f343979..bd1ef63349978 100644
6816 +--- a/drivers/net/wireless/ath/ath6kl/wmi.c
6817 ++++ b/drivers/net/wireless/ath/ath6kl/wmi.c
6818 +@@ -2504,8 +2504,10 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
6819 + goto free_data_skb;
6820 +
6821 + for (index = 0; index < num_pri_streams; index++) {
6822 +- if (WARN_ON(!data_sync_bufs[index].skb))
6823 ++ if (WARN_ON(!data_sync_bufs[index].skb)) {
6824 ++ ret = -ENOMEM;
6825 + goto free_data_skb;
6826 ++ }
6827 +
6828 + ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
6829 + data_sync_bufs[index].
6830 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
6831 +index c49dd0c36ae43..bbd72c2db0886 100644
6832 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
6833 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
6834 +@@ -2075,7 +2075,7 @@ cleanup:
6835 +
6836 + err = brcmf_pcie_probe(pdev, NULL);
6837 + if (err)
6838 +- brcmf_err(bus, "probe after resume failed, err=%d\n", err);
6839 ++ __brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err);
6840 +
6841 + return err;
6842 + }
6843 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
6844 +index 34933f133a0ae..66f8d949c1e69 100644
6845 +--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
6846 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
6847 +@@ -264,7 +264,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
6848 + goto out_free;
6849 + }
6850 +
6851 +- enabled = !!wifi_pkg->package.elements[0].integer.value;
6852 ++ enabled = !!wifi_pkg->package.elements[1].integer.value;
6853 +
6854 + if (!enabled) {
6855 + *block_list_size = -1;
6856 +@@ -273,15 +273,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
6857 + goto out_free;
6858 + }
6859 +
6860 +- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
6861 +- wifi_pkg->package.elements[1].integer.value >
6862 ++ if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
6863 ++ wifi_pkg->package.elements[2].integer.value >
6864 + APCI_WTAS_BLACK_LIST_MAX) {
6865 + IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
6866 + wifi_pkg->package.elements[1].integer.value);
6867 + ret = -EINVAL;
6868 + goto out_free;
6869 + }
6870 +- *block_list_size = wifi_pkg->package.elements[1].integer.value;
6871 ++ *block_list_size = wifi_pkg->package.elements[2].integer.value;
6872 +
6873 + IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *block_list_size);
6874 + if (*block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
6875 +@@ -294,15 +294,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
6876 + for (i = 0; i < *block_list_size; i++) {
6877 + u32 country;
6878 +
6879 +- if (wifi_pkg->package.elements[2 + i].type !=
6880 ++ if (wifi_pkg->package.elements[3 + i].type !=
6881 + ACPI_TYPE_INTEGER) {
6882 + IWL_DEBUG_RADIO(fwrt,
6883 +- "TAS invalid array elem %d\n", 2 + i);
6884 ++ "TAS invalid array elem %d\n", 3 + i);
6885 + ret = -EINVAL;
6886 + goto out_free;
6887 + }
6888 +
6889 +- country = wifi_pkg->package.elements[2 + i].integer.value;
6890 ++ country = wifi_pkg->package.elements[3 + i].integer.value;
6891 + block_list_array[i] = cpu_to_le32(country);
6892 + IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
6893 + }
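
The iwlwifi change shifts every TAS package index up by one: the enabled flag moves to element 1, the block-list size to element 2, and country entries start at element 3, with element 0 presumably holding a table revision. Validating a positional layout like that before use looks roughly like this sketch, with a plain array standing in for the ACPI package (layout assumed for illustration):

        #include <stdio.h>

        /* Stand-in for an ACPI package: element 0 = revision, 1 = enabled,
         * 2 = entry count, 3.. = entries. Layout assumed for illustration.
         */
        static int parse_tas(const long *pkg, int pkg_len, long *out, int max)
        {
                if (pkg_len < 3 || !pkg[1])
                        return -1;                       /* absent or disabled */

                long n = pkg[2];
                if (n < 0 || n > max || 3 + n > pkg_len)
                        return -1;                       /* size out of bounds */

                for (long i = 0; i < n; i++)
                        out[i] = pkg[3 + i];             /* entries start at 3 */
                return (int)n;
        }

        int main(void)
        {
                long pkg[] = { 1 /*rev*/, 1 /*enabled*/, 2 /*count*/, 276, 376 };
                long out[16];
                int n = parse_tas(pkg, 5, out, 16);

                printf("n=%d first=%ld\n", n, n > 0 ? out[0] : 0);
                return 0;
        }
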
6894 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
6895 +index 0b8a0cd3b652d..6f49950a5f6d1 100644
6896 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
6897 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
6898 +@@ -558,6 +558,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
6899 + IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
6900 + IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
6901 + IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
6902 ++ IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
6903 + IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
6904 + IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
6905 + IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL),
6906 +diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
6907 +index 99b21a2c83861..f4a26f16f00f4 100644
6908 +--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
6909 ++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
6910 +@@ -1038,8 +1038,10 @@ static int rsi_load_9116_firmware(struct rsi_hw *adapter)
6911 + }
6912 +
6913 + ta_firmware = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
6914 +- if (!ta_firmware)
6915 ++ if (!ta_firmware) {
6916 ++ status = -ENOMEM;
6917 + goto fail_release_fw;
6918 ++ }
6919 + fw_p = ta_firmware;
6920 + instructions_sz = fw_entry->size;
6921 + rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", instructions_sz);
6922 +diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
6923 +index 3fbe2a3c14550..416976f098882 100644
6924 +--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
6925 ++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
6926 +@@ -816,6 +816,7 @@ static int rsi_probe(struct usb_interface *pfunction,
6927 + } else {
6928 + rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n",
6929 + __func__, id->idProduct);
6930 ++ status = -ENODEV;
6931 + goto err1;
6932 + }
6933 +
6934 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
6935 +index 7f6b3a9915014..3bd9cbc80246f 100644
6936 +--- a/drivers/nvme/host/rdma.c
6937 ++++ b/drivers/nvme/host/rdma.c
6938 +@@ -735,13 +735,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
6939 + if (ret)
6940 + return ret;
6941 +
6942 +- ctrl->ctrl.queue_count = nr_io_queues + 1;
6943 +- if (ctrl->ctrl.queue_count < 2) {
6944 ++ if (nr_io_queues == 0) {
6945 + dev_err(ctrl->ctrl.device,
6946 + "unable to set any I/O queues\n");
6947 + return -ENOMEM;
6948 + }
6949 +
6950 ++ ctrl->ctrl.queue_count = nr_io_queues + 1;
6951 + dev_info(ctrl->ctrl.device,
6952 + "creating %d I/O queues.\n", nr_io_queues);
6953 +
6954 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
6955 +index 8cb15ee5b249e..18bd68b82d78f 100644
6956 +--- a/drivers/nvme/host/tcp.c
6957 ++++ b/drivers/nvme/host/tcp.c
6958 +@@ -1769,13 +1769,13 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
6959 + if (ret)
6960 + return ret;
6961 +
6962 +- ctrl->queue_count = nr_io_queues + 1;
6963 +- if (ctrl->queue_count < 2) {
6964 ++ if (nr_io_queues == 0) {
6965 + dev_err(ctrl->device,
6966 + "unable to set any I/O queues\n");
6967 + return -ENOMEM;
6968 + }
6969 +
6970 ++ ctrl->queue_count = nr_io_queues + 1;
6971 + dev_info(ctrl->device,
6972 + "creating %d I/O queues.\n", nr_io_queues);
6973 +
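
The RDMA and TCP fixes reorder the same two statements: validate nr_io_queues before committing it to ctrl->queue_count, so a failed connect cannot leave a bogus count behind in controller state. Check-then-commit in a sketch:

        #include <stdio.h>

        struct ctrl { unsigned queue_count; };

        /* Validate the input first; mutate state only on success. */
        static int alloc_io_queues(struct ctrl *c, unsigned nr_io_queues)
        {
                if (nr_io_queues == 0)
                        return -1;              /* state untouched on failure */

                c->queue_count = nr_io_queues + 1;   /* +1 for the admin queue */
                return 0;
        }

        int main(void)
        {
                struct ctrl c = { .queue_count = 0 };

                if (alloc_io_queues(&c, 0))
                        printf("failed, queue_count still %u\n", c.queue_count);
                alloc_io_queues(&c, 4);
                printf("ok, queue_count=%u\n", c.queue_count);
                return 0;
        }
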
6974 +diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
6975 +index 7d0f3523fdab2..8ef564c3b32c8 100644
6976 +--- a/drivers/nvme/target/fabrics-cmd.c
6977 ++++ b/drivers/nvme/target/fabrics-cmd.c
6978 +@@ -120,6 +120,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
6979 + if (!sqsize) {
6980 + pr_warn("queue size zero!\n");
6981 + req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
6982 ++ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
6983 + ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
6984 + goto err;
6985 + }
6986 +@@ -260,11 +261,11 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
6987 + }
6988 +
6989 + status = nvmet_install_queue(ctrl, req);
6990 +- if (status) {
6991 +- /* pass back cntlid that had the issue of installing queue */
6992 +- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
6993 ++ if (status)
6994 + goto out_ctrl_put;
6995 +- }
6996 ++
6997 ++ /* pass back cntlid for successful completion */
6998 ++ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
6999 +
7000 + pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
7001 +
7002 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
7003 +index aacf575c15cff..3f353572588df 100644
7004 +--- a/drivers/pci/pci.c
7005 ++++ b/drivers/pci/pci.c
7006 +@@ -2495,7 +2495,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
7007 + if (enable) {
7008 + int error;
7009 +
7010 +- if (pci_pme_capable(dev, state))
7011 ++ /*
7012 ++ * Enable PME signaling if the device can signal PME from
7013 ++ * D3cold regardless of whether or not it can signal PME from
7014 ++ * the current target state, because that will allow it to
7015 ++ * signal PME when the hierarchy above it goes into D3cold and
7016 ++ * the device itself ends up in D3cold as a result of that.
7017 ++ */
7018 ++ if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
7019 + pci_pme_active(dev, true);
7020 + else
7021 + ret = 1;
7022 +@@ -2599,16 +2606,20 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
7023 + if (dev->current_state == PCI_D3cold)
7024 + target_state = PCI_D3cold;
7025 +
7026 +- if (wakeup) {
7027 ++ if (wakeup && dev->pme_support) {
7028 ++ pci_power_t state = target_state;
7029 ++
7030 + /*
7031 + * Find the deepest state from which the device can generate
7032 + * PME#.
7033 + */
7034 +- if (dev->pme_support) {
7035 +- while (target_state
7036 +- && !(dev->pme_support & (1 << target_state)))
7037 +- target_state--;
7038 +- }
7039 ++ while (state && !(dev->pme_support & (1 << state)))
7040 ++ state--;
7041 ++
7042 ++ if (state)
7043 ++ return state;
7044 ++ else if (dev->pme_support & 1)
7045 ++ return PCI_D0;
7046 + }
7047 +
7048 + return target_state;
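
pci_target_state() now walks downward from the target to the deepest state the device can signal PME# from, treating pme_support as a bitmask indexed by power state, and falls back to D0 when only bit 0 is set. A sketch of the walk, with states numbered 0 (D0) through 4 (D3cold) to mirror pci_power_t:

        #include <stdio.h>

        /* States: 0=D0, 1=D1, 2=D2, 3=D3hot, 4=D3cold. pme_support has
         * one bit per state. Encoding mirrors pci_power_t for illustration.
         */
        static int deepest_pme_state(int target, unsigned pme_support)
        {
                int state = target;

                while (state && !(pme_support & (1u << state)))
                        state--;         /* back off toward D0 */

                if (state)
                        return state;
                if (pme_support & 1u)
                        return 0;        /* PME possible from D0 only */
                return target;           /* no PME-capable state; keep target */
        }

        int main(void)
        {
                /* Device can signal PME from D3hot and D0 only. */
                unsigned pme = (1u << 3) | 1u;

                printf("%d\n", deepest_pme_state(4, pme)); /* -> 3 (D3hot) */
                printf("%d\n", deepest_pme_state(2, pme)); /* -> 0 (D0)    */
                return 0;
        }
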
7049 +diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
7050 +index 2ba2d8d6b8e63..d1bcc52e67c35 100644
7051 +--- a/drivers/power/supply/axp288_fuel_gauge.c
7052 ++++ b/drivers/power/supply/axp288_fuel_gauge.c
7053 +@@ -147,7 +147,7 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
7054 + }
7055 +
7056 + if (ret < 0) {
7057 +- dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
7058 ++ dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
7059 + return ret;
7060 + }
7061 +
7062 +@@ -161,7 +161,7 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
7063 + ret = regmap_write(info->regmap, reg, (unsigned int)val);
7064 +
7065 + if (ret < 0)
7066 +- dev_err(&info->pdev->dev, "axp288 reg write err:%d\n", ret);
7067 ++ dev_err(&info->pdev->dev, "Error writing reg 0x%02x err: %d\n", reg, ret);
7068 +
7069 + return ret;
7070 + }
7071 +diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
7072 +index d110597746b0a..091868e9e9e82 100644
7073 +--- a/drivers/power/supply/cw2015_battery.c
7074 ++++ b/drivers/power/supply/cw2015_battery.c
7075 +@@ -679,7 +679,9 @@ static int cw_bat_probe(struct i2c_client *client)
7076 + &cw2015_bat_desc,
7077 + &psy_cfg);
7078 + if (IS_ERR(cw_bat->rk_bat)) {
7079 +- dev_err(cw_bat->dev, "Failed to register power supply\n");
7080 ++ /* dev_err_probe() is quiet on -EPROBE_DEFER, so probe can be retried */
7081 ++ dev_err_probe(&client->dev, PTR_ERR(cw_bat->rk_bat),
7082 ++ "Failed to register power supply\n");
7083 + return PTR_ERR(cw_bat->rk_bat);
7084 + }
7085 +
7086 +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
7087 +index ce2041b30a066..215e77d3b6d93 100644
7088 +--- a/drivers/power/supply/max17042_battery.c
7089 ++++ b/drivers/power/supply/max17042_battery.c
7090 +@@ -748,7 +748,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
7091 + struct max17042_config_data *config = chip->pdata->config_data;
7092 +
7093 + max17042_override_por(map, MAX17042_TGAIN, config->tgain);
7094 +- max17042_override_por(map, MAx17042_TOFF, config->toff);
7095 ++ max17042_override_por(map, MAX17042_TOFF, config->toff);
7096 + max17042_override_por(map, MAX17042_CGAIN, config->cgain);
7097 + max17042_override_por(map, MAX17042_COFF, config->coff);
7098 +
7099 +diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
7100 +index df240420f2de0..9d8c2fadd4d03 100644
7101 +--- a/drivers/power/supply/smb347-charger.c
7102 ++++ b/drivers/power/supply/smb347-charger.c
7103 +@@ -55,6 +55,7 @@
7104 + #define CFG_PIN_EN_CTRL_ACTIVE_LOW 0x60
7105 + #define CFG_PIN_EN_APSD_IRQ BIT(1)
7106 + #define CFG_PIN_EN_CHARGER_ERROR BIT(2)
7107 ++#define CFG_PIN_EN_CTRL BIT(4)
7108 + #define CFG_THERM 0x07
7109 + #define CFG_THERM_SOFT_HOT_COMPENSATION_MASK 0x03
7110 + #define CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT 0
7111 +@@ -724,6 +725,15 @@ static int smb347_hw_init(struct smb347_charger *smb)
7112 + if (ret < 0)
7113 + goto fail;
7114 +
7115 ++ /* Activate pin control, making it writable. */
7116 ++ switch (smb->enable_control) {
7117 ++ case SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW:
7118 ++ case SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH:
7119 ++ ret = regmap_set_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL);
7120 ++ if (ret < 0)
7121 ++ goto fail;
7122 ++ }
7123 ++
7124 + /*
7125 + * Make the charging functionality controllable by a write to the
7126 + * command register unless pin control is specified in the platform
7127 +diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
7128 +index 1d5b0a1b86f78..06cbe60c990f9 100644
7129 +--- a/drivers/regulator/tps65910-regulator.c
7130 ++++ b/drivers/regulator/tps65910-regulator.c
7131 +@@ -1211,12 +1211,10 @@ static int tps65910_probe(struct platform_device *pdev)
7132 +
7133 + rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
7134 + &config);
7135 +- if (IS_ERR(rdev)) {
7136 +- dev_err(tps65910->dev,
7137 +- "failed to register %s regulator\n",
7138 +- pdev->name);
7139 +- return PTR_ERR(rdev);
7140 +- }
7141 ++ if (IS_ERR(rdev))
7142 ++ return dev_err_probe(tps65910->dev, PTR_ERR(rdev),
7143 ++ "failed to register %s regulator\n",
7144 ++ pdev->name);
7145 +
7146 + /* Save regulator for cleanup */
7147 + pmic->rdev[i] = rdev;
7148 +diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
7149 +index cbadb1c996790..d2a37978fc3a8 100644
7150 +--- a/drivers/regulator/vctrl-regulator.c
7151 ++++ b/drivers/regulator/vctrl-regulator.c
7152 +@@ -37,7 +37,6 @@ struct vctrl_voltage_table {
7153 + struct vctrl_data {
7154 + struct regulator_dev *rdev;
7155 + struct regulator_desc desc;
7156 +- struct regulator *ctrl_reg;
7157 + bool enabled;
7158 + unsigned int min_slew_down_rate;
7159 + unsigned int ovp_threshold;
7160 +@@ -82,7 +81,12 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
7161 + static int vctrl_get_voltage(struct regulator_dev *rdev)
7162 + {
7163 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
7164 +- int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
7165 ++ int ctrl_uV;
7166 ++
7167 ++ if (!rdev->supply)
7168 ++ return -EPROBE_DEFER;
7169 ++
7170 ++ ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
7171 +
7172 + return vctrl_calc_output_voltage(vctrl, ctrl_uV);
7173 + }
7174 +@@ -92,14 +96,19 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
7175 + unsigned int *selector)
7176 + {
7177 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
7178 +- struct regulator *ctrl_reg = vctrl->ctrl_reg;
7179 +- int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
7180 +- int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
7181 ++ int orig_ctrl_uV;
7182 ++ int uV;
7183 + int ret;
7184 +
7185 ++ if (!rdev->supply)
7186 ++ return -EPROBE_DEFER;
7187 ++
7188 ++ orig_ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
7189 ++ uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
7190 ++
7191 + if (req_min_uV >= uV || !vctrl->ovp_threshold)
7192 + /* voltage rising or no OVP */
7193 +- return regulator_set_voltage_rdev(ctrl_reg->rdev,
7194 ++ return regulator_set_voltage_rdev(rdev->supply->rdev,
7195 + vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
7196 + vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
7197 + PM_SUSPEND_ON);
7198 +@@ -117,7 +126,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
7199 + next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
7200 + next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
7201 +
7202 +- ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
7203 ++ ret = regulator_set_voltage_rdev(rdev->supply->rdev,
7204 + next_ctrl_uV,
7205 + next_ctrl_uV,
7206 + PM_SUSPEND_ON);
7207 +@@ -134,7 +143,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
7208 +
7209 + err:
7210 + /* Try to go back to original voltage */
7211 +- regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
7212 ++ regulator_set_voltage_rdev(rdev->supply->rdev, orig_ctrl_uV, orig_ctrl_uV,
7213 + PM_SUSPEND_ON);
7214 +
7215 + return ret;
7216 +@@ -151,16 +160,18 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
7217 + unsigned int selector)
7218 + {
7219 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
7220 +- struct regulator *ctrl_reg = vctrl->ctrl_reg;
7221 + unsigned int orig_sel = vctrl->sel;
7222 + int ret;
7223 +
7224 ++ if (!rdev->supply)
7225 ++ return -EPROBE_DEFER;
7226 ++
7227 + if (selector >= rdev->desc->n_voltages)
7228 + return -EINVAL;
7229 +
7230 + if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
7231 + /* voltage rising or no OVP */
7232 +- ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
7233 ++ ret = regulator_set_voltage_rdev(rdev->supply->rdev,
7234 + vctrl->vtable[selector].ctrl,
7235 + vctrl->vtable[selector].ctrl,
7236 + PM_SUSPEND_ON);
7237 +@@ -179,7 +190,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
7238 + else
7239 + next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
7240 +
7241 +- ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
7242 ++ ret = regulator_set_voltage_rdev(rdev->supply->rdev,
7243 + vctrl->vtable[next_sel].ctrl,
7244 + vctrl->vtable[next_sel].ctrl,
7245 + PM_SUSPEND_ON);
7246 +@@ -202,7 +213,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
7247 + err:
7248 + if (vctrl->sel != orig_sel) {
7249 + /* Try to go back to original voltage */
7250 +- if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
7251 ++ if (!regulator_set_voltage_rdev(rdev->supply->rdev,
7252 + vctrl->vtable[orig_sel].ctrl,
7253 + vctrl->vtable[orig_sel].ctrl,
7254 + PM_SUSPEND_ON))
7255 +@@ -234,10 +245,6 @@ static int vctrl_parse_dt(struct platform_device *pdev,
7256 + u32 pval;
7257 + u32 vrange_ctrl[2];
7258 +
7259 +- vctrl->ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
7260 +- if (IS_ERR(vctrl->ctrl_reg))
7261 +- return PTR_ERR(vctrl->ctrl_reg);
7262 +-
7263 + ret = of_property_read_u32(np, "ovp-threshold-percent", &pval);
7264 + if (!ret) {
7265 + vctrl->ovp_threshold = pval;
7266 +@@ -315,11 +322,11 @@ static int vctrl_cmp_ctrl_uV(const void *a, const void *b)
7267 + return at->ctrl - bt->ctrl;
7268 + }
7269 +
7270 +-static int vctrl_init_vtable(struct platform_device *pdev)
7271 ++static int vctrl_init_vtable(struct platform_device *pdev,
7272 ++ struct regulator *ctrl_reg)
7273 + {
7274 + struct vctrl_data *vctrl = platform_get_drvdata(pdev);
7275 + struct regulator_desc *rdesc = &vctrl->desc;
7276 +- struct regulator *ctrl_reg = vctrl->ctrl_reg;
7277 + struct vctrl_voltage_range *vrange_ctrl = &vctrl->vrange.ctrl;
7278 + int n_voltages;
7279 + int ctrl_uV;
7280 +@@ -395,23 +402,19 @@ static int vctrl_init_vtable(struct platform_device *pdev)
7281 + static int vctrl_enable(struct regulator_dev *rdev)
7282 + {
7283 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
7284 +- int ret = regulator_enable(vctrl->ctrl_reg);
7285 +
7286 +- if (!ret)
7287 +- vctrl->enabled = true;
7288 ++ vctrl->enabled = true;
7289 +
7290 +- return ret;
7291 ++ return 0;
7292 + }
7293 +
7294 + static int vctrl_disable(struct regulator_dev *rdev)
7295 + {
7296 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
7297 +- int ret = regulator_disable(vctrl->ctrl_reg);
7298 +
7299 +- if (!ret)
7300 +- vctrl->enabled = false;
7301 ++ vctrl->enabled = false;
7302 +
7303 +- return ret;
7304 ++ return 0;
7305 + }
7306 +
7307 + static int vctrl_is_enabled(struct regulator_dev *rdev)
7308 +@@ -447,6 +450,7 @@ static int vctrl_probe(struct platform_device *pdev)
7309 + struct regulator_desc *rdesc;
7310 + struct regulator_config cfg = { };
7311 + struct vctrl_voltage_range *vrange_ctrl;
7312 ++ struct regulator *ctrl_reg;
7313 + int ctrl_uV;
7314 + int ret;
7315 +
7316 +@@ -461,15 +465,20 @@ static int vctrl_probe(struct platform_device *pdev)
7317 + if (ret)
7318 + return ret;
7319 +
7320 ++ ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
7321 ++ if (IS_ERR(ctrl_reg))
7322 ++ return PTR_ERR(ctrl_reg);
7323 ++
7324 + vrange_ctrl = &vctrl->vrange.ctrl;
7325 +
7326 + rdesc = &vctrl->desc;
7327 + rdesc->name = "vctrl";
7328 + rdesc->type = REGULATOR_VOLTAGE;
7329 + rdesc->owner = THIS_MODULE;
7330 ++ rdesc->supply_name = "ctrl";
7331 +
7332 +- if ((regulator_get_linear_step(vctrl->ctrl_reg) == 1) ||
7333 +- (regulator_count_voltages(vctrl->ctrl_reg) == -EINVAL)) {
7334 ++ if ((regulator_get_linear_step(ctrl_reg) == 1) ||
7335 ++ (regulator_count_voltages(ctrl_reg) == -EINVAL)) {
7336 + rdesc->continuous_voltage_range = true;
7337 + rdesc->ops = &vctrl_ops_cont;
7338 + } else {
7339 +@@ -486,11 +495,12 @@ static int vctrl_probe(struct platform_device *pdev)
7340 + cfg.init_data = init_data;
7341 +
7342 + if (!rdesc->continuous_voltage_range) {
7343 +- ret = vctrl_init_vtable(pdev);
7344 ++ ret = vctrl_init_vtable(pdev, ctrl_reg);
7345 + if (ret)
7346 + return ret;
7347 +
7348 +- ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
7349 ++ /* Use locked consumer API when not in regulator framework */
7350 ++ ctrl_uV = regulator_get_voltage(ctrl_reg);
7351 + if (ctrl_uV < 0) {
7352 + dev_err(&pdev->dev, "failed to get control voltage\n");
7353 + return ctrl_uV;
7354 +@@ -513,6 +523,9 @@ static int vctrl_probe(struct platform_device *pdev)
7355 + }
7356 + }
7357 +
7358 ++ /* Drop ctrl-supply here in favor of regulator core managed supply */
7359 ++ devm_regulator_put(ctrl_reg);
7360 ++
7361 + vctrl->rdev = devm_regulator_register(&pdev->dev, rdesc, &cfg);
7362 + if (IS_ERR(vctrl->rdev)) {
7363 + ret = PTR_ERR(vctrl->rdev);
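
The vctrl hunks above replace the driver's private "ctrl" consumer with the regulator core's managed supply: rdesc->supply_name = "ctrl" makes the core resolve rdev->supply, which may still be NULL the first time an op runs, hence the -EPROBE_DEFER guards. A minimal sketch of that guard pattern, assuming the 5.14 regulator API (my_get_voltage is a hypothetical op, not part of the patch):

static int my_get_voltage(struct regulator_dev *rdev)
{
        if (!rdev->supply)              /* core has not resolved it yet */
                return -EPROBE_DEFER;   /* ask the caller to retry */

        return regulator_get_voltage_rdev(rdev->supply->rdev);
}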
7364 +diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
7365 +index a974943c27dac..9fcdb8d81eee6 100644
7366 +--- a/drivers/s390/cio/css.c
7367 ++++ b/drivers/s390/cio/css.c
7368 +@@ -430,9 +430,26 @@ static ssize_t pimpampom_show(struct device *dev,
7369 + }
7370 + static DEVICE_ATTR_RO(pimpampom);
7371 +
7372 ++static ssize_t dev_busid_show(struct device *dev,
7373 ++ struct device_attribute *attr,
7374 ++ char *buf)
7375 ++{
7376 ++ struct subchannel *sch = to_subchannel(dev);
7377 ++ struct pmcw *pmcw = &sch->schib.pmcw;
7378 ++
7379 ++ if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
7380 ++ pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
7381 ++ return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
7382 ++ pmcw->dev);
7383 ++ else
7384 ++ return sysfs_emit(buf, "none\n");
7385 ++}
7386 ++static DEVICE_ATTR_RO(dev_busid);
7387 ++
7388 + static struct attribute *io_subchannel_type_attrs[] = {
7389 + &dev_attr_chpids.attr,
7390 + &dev_attr_pimpampom.attr,
7391 ++ &dev_attr_dev_busid.attr,
7392 + NULL,
7393 + };
7394 + ATTRIBUTE_GROUPS(io_subchannel_type);
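
The new dev_busid attribute above follows the stock read-only sysfs idiom: a *_show() callback that formats into the page-sized buffer via sysfs_emit(), a DEVICE_ATTR_RO() wrapper, and the resulting dev_attr_* entry added to the attribute list. A hedged sketch with a hypothetical attribute name:

static ssize_t my_attr_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        /* sysfs_emit() clamps the write to PAGE_SIZE */
        return sysfs_emit(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(my_attr);         /* emits dev_attr_my_attr */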
7395 +diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
7396 +index 8d3a1d84a7575..9c4f3c3889345 100644
7397 +--- a/drivers/s390/crypto/ap_bus.c
7398 ++++ b/drivers/s390/crypto/ap_bus.c
7399 +@@ -127,22 +127,13 @@ static struct bus_type ap_bus_type;
7400 + /* Adapter interrupt definitions */
7401 + static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
7402 +
7403 +-static int ap_airq_flag;
7404 ++static bool ap_irq_flag;
7405 +
7406 + static struct airq_struct ap_airq = {
7407 + .handler = ap_interrupt_handler,
7408 + .isc = AP_ISC,
7409 + };
7410 +
7411 +-/**
7412 +- * ap_using_interrupts() - Returns non-zero if interrupt support is
7413 +- * available.
7414 +- */
7415 +-static inline int ap_using_interrupts(void)
7416 +-{
7417 +- return ap_airq_flag;
7418 +-}
7419 +-
7420 + /**
7421 + * ap_airq_ptr() - Get the address of the adapter interrupt indicator
7422 + *
7423 +@@ -152,7 +143,7 @@ static inline int ap_using_interrupts(void)
7424 + */
7425 + void *ap_airq_ptr(void)
7426 + {
7427 +- if (ap_using_interrupts())
7428 ++ if (ap_irq_flag)
7429 + return ap_airq.lsi_ptr;
7430 + return NULL;
7431 + }
7432 +@@ -396,7 +387,7 @@ void ap_wait(enum ap_sm_wait wait)
7433 + switch (wait) {
7434 + case AP_SM_WAIT_AGAIN:
7435 + case AP_SM_WAIT_INTERRUPT:
7436 +- if (ap_using_interrupts())
7437 ++ if (ap_irq_flag)
7438 + break;
7439 + if (ap_poll_kthread) {
7440 + wake_up(&ap_poll_wait);
7441 +@@ -471,7 +462,7 @@ static void ap_tasklet_fn(unsigned long dummy)
7442 + * be received. Doing it at the beginning of the tasklet is therefore
7443 + * important so that no requests on any AP get lost.
7444 + */
7445 +- if (ap_using_interrupts())
7446 ++ if (ap_irq_flag)
7447 + xchg(ap_airq.lsi_ptr, 0);
7448 +
7449 + spin_lock_bh(&ap_queues_lock);
7450 +@@ -541,7 +532,7 @@ static int ap_poll_thread_start(void)
7451 + {
7452 + int rc;
7453 +
7454 +- if (ap_using_interrupts() || ap_poll_kthread)
7455 ++ if (ap_irq_flag || ap_poll_kthread)
7456 + return 0;
7457 + mutex_lock(&ap_poll_thread_mutex);
7458 + ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
7459 +@@ -1187,7 +1178,7 @@ static BUS_ATTR_RO(ap_adapter_mask);
7460 + static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
7461 + {
7462 + return scnprintf(buf, PAGE_SIZE, "%d\n",
7463 +- ap_using_interrupts() ? 1 : 0);
7464 ++ ap_irq_flag ? 1 : 0);
7465 + }
7466 +
7467 + static BUS_ATTR_RO(ap_interrupts);
7468 +@@ -1912,7 +1903,7 @@ static int __init ap_module_init(void)
7469 + /* enable interrupts if available */
7470 + if (ap_interrupts_available()) {
7471 + rc = register_adapter_interrupt(&ap_airq);
7472 +- ap_airq_flag = (rc == 0);
7473 ++ ap_irq_flag = (rc == 0);
7474 + }
7475 +
7476 + /* Create /sys/bus/ap. */
7477 +@@ -1956,7 +1947,7 @@ out_work:
7478 + out_bus:
7479 + bus_unregister(&ap_bus_type);
7480 + out:
7481 +- if (ap_using_interrupts())
7482 ++ if (ap_irq_flag)
7483 + unregister_adapter_interrupt(&ap_airq);
7484 + kfree(ap_qci_info);
7485 + return rc;
7486 +diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
7487 +index 8f18abdbbc2ba..6dd5e8f0380ce 100644
7488 +--- a/drivers/s390/crypto/ap_bus.h
7489 ++++ b/drivers/s390/crypto/ap_bus.h
7490 +@@ -80,12 +80,6 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
7491 + #define AP_FUNC_EP11 5
7492 + #define AP_FUNC_APXA 6
7493 +
7494 +-/*
7495 +- * AP interrupt states
7496 +- */
7497 +-#define AP_INTR_DISABLED 0 /* AP interrupt disabled */
7498 +-#define AP_INTR_ENABLED 1 /* AP interrupt enabled */
7499 +-
7500 + /*
7501 + * AP queue state machine states
7502 + */
7503 +@@ -112,7 +106,7 @@ enum ap_sm_event {
7504 + * AP queue state wait behaviour
7505 + */
7506 + enum ap_sm_wait {
7507 +- AP_SM_WAIT_AGAIN, /* retry immediately */
7508 ++ AP_SM_WAIT_AGAIN = 0, /* retry immediately */
7509 + AP_SM_WAIT_TIMEOUT, /* wait for timeout */
7510 + AP_SM_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
7511 + AP_SM_WAIT_NONE, /* no wait */
7512 +@@ -186,7 +180,7 @@ struct ap_queue {
7513 + enum ap_dev_state dev_state; /* queue device state */
7514 + bool config; /* configured state */
7515 + ap_qid_t qid; /* AP queue id. */
7516 +- int interrupt; /* indicate if interrupts are enabled */
7517 ++ bool interrupt; /* indicate if interrupts are enabled */
7518 + int queue_count; /* # messages currently on AP queue. */
7519 + int pendingq_count; /* # requests on pendingq list. */
7520 + int requestq_count; /* # requests on requestq list. */
7521 +diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
7522 +index 669f96fddad61..d70c4d3d0907f 100644
7523 +--- a/drivers/s390/crypto/ap_queue.c
7524 ++++ b/drivers/s390/crypto/ap_queue.c
7525 +@@ -19,7 +19,7 @@
7526 + static void __ap_flush_queue(struct ap_queue *aq);
7527 +
7528 + /**
7529 +- * ap_queue_enable_interruption(): Enable interruption on an AP queue.
7530 ++ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
7531 + * @qid: The AP queue number
7532 + * @ind: the notification indicator byte
7533 + *
7534 +@@ -27,7 +27,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
7535 + * value it waits a while and tests the AP queue if interrupts
7536 + * have been switched on using ap_test_queue().
7537 + */
7538 +-static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
7539 ++static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
7540 + {
7541 + struct ap_queue_status status;
7542 + struct ap_qirq_ctrl qirqctrl = { 0 };
7543 +@@ -218,7 +218,8 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
7544 + return AP_SM_WAIT_NONE;
7545 + case AP_RESPONSE_NO_PENDING_REPLY:
7546 + if (aq->queue_count > 0)
7547 +- return AP_SM_WAIT_INTERRUPT;
7548 ++ return aq->interrupt ?
7549 ++ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
7550 + aq->sm_state = AP_SM_STATE_IDLE;
7551 + return AP_SM_WAIT_NONE;
7552 + default:
7553 +@@ -272,7 +273,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
7554 + fallthrough;
7555 + case AP_RESPONSE_Q_FULL:
7556 + aq->sm_state = AP_SM_STATE_QUEUE_FULL;
7557 +- return AP_SM_WAIT_INTERRUPT;
7558 ++ return aq->interrupt ?
7559 ++ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
7560 + case AP_RESPONSE_RESET_IN_PROGRESS:
7561 + aq->sm_state = AP_SM_STATE_RESET_WAIT;
7562 + return AP_SM_WAIT_TIMEOUT;
7563 +@@ -322,7 +324,7 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
7564 + case AP_RESPONSE_NORMAL:
7565 + case AP_RESPONSE_RESET_IN_PROGRESS:
7566 + aq->sm_state = AP_SM_STATE_RESET_WAIT;
7567 +- aq->interrupt = AP_INTR_DISABLED;
7568 ++ aq->interrupt = false;
7569 + return AP_SM_WAIT_TIMEOUT;
7570 + default:
7571 + aq->dev_state = AP_DEV_STATE_ERROR;
7572 +@@ -355,7 +357,7 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
7573 + switch (status.response_code) {
7574 + case AP_RESPONSE_NORMAL:
7575 + lsi_ptr = ap_airq_ptr();
7576 +- if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
7577 ++ if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
7578 + aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
7579 + else
7580 + aq->sm_state = (aq->queue_count > 0) ?
7581 +@@ -396,7 +398,7 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
7582 +
7583 + if (status.irq_enabled == 1) {
7584 + /* Irqs are now enabled */
7585 +- aq->interrupt = AP_INTR_ENABLED;
7586 ++ aq->interrupt = true;
7587 + aq->sm_state = (aq->queue_count > 0) ?
7588 + AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
7589 + }
7590 +@@ -586,7 +588,7 @@ static ssize_t interrupt_show(struct device *dev,
7591 + spin_lock_bh(&aq->lock);
7592 + if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
7593 + rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
7594 +- else if (aq->interrupt == AP_INTR_ENABLED)
7595 ++ else if (aq->interrupt)
7596 + rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
7597 + else
7598 + rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
7599 +@@ -767,7 +769,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
7600 + aq->ap_dev.device.type = &ap_queue_type;
7601 + aq->ap_dev.device_type = device_type;
7602 + aq->qid = qid;
7603 +- aq->interrupt = AP_INTR_DISABLED;
7604 ++ aq->interrupt = false;
7605 + spin_lock_init(&aq->lock);
7606 + INIT_LIST_HEAD(&aq->pendingq);
7607 + INIT_LIST_HEAD(&aq->requestq);
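
Alongside the int-to-bool cleanup, ap_sm_read() and ap_sm_write() above now fall back to a timer when the queue's interrupt was never armed, instead of unconditionally returning AP_SM_WAIT_INTERRUPT. A standalone illustration of the fallback:

enum wait_mode { WAIT_INTERRUPT, WAIT_TIMEOUT };

/* Park on the interrupt only if it is actually enabled; otherwise poll
 * on a timeout so a queue whose IRQ setup failed cannot stall forever. */
static enum wait_mode pick_wait(int irq_enabled)
{
        return irq_enabled ? WAIT_INTERRUPT : WAIT_TIMEOUT;
}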
7608 +diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
7609 +index bc34bedf9db8b..6a3c2b4609652 100644
7610 +--- a/drivers/s390/crypto/zcrypt_ccamisc.c
7611 ++++ b/drivers/s390/crypto/zcrypt_ccamisc.c
7612 +@@ -1724,10 +1724,10 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
7613 + rlen = vlen = PAGE_SIZE/2;
7614 + rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
7615 + rarray, &rlen, varray, &vlen);
7616 +- if (rc == 0 && rlen >= 10*8 && vlen >= 240) {
7617 +- ci->new_apka_mk_state = (char) rarray[7*8];
7618 +- ci->cur_apka_mk_state = (char) rarray[8*8];
7619 +- ci->old_apka_mk_state = (char) rarray[9*8];
7620 ++ if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
7621 ++ ci->new_apka_mk_state = (char) rarray[10*8];
7622 ++ ci->cur_apka_mk_state = (char) rarray[11*8];
7623 ++ ci->old_apka_mk_state = (char) rarray[12*8];
7624 + if (ci->old_apka_mk_state == '2')
7625 + memcpy(&ci->old_apka_mkvp, varray + 208, 8);
7626 + if (ci->cur_apka_mk_state == '2')
7627 +diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
7628 +index 579dfc8dc8fc9..9dee485807c94 100644
7629 +--- a/drivers/soc/mediatek/mt8183-mmsys.h
7630 ++++ b/drivers/soc/mediatek/mt8183-mmsys.h
7631 +@@ -28,25 +28,32 @@
7632 + static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
7633 + {
7634 + DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
7635 +- MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L
7636 ++ MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
7637 ++ MT8183_OVL0_MOUT_EN_OVL0_2L
7638 + }, {
7639 + DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
7640 +- MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
7641 ++ MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
7642 ++ MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
7643 + }, {
7644 + DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
7645 +- MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1
7646 ++ MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
7647 ++ MT8183_OVL1_2L_MOUT_EN_RDMA1
7648 + }, {
7649 + DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
7650 +- MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0
7651 ++ MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
7652 ++ MT8183_DITHER0_MOUT_IN_DSI0
7653 + }, {
7654 + DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
7655 +- MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L
7656 ++ MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
7657 ++ MT8183_DISP_PATH0_SEL_IN_OVL0_2L
7658 + }, {
7659 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
7660 +- MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1
7661 ++ MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
7662 ++ MT8183_DPI0_SEL_IN_RDMA1
7663 + }, {
7664 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
7665 +- MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0
7666 ++ MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
7667 ++ MT8183_RDMA0_SOUT_COLOR0
7668 + }
7669 + };
7670 +
7671 +diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
7672 +index 080660ef11bfa..0f949896fd064 100644
7673 +--- a/drivers/soc/mediatek/mtk-mmsys.c
7674 ++++ b/drivers/soc/mediatek/mtk-mmsys.c
7675 +@@ -68,7 +68,9 @@ void mtk_mmsys_ddp_connect(struct device *dev,
7676 +
7677 + for (i = 0; i < mmsys->data->num_routes; i++)
7678 + if (cur == routes[i].from_comp && next == routes[i].to_comp) {
7679 +- reg = readl_relaxed(mmsys->regs + routes[i].addr) | routes[i].val;
7680 ++ reg = readl_relaxed(mmsys->regs + routes[i].addr);
7681 ++ reg &= ~routes[i].mask;
7682 ++ reg |= routes[i].val;
7683 + writel_relaxed(reg, mmsys->regs + routes[i].addr);
7684 + }
7685 + }
7686 +@@ -85,7 +87,8 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
7687 +
7688 + for (i = 0; i < mmsys->data->num_routes; i++)
7689 + if (cur == routes[i].from_comp && next == routes[i].to_comp) {
7690 +- reg = readl_relaxed(mmsys->regs + routes[i].addr) & ~routes[i].val;
7691 ++ reg = readl_relaxed(mmsys->regs + routes[i].addr);
7692 ++ reg &= ~routes[i].mask;
7693 + writel_relaxed(reg, mmsys->regs + routes[i].addr);
7694 + }
7695 + }
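
Both helpers above switch from plain OR/AND-NOT on routes[i].val to a full read-modify-write against routes[i].mask, so bits left over from a previously selected route are cleared before the new value is set. A standalone C illustration of the difference:

#include <stdint.h>
#include <stdio.h>

/* Clear the whole routing field first, then install the new value. */
static uint32_t route_connect(uint32_t reg, uint32_t mask, uint32_t val)
{
        reg &= ~mask;           /* wipe stale routing bits */
        reg |= val;             /* set the new route */
        return reg;
}

int main(void)
{
        /* field mask 0x7, old route 0x5, new route 0x2:
         * a plain OR would leave 0x7; the masked form yields 0x2 */
        printf("0x%x\n", route_connect(0x5, 0x7, 0x2));
        return 0;
}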
7696 +diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
7697 +index a760a34e6eca8..5f3e2bf0c40bc 100644
7698 +--- a/drivers/soc/mediatek/mtk-mmsys.h
7699 ++++ b/drivers/soc/mediatek/mtk-mmsys.h
7700 +@@ -35,41 +35,54 @@
7701 + #define RDMA0_SOUT_DSI1 0x1
7702 + #define RDMA0_SOUT_DSI2 0x4
7703 + #define RDMA0_SOUT_DSI3 0x5
7704 ++#define RDMA0_SOUT_MASK 0x7
7705 + #define RDMA1_SOUT_DPI0 0x2
7706 + #define RDMA1_SOUT_DPI1 0x3
7707 + #define RDMA1_SOUT_DSI1 0x1
7708 + #define RDMA1_SOUT_DSI2 0x4
7709 + #define RDMA1_SOUT_DSI3 0x5
7710 ++#define RDMA1_SOUT_MASK 0x7
7711 + #define RDMA2_SOUT_DPI0 0x2
7712 + #define RDMA2_SOUT_DPI1 0x3
7713 + #define RDMA2_SOUT_DSI1 0x1
7714 + #define RDMA2_SOUT_DSI2 0x4
7715 + #define RDMA2_SOUT_DSI3 0x5
7716 ++#define RDMA2_SOUT_MASK 0x7
7717 + #define DPI0_SEL_IN_RDMA1 0x1
7718 + #define DPI0_SEL_IN_RDMA2 0x3
7719 ++#define DPI0_SEL_IN_MASK 0x3
7720 + #define DPI1_SEL_IN_RDMA1 (0x1 << 8)
7721 + #define DPI1_SEL_IN_RDMA2 (0x3 << 8)
7722 ++#define DPI1_SEL_IN_MASK (0x3 << 8)
7723 + #define DSI0_SEL_IN_RDMA1 0x1
7724 + #define DSI0_SEL_IN_RDMA2 0x4
7725 ++#define DSI0_SEL_IN_MASK 0x7
7726 + #define DSI1_SEL_IN_RDMA1 0x1
7727 + #define DSI1_SEL_IN_RDMA2 0x4
7728 ++#define DSI1_SEL_IN_MASK 0x7
7729 + #define DSI2_SEL_IN_RDMA1 (0x1 << 16)
7730 + #define DSI2_SEL_IN_RDMA2 (0x4 << 16)
7731 ++#define DSI2_SEL_IN_MASK (0x7 << 16)
7732 + #define DSI3_SEL_IN_RDMA1 (0x1 << 16)
7733 + #define DSI3_SEL_IN_RDMA2 (0x4 << 16)
7734 ++#define DSI3_SEL_IN_MASK (0x7 << 16)
7735 + #define COLOR1_SEL_IN_OVL1 0x1
7736 +
7737 + #define OVL_MOUT_EN_RDMA 0x1
7738 + #define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
7739 + #define BLS_TO_DPI_RDMA1_TO_DSI 0x2
7740 ++#define BLS_RDMA1_DSI_DPI_MASK 0xf
7741 + #define DSI_SEL_IN_BLS 0x0
7742 + #define DPI_SEL_IN_BLS 0x0
7743 ++#define DPI_SEL_IN_MASK 0x1
7744 + #define DSI_SEL_IN_RDMA 0x1
7745 ++#define DSI_SEL_IN_MASK 0x1
7746 +
7747 + struct mtk_mmsys_routes {
7748 + u32 from_comp;
7749 + u32 to_comp;
7750 + u32 addr;
7751 ++ u32 mask;
7752 + u32 val;
7753 + };
7754 +
7755 +@@ -91,124 +104,164 @@ struct mtk_mmsys_driver_data {
7756 + static const struct mtk_mmsys_routes mmsys_default_routing_table[] = {
7757 + {
7758 + DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
7759 +- DISP_REG_CONFIG_OUT_SEL, BLS_TO_DSI_RDMA1_TO_DPI1
7760 ++ DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK,
7761 ++ BLS_TO_DSI_RDMA1_TO_DPI1
7762 + }, {
7763 + DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
7764 +- DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_BLS
7765 ++ DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK,
7766 ++ DSI_SEL_IN_BLS
7767 + }, {
7768 + DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
7769 +- DISP_REG_CONFIG_OUT_SEL, BLS_TO_DPI_RDMA1_TO_DSI
7770 ++ DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK,
7771 ++ BLS_TO_DPI_RDMA1_TO_DSI
7772 + }, {
7773 + DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
7774 +- DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_RDMA
7775 ++ DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK,
7776 ++ DSI_SEL_IN_RDMA
7777 + }, {
7778 + DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
7779 +- DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_BLS
7780 ++ DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_MASK,
7781 ++ DPI_SEL_IN_BLS
7782 + }, {
7783 + DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
7784 +- DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1
7785 ++ DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1,
7786 ++ GAMMA_MOUT_EN_RDMA1
7787 + }, {
7788 + DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
7789 +- DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0
7790 ++ DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0,
7791 ++ OD_MOUT_EN_RDMA0
7792 + }, {
7793 + DDP_COMPONENT_OD1, DDP_COMPONENT_RDMA1,
7794 +- DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1
7795 ++ DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1,
7796 ++ OD1_MOUT_EN_RDMA1
7797 + }, {
7798 + DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
7799 +- DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0
7800 ++ DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
7801 ++ OVL0_MOUT_EN_COLOR0
7802 + }, {
7803 + DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
7804 +- DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
7805 ++ DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0,
7806 ++ COLOR0_SEL_IN_OVL0
7807 + }, {
7808 + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
7809 +- DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA
7810 ++ DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA,
7811 ++ OVL_MOUT_EN_RDMA
7812 + }, {
7813 + DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
7814 +- DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1
7815 ++ DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1,
7816 ++ OVL1_MOUT_EN_COLOR1
7817 + }, {
7818 + DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
7819 +- DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1
7820 ++ DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1,
7821 ++ COLOR1_SEL_IN_OVL1
7822 + }, {
7823 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI0,
7824 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI0
7825 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
7826 ++ RDMA0_SOUT_DPI0
7827 + }, {
7828 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI1,
7829 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI1
7830 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
7831 ++ RDMA0_SOUT_DPI1
7832 + }, {
7833 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI1,
7834 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI1
7835 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
7836 ++ RDMA0_SOUT_DSI1
7837 + }, {
7838 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI2,
7839 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI2
7840 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
7841 ++ RDMA0_SOUT_DSI2
7842 + }, {
7843 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI3,
7844 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI3
7845 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
7846 ++ RDMA0_SOUT_DSI3
7847 + }, {
7848 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
7849 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI0
7850 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
7851 ++ RDMA1_SOUT_DPI0
7852 + }, {
7853 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
7854 +- DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA1
7855 ++ DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK,
7856 ++ DPI0_SEL_IN_RDMA1
7857 + }, {
7858 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
7859 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI1
7860 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
7861 ++ RDMA1_SOUT_DPI1
7862 + }, {
7863 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
7864 +- DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA1
7865 ++ DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK,
7866 ++ DPI1_SEL_IN_RDMA1
7867 + }, {
7868 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI0,
7869 +- DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA1
7870 ++ DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK,
7871 ++ DSI0_SEL_IN_RDMA1
7872 + }, {
7873 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
7874 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI1
7875 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
7876 ++ RDMA1_SOUT_DSI1
7877 + }, {
7878 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
7879 +- DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA1
7880 ++ DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK,
7881 ++ DSI1_SEL_IN_RDMA1
7882 + }, {
7883 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
7884 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI2
7885 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
7886 ++ RDMA1_SOUT_DSI2
7887 + }, {
7888 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
7889 +- DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA1
7890 ++ DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK,
7891 ++ DSI2_SEL_IN_RDMA1
7892 + }, {
7893 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
7894 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI3
7895 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
7896 ++ RDMA1_SOUT_DSI3
7897 + }, {
7898 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
7899 +- DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA1
7900 ++ DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
7901 ++ DSI3_SEL_IN_RDMA1
7902 + }, {
7903 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
7904 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI0
7905 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
7906 ++ RDMA2_SOUT_DPI0
7907 + }, {
7908 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
7909 +- DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA2
7910 ++ DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK,
7911 ++ DPI0_SEL_IN_RDMA2
7912 + }, {
7913 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
7914 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI1
7915 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
7916 ++ RDMA2_SOUT_DPI1
7917 + }, {
7918 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
7919 +- DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA2
7920 ++ DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK,
7921 ++ DPI1_SEL_IN_RDMA2
7922 + }, {
7923 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI0,
7924 +- DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA2
7925 ++ DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK,
7926 ++ DSI0_SEL_IN_RDMA2
7927 + }, {
7928 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
7929 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI1
7930 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
7931 ++ RDMA2_SOUT_DSI1
7932 + }, {
7933 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
7934 +- DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA2
7935 ++ DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK,
7936 ++ DSI1_SEL_IN_RDMA2
7937 + }, {
7938 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
7939 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI2
7940 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
7941 ++ RDMA2_SOUT_DSI2
7942 + }, {
7943 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
7944 +- DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA2
7945 ++ DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK,
7946 ++ DSI2_SEL_IN_RDMA2
7947 + }, {
7948 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
7949 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI3
7950 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
7951 ++ RDMA2_SOUT_DSI3
7952 + }, {
7953 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
7954 +- DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA2
7955 ++ DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
7956 ++ DSI3_SEL_IN_RDMA2
7957 + }
7958 + };
7959 +
7960 +diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
7961 +index 2daa17ba54a3f..fa209b479ab35 100644
7962 +--- a/drivers/soc/qcom/rpmhpd.c
7963 ++++ b/drivers/soc/qcom/rpmhpd.c
7964 +@@ -403,12 +403,11 @@ static int rpmhpd_power_on(struct generic_pm_domain *domain)
7965 + static int rpmhpd_power_off(struct generic_pm_domain *domain)
7966 + {
7967 + struct rpmhpd *pd = domain_to_rpmhpd(domain);
7968 +- int ret = 0;
7969 ++ int ret;
7970 +
7971 + mutex_lock(&rpmhpd_lock);
7972 +
7973 +- ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
7974 +-
7975 ++ ret = rpmhpd_aggregate_corner(pd, 0);
7976 + if (!ret)
7977 + pd->enabled = false;
7978 +
7979 +diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
7980 +index 1d3d5e3ec2b07..6e9a9cd28b178 100644
7981 +--- a/drivers/soc/qcom/smsm.c
7982 ++++ b/drivers/soc/qcom/smsm.c
7983 +@@ -109,7 +109,7 @@ struct smsm_entry {
7984 + DECLARE_BITMAP(irq_enabled, 32);
7985 + DECLARE_BITMAP(irq_rising, 32);
7986 + DECLARE_BITMAP(irq_falling, 32);
7987 +- u32 last_value;
7988 ++ unsigned long last_value;
7989 +
7990 + u32 *remote_state;
7991 + u32 *subscription;
7992 +@@ -204,8 +204,7 @@ static irqreturn_t smsm_intr(int irq, void *data)
7993 + u32 val;
7994 +
7995 + val = readl(entry->remote_state);
7996 +- changed = val ^ entry->last_value;
7997 +- entry->last_value = val;
7998 ++ changed = val ^ xchg(&entry->last_value, val);
7999 +
8000 + for_each_set_bit(i, entry->irq_enabled, 32) {
8001 + if (!(changed & BIT(i)))
8002 +@@ -264,6 +263,12 @@ static void smsm_unmask_irq(struct irq_data *irqd)
8003 + struct qcom_smsm *smsm = entry->smsm;
8004 + u32 val;
8005 +
8006 ++ /* Make sure our last cached state is up-to-date */
8007 ++ if (readl(entry->remote_state) & BIT(irq))
8008 ++ set_bit(irq, &entry->last_value);
8009 ++ else
8010 ++ clear_bit(irq, &entry->last_value);
8011 ++
8012 + set_bit(irq, entry->irq_enabled);
8013 +
8014 + if (entry->subscription) {
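
The smsm changes above make the edge detection race-free: the handler computes the changed mask against an atomically swapped snapshot (val ^ xchg(&last_value, val)), and unmask refreshes the cached bit before enabling the interrupt. A standalone illustration using C11 atomics, which the kernel's xchg() resembles:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t last_value;

/* Swap in the new state and report which bits differ from the previous
 * snapshot; the read-compare-store happens as one atomic step. */
static uint32_t changed_bits(uint32_t val)
{
        return val ^ atomic_exchange(&last_value, val);
}

int main(void)
{
        changed_bits(0x5);                      /* seed the snapshot */
        printf("0x%x\n", changed_bits(0x4));    /* prints 0x1 */
        return 0;
}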
8015 +diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
8016 +index 2c13bf4dd5dbe..25eb2c1e31bb2 100644
8017 +--- a/drivers/soc/rockchip/Kconfig
8018 ++++ b/drivers/soc/rockchip/Kconfig
8019 +@@ -6,8 +6,8 @@ if ARCH_ROCKCHIP || COMPILE_TEST
8020 + #
8021 +
8022 + config ROCKCHIP_GRF
8023 +- bool
8024 +- default y
8025 ++ bool "Rockchip General Register Files support" if COMPILE_TEST
8026 ++ default y if ARCH_ROCKCHIP
8027 + help
8028 + The General Register Files are a central component providing
8029 + special additional settings registers for a lot of soc-components.
8030 +diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
8031 +index 8996115ce736a..263ce90473277 100644
8032 +--- a/drivers/spi/spi-coldfire-qspi.c
8033 ++++ b/drivers/spi/spi-coldfire-qspi.c
8034 +@@ -444,7 +444,7 @@ static int mcfqspi_remove(struct platform_device *pdev)
8035 + mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
8036 +
8037 + mcfqspi_cs_teardown(mcfqspi);
8038 +- clk_disable(mcfqspi->clk);
8039 ++ clk_disable_unprepare(mcfqspi->clk);
8040 +
8041 + return 0;
8042 + }
8043 +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
8044 +index e114e6fe5ea5b..d112c2cac042b 100644
8045 +--- a/drivers/spi/spi-davinci.c
8046 ++++ b/drivers/spi/spi-davinci.c
8047 +@@ -213,12 +213,6 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
8048 + * line for the controller
8049 + */
8050 + if (spi->cs_gpiod) {
8051 +- /*
8052 +- * FIXME: is this code ever executed? This host does not
8053 +- * set SPI_MASTER_GPIO_SS so this chipselect callback should
8054 +- * not get called from the SPI core when we are using
8055 +- * GPIOs for chip select.
8056 +- */
8057 + if (value == BITBANG_CS_ACTIVE)
8058 + gpiod_set_value(spi->cs_gpiod, 1);
8059 + else
8060 +@@ -945,7 +939,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
8061 + master->bus_num = pdev->id;
8062 + master->num_chipselect = pdata->num_chipselect;
8063 + master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
8064 +- master->flags = SPI_MASTER_MUST_RX;
8065 ++ master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_GPIO_SS;
8066 + master->setup = davinci_spi_setup;
8067 + master->cleanup = davinci_spi_cleanup;
8068 + master->can_dma = davinci_spi_can_dma;
8069 +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
8070 +index fb45e6af66381..fd004c9db9dc0 100644
8071 +--- a/drivers/spi/spi-fsl-dspi.c
8072 ++++ b/drivers/spi/spi-fsl-dspi.c
8073 +@@ -530,6 +530,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
8074 + goto err_rx_dma_buf;
8075 + }
8076 +
8077 ++ memset(&cfg, 0, sizeof(cfg));
8078 + cfg.src_addr = phy_addr + SPI_POPR;
8079 + cfg.dst_addr = phy_addr + SPI_PUSHR;
8080 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
8081 +diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
8082 +index 104bde153efd2..5eb7b61bbb4d8 100644
8083 +--- a/drivers/spi/spi-pic32.c
8084 ++++ b/drivers/spi/spi-pic32.c
8085 +@@ -361,6 +361,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
8086 + struct dma_slave_config cfg;
8087 + int ret;
8088 +
8089 ++ memset(&cfg, 0, sizeof(cfg));
8090 + cfg.device_fc = true;
8091 + cfg.src_addr = pic32s->dma_base + buf_offset;
8092 + cfg.dst_addr = pic32s->dma_base + buf_offset;
8093 +diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
8094 +index ab19068be8675..98ef17389952a 100644
8095 +--- a/drivers/spi/spi-sprd-adi.c
8096 ++++ b/drivers/spi/spi-sprd-adi.c
8097 +@@ -103,7 +103,7 @@
8098 + #define HWRST_STATUS_WATCHDOG 0xf0
8099 +
8100 + /* Use default timeout 50 ms that converts to watchdog values */
8101 +-#define WDG_LOAD_VAL ((50 * 1000) / 32768)
8102 ++#define WDG_LOAD_VAL ((50 * 32768) / 1000)
8103 + #define WDG_LOAD_MASK GENMASK(15, 0)
8104 + #define WDG_UNLOCK_KEY 0xe551
8105 +
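
The WDG_LOAD_VAL fix above is pure unit conversion: with a 32768 Hz watchdog clock, ticks = ms * rate / 1000. The old macro divided instead of multiplying, so the "50 ms" default was (50 * 1000) / 32768 = 1 tick, roughly 30 microseconds. A one-line check:

#include <stdio.h>

int main(void)
{
        unsigned int rate = 32768, ms = 50;     /* 32.768 kHz clock */
        printf("old=%u new=%u\n",
               (ms * 1000) / rate,              /* 1 tick   (~30 us) */
               (ms * rate) / 1000);             /* 1638 ticks (~50 ms) */
        return 0;
}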
8106 +diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
8107 +index 9262c6418463b..cfa222c9bd5e7 100644
8108 +--- a/drivers/spi/spi-zynq-qspi.c
8109 ++++ b/drivers/spi/spi-zynq-qspi.c
8110 +@@ -545,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
8111 + zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
8112 + zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
8113 + ZYNQ_QSPI_IXR_RXTX_MASK);
8114 +- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
8115 ++ if (!wait_for_completion_timeout(&xqspi->data_completion,
8116 + msecs_to_jiffies(1000)))
8117 + err = -ETIMEDOUT;
8118 + }
8119 +@@ -563,7 +563,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
8120 + zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
8121 + zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
8122 + ZYNQ_QSPI_IXR_RXTX_MASK);
8123 +- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
8124 ++ if (!wait_for_completion_timeout(&xqspi->data_completion,
8125 + msecs_to_jiffies(1000)))
8126 + err = -ETIMEDOUT;
8127 + }
8128 +@@ -579,7 +579,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
8129 + zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
8130 + zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
8131 + ZYNQ_QSPI_IXR_RXTX_MASK);
8132 +- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
8133 ++ if (!wait_for_completion_timeout(&xqspi->data_completion,
8134 + msecs_to_jiffies(1000)))
8135 + err = -ETIMEDOUT;
8136 +
8137 +@@ -603,7 +603,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
8138 + zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
8139 + zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
8140 + ZYNQ_QSPI_IXR_RXTX_MASK);
8141 +- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
8142 ++ if (!wait_for_completion_timeout(&xqspi->data_completion,
8143 + msecs_to_jiffies(1000)))
8144 + err = -ETIMEDOUT;
8145 + }
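
All four hunks above drop the _interruptible_ wait: a pending signal makes wait_for_completion_interruptible_timeout() return -ERESTARTSYS, which the "if (!...)" test would misread as a successful completion. The non-interruptible variant returns only 0 (timed out) or the jiffies remaining, so the test is sound. Kernel-style sketch; "done" and "err" are hypothetical:

unsigned long left;

left = wait_for_completion_timeout(&done, msecs_to_jiffies(1000));
if (!left)                      /* 0 can only mean timeout here */
        err = -ETIMEDOUT;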
8146 +diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
8147 +index 69cf51445f082..2324b5d737886 100644
8148 +--- a/drivers/staging/clocking-wizard/Kconfig
8149 ++++ b/drivers/staging/clocking-wizard/Kconfig
8150 +@@ -5,6 +5,6 @@
8151 +
8152 + config COMMON_CLK_XLNX_CLKWZRD
8153 + tristate "Xilinx Clocking Wizard"
8154 +- depends on COMMON_CLK && OF && IOMEM
8155 ++ depends on COMMON_CLK && OF && HAS_IOMEM
8156 + help
8157 + Support for the Xilinx Clocking Wizard IP core clock generator.
8158 +diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
8159 +index 11196180a2066..34bf92de2f29b 100644
8160 +--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
8161 ++++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
8162 +@@ -1545,16 +1545,19 @@ static struct v4l2_ctrl_config mt9m114_controls[] = {
8163 + static int mt9m114_detect(struct mt9m114_device *dev, struct i2c_client *client)
8164 + {
8165 + struct i2c_adapter *adapter = client->adapter;
8166 +- u32 retvalue;
8167 ++ u32 model;
8168 ++ int ret;
8169 +
8170 + if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
8171 + dev_err(&client->dev, "%s: i2c error", __func__);
8172 + return -ENODEV;
8173 + }
8174 +- mt9m114_read_reg(client, MISENSOR_16BIT, (u32)MT9M114_PID, &retvalue);
8175 +- dev->real_model_id = retvalue;
8176 ++ ret = mt9m114_read_reg(client, MISENSOR_16BIT, MT9M114_PID, &model);
8177 ++ if (ret)
8178 ++ return ret;
8179 ++ dev->real_model_id = model;
8180 +
8181 +- if (retvalue != MT9M114_MOD_ID) {
8182 ++ if (model != MT9M114_MOD_ID) {
8183 + dev_err(&client->dev, "%s: failed: client->addr = %x\n",
8184 + __func__, client->addr);
8185 + return -ENODEV;
8186 +diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
8187 +index 89709cd06d4d3..d321790b07d95 100644
8188 +--- a/drivers/staging/media/tegra-video/vi.c
8189 ++++ b/drivers/staging/media/tegra-video/vi.c
8190 +@@ -508,8 +508,8 @@ static int __tegra_channel_try_format(struct tegra_vi_channel *chan,
8191 + return -ENODEV;
8192 +
8193 + sd_state = v4l2_subdev_alloc_state(subdev);
8194 +- if (!sd_state)
8195 +- return -ENOMEM;
8196 ++ if (IS_ERR(sd_state))
8197 ++ return PTR_ERR(sd_state);
8198 + /*
8199 + * Retrieve the format information and if requested format isn't
8200 + * supported, keep the current format.
8201 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
8202 +index f0e5da77ed6d4..460e428b7592f 100644
8203 +--- a/drivers/tty/serial/fsl_lpuart.c
8204 ++++ b/drivers/tty/serial/fsl_lpuart.c
8205 +@@ -2611,7 +2611,7 @@ static int lpuart_probe(struct platform_device *pdev)
8206 + return PTR_ERR(sport->port.membase);
8207 +
8208 + sport->port.membase += sdata->reg_off;
8209 +- sport->port.mapbase = res->start;
8210 ++ sport->port.mapbase = res->start + sdata->reg_off;
8211 + sport->port.dev = &pdev->dev;
8212 + sport->port.type = PORT_LPUART;
8213 + sport->devtype = sdata->devtype;
8214 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
8215 +index 26debec26b4e1..79c6cc39e5dd6 100644
8216 +--- a/drivers/tty/tty_io.c
8217 ++++ b/drivers/tty/tty_io.c
8218 +@@ -2290,8 +2290,6 @@ static int tty_fasync(int fd, struct file *filp, int on)
8219 + * Locking:
8220 + * Called functions take tty_ldiscs_lock
8221 + * current->signal->tty check is safe without locks
8222 +- *
8223 +- * FIXME: may race normal receive processing
8224 + */
8225 +
8226 + static int tiocsti(struct tty_struct *tty, char __user *p)
8227 +@@ -2307,8 +2305,10 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
8228 + ld = tty_ldisc_ref_wait(tty);
8229 + if (!ld)
8230 + return -EIO;
8231 ++ tty_buffer_lock_exclusive(tty->port);
8232 + if (ld->ops->receive_buf)
8233 + ld->ops->receive_buf(tty, &ch, &mbz, 1);
8234 ++ tty_buffer_unlock_exclusive(tty->port);
8235 + tty_ldisc_deref(ld);
8236 + return 0;
8237 + }
8238 +diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
8239 +index ffe301d6ea359..d0f9b7c296b0d 100644
8240 +--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
8241 ++++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
8242 +@@ -598,6 +598,8 @@ static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
8243 + USB_R5_ID_DIG_IRQ, 0);
8244 +
8245 + irq = platform_get_irq(pdev, 0);
8246 ++ if (irq < 0)
8247 ++ return irq;
8248 + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
8249 + dwc3_meson_g12a_irq_thread,
8250 + IRQF_ONESHOT, pdev->name, priv);
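
This and the following USB hunks all add the same guard: platform_get_irq() returns a negative errno on failure, and passing that straight into a request_irq() variant both hides the error and requests a nonsense vector. Kernel-style sketch of the convention; my_isr and priv are hypothetical:

int irq;

irq = platform_get_irq(pdev, 0);
if (irq < 0)
        return irq;     /* propagates -EPROBE_DEFER, -ENXIO, ... */

ret = devm_request_irq(&pdev->dev, irq, my_isr, 0, "my-drv", priv);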
8251 +diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
8252 +index 49e6ca94486dd..cfbb96f6627e4 100644
8253 +--- a/drivers/usb/dwc3/dwc3-qcom.c
8254 ++++ b/drivers/usb/dwc3/dwc3-qcom.c
8255 +@@ -614,6 +614,10 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
8256 + qcom->acpi_pdata->dwc3_core_base_size;
8257 +
8258 + irq = platform_get_irq(pdev_irq, 0);
8259 ++ if (irq < 0) {
8260 ++ ret = irq;
8261 ++ goto out;
8262 ++ }
8263 + child_res[1].flags = IORESOURCE_IRQ;
8264 + child_res[1].start = child_res[1].end = irq;
8265 +
8266 +diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
8267 +index eede5cedacb4a..d9ad9adf7348f 100644
8268 +--- a/drivers/usb/gadget/udc/at91_udc.c
8269 ++++ b/drivers/usb/gadget/udc/at91_udc.c
8270 +@@ -1876,7 +1876,9 @@ static int at91udc_probe(struct platform_device *pdev)
8271 + clk_disable(udc->iclk);
8272 +
8273 + /* request UDC and maybe VBUS irqs */
8274 +- udc->udp_irq = platform_get_irq(pdev, 0);
8275 ++ udc->udp_irq = retval = platform_get_irq(pdev, 0);
8276 ++ if (retval < 0)
8277 ++ goto err_unprepare_iclk;
8278 + retval = devm_request_irq(dev, udc->udp_irq, at91_udc_irq, 0,
8279 + driver_name, udc);
8280 + if (retval) {
8281 +diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
8282 +index 0bef6b3f049b9..fa1a3908ec3bb 100644
8283 +--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
8284 ++++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
8285 +@@ -488,27 +488,14 @@ static int bdc_probe(struct platform_device *pdev)
8286 + int irq;
8287 + u32 temp;
8288 + struct device *dev = &pdev->dev;
8289 +- struct clk *clk;
8290 + int phy_num;
8291 +
8292 + dev_dbg(dev, "%s()\n", __func__);
8293 +
8294 +- clk = devm_clk_get_optional(dev, "sw_usbd");
8295 +- if (IS_ERR(clk))
8296 +- return PTR_ERR(clk);
8297 +-
8298 +- ret = clk_prepare_enable(clk);
8299 +- if (ret) {
8300 +- dev_err(dev, "could not enable clock\n");
8301 +- return ret;
8302 +- }
8303 +-
8304 + bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
8305 + if (!bdc)
8306 + return -ENOMEM;
8307 +
8308 +- bdc->clk = clk;
8309 +-
8310 + bdc->regs = devm_platform_ioremap_resource(pdev, 0);
8311 + if (IS_ERR(bdc->regs))
8312 + return PTR_ERR(bdc->regs);
8313 +@@ -545,10 +532,20 @@ static int bdc_probe(struct platform_device *pdev)
8314 + }
8315 + }
8316 +
8317 ++ bdc->clk = devm_clk_get_optional(dev, "sw_usbd");
8318 ++ if (IS_ERR(bdc->clk))
8319 ++ return PTR_ERR(bdc->clk);
8320 ++
8321 ++ ret = clk_prepare_enable(bdc->clk);
8322 ++ if (ret) {
8323 ++ dev_err(dev, "could not enable clock\n");
8324 ++ return ret;
8325 ++ }
8326 ++
8327 + ret = bdc_phy_init(bdc);
8328 + if (ret) {
8329 + dev_err(bdc->dev, "BDC phy init failure:%d\n", ret);
8330 +- return ret;
8331 ++ goto disable_clk;
8332 + }
8333 +
8334 + temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
8335 +@@ -560,7 +557,8 @@ static int bdc_probe(struct platform_device *pdev)
8336 + if (ret) {
8337 + dev_err(dev,
8338 + "No suitable DMA config available, abort\n");
8339 +- return -ENOTSUPP;
8340 ++ ret = -ENOTSUPP;
8341 ++ goto phycleanup;
8342 + }
8343 + dev_dbg(dev, "Using 32-bit address\n");
8344 + }
8345 +@@ -580,6 +578,8 @@ cleanup:
8346 + bdc_hw_exit(bdc);
8347 + phycleanup:
8348 + bdc_phy_exit(bdc);
8349 ++disable_clk:
8350 ++ clk_disable_unprepare(bdc->clk);
8351 + return ret;
8352 + }
8353 +
8354 +diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
8355 +index ce3d7a3eb7e33..a1057ddfbda33 100644
8356 +--- a/drivers/usb/gadget/udc/mv_u3d_core.c
8357 ++++ b/drivers/usb/gadget/udc/mv_u3d_core.c
8358 +@@ -1921,14 +1921,6 @@ static int mv_u3d_probe(struct platform_device *dev)
8359 + goto err_get_irq;
8360 + }
8361 + u3d->irq = r->start;
8362 +- if (request_irq(u3d->irq, mv_u3d_irq,
8363 +- IRQF_SHARED, driver_name, u3d)) {
8364 +- u3d->irq = 0;
8365 +- dev_err(&dev->dev, "Request irq %d for u3d failed\n",
8366 +- u3d->irq);
8367 +- retval = -ENODEV;
8368 +- goto err_request_irq;
8369 +- }
8370 +
8371 + /* initialize gadget structure */
8372 + u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
8373 +@@ -1941,6 +1933,15 @@ static int mv_u3d_probe(struct platform_device *dev)
8374 +
8375 + mv_u3d_eps_init(u3d);
8376 +
8377 ++ if (request_irq(u3d->irq, mv_u3d_irq,
8378 ++ IRQF_SHARED, driver_name, u3d)) {
8379 ++ u3d->irq = 0;
8380 ++ dev_err(&dev->dev, "Request irq %d for u3d failed\n",
8381 ++ u3d->irq);
8382 ++ retval = -ENODEV;
8383 ++ goto err_request_irq;
8384 ++ }
8385 ++
8386 + /* external vbus detection */
8387 + if (u3d->vbus) {
8388 + u3d->clock_gating = 1;
8389 +@@ -1964,8 +1965,8 @@ static int mv_u3d_probe(struct platform_device *dev)
8390 +
8391 + err_unregister:
8392 + free_irq(u3d->irq, u3d);
8393 +-err_request_irq:
8394 + err_get_irq:
8395 ++err_request_irq:
8396 + kfree(u3d->status_req);
8397 + err_alloc_status_req:
8398 + kfree(u3d->eps);
8399 +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
8400 +index f1b35a39d1ba8..57d417a7c3e0a 100644
8401 +--- a/drivers/usb/gadget/udc/renesas_usb3.c
8402 ++++ b/drivers/usb/gadget/udc/renesas_usb3.c
8403 +@@ -2707,10 +2707,15 @@ static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
8404 +
8405 + static const struct of_device_id usb3_of_match[] = {
8406 + {
8407 ++ .compatible = "renesas,r8a774c0-usb3-peri",
8408 ++ .data = &renesas_usb3_priv_r8a77990,
8409 ++ }, {
8410 + .compatible = "renesas,r8a7795-usb3-peri",
8411 + .data = &renesas_usb3_priv_gen3,
8412 +- },
8413 +- {
8414 ++ }, {
8415 ++ .compatible = "renesas,r8a77990-usb3-peri",
8416 ++ .data = &renesas_usb3_priv_r8a77990,
8417 ++ }, {
8418 + .compatible = "renesas,rcar-gen3-usb3-peri",
8419 + .data = &renesas_usb3_priv_gen3,
8420 + },
8421 +@@ -2719,18 +2724,10 @@ static const struct of_device_id usb3_of_match[] = {
8422 + MODULE_DEVICE_TABLE(of, usb3_of_match);
8423 +
8424 + static const struct soc_device_attribute renesas_usb3_quirks_match[] = {
8425 +- {
8426 +- .soc_id = "r8a774c0",
8427 +- .data = &renesas_usb3_priv_r8a77990,
8428 +- },
8429 + {
8430 + .soc_id = "r8a7795", .revision = "ES1.*",
8431 + .data = &renesas_usb3_priv_r8a7795_es1,
8432 + },
8433 +- {
8434 +- .soc_id = "r8a77990",
8435 +- .data = &renesas_usb3_priv_r8a77990,
8436 +- },
8437 + { /* sentinel */ },
8438 + };
8439 +
8440 +diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
8441 +index 179777cb699fb..e3931da24277a 100644
8442 +--- a/drivers/usb/gadget/udc/s3c2410_udc.c
8443 ++++ b/drivers/usb/gadget/udc/s3c2410_udc.c
8444 +@@ -1784,6 +1784,10 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
8445 + s3c2410_udc_reinit(udc);
8446 +
8447 + irq_usbd = platform_get_irq(pdev, 0);
8448 ++ if (irq_usbd < 0) {
8449 ++ retval = irq_usbd;
8450 ++ goto err_udc_clk;
8451 ++ }
8452 +
8453 + /* irq setup after old hardware state is cleaned up */
8454 + retval = request_irq(irq_usbd, s3c2410_udc_irq,
8455 +diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
8456 +index a319b1df3011c..3626758b3e2aa 100644
8457 +--- a/drivers/usb/host/ehci-orion.c
8458 ++++ b/drivers/usb/host/ehci-orion.c
8459 +@@ -264,8 +264,11 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
8460 + * the clock does not exists.
8461 + */
8462 + priv->clk = devm_clk_get(&pdev->dev, NULL);
8463 +- if (!IS_ERR(priv->clk))
8464 +- clk_prepare_enable(priv->clk);
8465 ++ if (!IS_ERR(priv->clk)) {
8466 ++ err = clk_prepare_enable(priv->clk);
8467 ++ if (err)
8468 ++ goto err_put_hcd;
8469 ++ }
8470 +
8471 + priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
8472 + if (IS_ERR(priv->phy)) {
8473 +@@ -311,6 +314,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
8474 + err_dis_clk:
8475 + if (!IS_ERR(priv->clk))
8476 + clk_disable_unprepare(priv->clk);
8477 ++err_put_hcd:
8478 + usb_put_hcd(hcd);
8479 + err:
8480 + dev_err(&pdev->dev, "init %s fail, %d\n",
8481 +diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
8482 +index 7f857bad9e95b..08ec2ab0d95a5 100644
8483 +--- a/drivers/usb/host/ohci-tmio.c
8484 ++++ b/drivers/usb/host/ohci-tmio.c
8485 +@@ -202,6 +202,9 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
8486 + if (!cell)
8487 + return -EINVAL;
8488 +
8489 ++ if (irq < 0)
8490 ++ return irq;
8491 ++
8492 + hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev));
8493 + if (!hcd) {
8494 + ret = -ENOMEM;
8495 +diff --git a/drivers/usb/misc/brcmstb-usb-pinmap.c b/drivers/usb/misc/brcmstb-usb-pinmap.c
8496 +index 336653091e3b3..2b2019c19cdeb 100644
8497 +--- a/drivers/usb/misc/brcmstb-usb-pinmap.c
8498 ++++ b/drivers/usb/misc/brcmstb-usb-pinmap.c
8499 +@@ -293,6 +293,8 @@ static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
8500 +
8501 + /* Enable interrupt for out pins */
8502 + irq = platform_get_irq(pdev, 0);
8503 ++ if (irq < 0)
8504 ++ return irq;
8505 + err = devm_request_irq(&pdev->dev, irq,
8506 + brcmstb_usb_pinmap_ovr_isr,
8507 + IRQF_TRIGGER_RISING,
8508 +diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
8509 +index f34c9437a182c..972704262b02b 100644
8510 +--- a/drivers/usb/phy/phy-fsl-usb.c
8511 ++++ b/drivers/usb/phy/phy-fsl-usb.c
8512 +@@ -873,6 +873,8 @@ int usb_otg_start(struct platform_device *pdev)
8513 +
8514 + /* request irq */
8515 + p_otg->irq = platform_get_irq(pdev, 0);
8516 ++ if (p_otg->irq < 0)
8517 ++ return p_otg->irq;
8518 + status = request_irq(p_otg->irq, fsl_otg_isr,
8519 + IRQF_SHARED, driver_name, p_otg);
8520 + if (status) {
8521 +diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
8522 +index baebb1f5a9737..a3e043e3e4aae 100644
8523 +--- a/drivers/usb/phy/phy-tahvo.c
8524 ++++ b/drivers/usb/phy/phy-tahvo.c
8525 +@@ -393,7 +393,9 @@ static int tahvo_usb_probe(struct platform_device *pdev)
8526 +
8527 + dev_set_drvdata(&pdev->dev, tu);
8528 +
8529 +- tu->irq = platform_get_irq(pdev, 0);
8530 ++ tu->irq = ret = platform_get_irq(pdev, 0);
8531 ++ if (ret < 0)
8532 ++ return ret;
8533 + ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
8534 + IRQF_ONESHOT,
8535 + "tahvo-vbus", tu);
8536 +diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
8537 +index 8ba6c5a915570..ab3c38a7d8ac0 100644
8538 +--- a/drivers/usb/phy/phy-twl6030-usb.c
8539 ++++ b/drivers/usb/phy/phy-twl6030-usb.c
8540 +@@ -348,6 +348,11 @@ static int twl6030_usb_probe(struct platform_device *pdev)
8541 + twl->irq2 = platform_get_irq(pdev, 1);
8542 + twl->linkstat = MUSB_UNKNOWN;
8543 +
8544 ++ if (twl->irq1 < 0)
8545 ++ return twl->irq1;
8546 ++ if (twl->irq2 < 0)
8547 ++ return twl->irq2;
8548 ++
8549 + twl->comparator.set_vbus = twl6030_set_vbus;
8550 + twl->comparator.start_srp = twl6030_start_srp;
8551 +
8552 +diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
8553 +index e48fded3e414c..8d8959a70e440 100644
8554 +--- a/drivers/video/backlight/pwm_bl.c
8555 ++++ b/drivers/video/backlight/pwm_bl.c
8556 +@@ -409,6 +409,33 @@ static bool pwm_backlight_is_linear(struct platform_pwm_backlight_data *data)
8557 + static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
8558 + {
8559 + struct device_node *node = pb->dev->of_node;
8560 ++ bool active = true;
8561 ++
8562 ++ /*
8563 ++ * If the enable GPIO is present, observable (either as input
8564 ++	 * or output) and off, then the backlight is not currently active.
8565 ++	 */
8566 ++ if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
8567 ++ active = false;
8568 ++
8569 ++ if (!regulator_is_enabled(pb->power_supply))
8570 ++ active = false;
8571 ++
8572 ++ if (!pwm_is_enabled(pb->pwm))
8573 ++ active = false;
8574 ++
8575 ++ /*
8576 ++ * Synchronize the enable_gpio with the observed state of the
8577 ++ * hardware.
8578 ++ */
8579 ++ if (pb->enable_gpio)
8580 ++ gpiod_direction_output(pb->enable_gpio, active);
8581 ++
8582 ++ /*
8583 ++ * Do not change pb->enabled here! pb->enabled essentially
8584 ++ * tells us if we own one of the regulator's use counts and
8585 ++ * right now we do not.
8586 ++ */
8587 +
8588 + /* Not booted with device tree or no phandle link to the node */
8589 + if (!node || !node->phandle)
8590 +@@ -420,20 +447,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
8591 + * assume that another driver will enable the backlight at the
8592 + * appropriate time. Therefore, if it is disabled, keep it so.
8593 + */
8594 +-
8595 +- /* if the enable GPIO is disabled, do not enable the backlight */
8596 +- if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
8597 +- return FB_BLANK_POWERDOWN;
8598 +-
8599 +- /* The regulator is disabled, do not enable the backlight */
8600 +- if (!regulator_is_enabled(pb->power_supply))
8601 +- return FB_BLANK_POWERDOWN;
8602 +-
8603 +- /* The PWM is disabled, keep it like this */
8604 +- if (!pwm_is_enabled(pb->pwm))
8605 +- return FB_BLANK_POWERDOWN;
8606 +-
8607 +- return FB_BLANK_UNBLANK;
8608 ++	return active ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
8609 + }
8610 +
8611 + static int pwm_backlight_probe(struct platform_device *pdev)
8612 +@@ -486,18 +500,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
8613 + goto err_alloc;
8614 + }
8615 +
8616 +- /*
8617 +- * If the GPIO is not known to be already configured as output, that
8618 +- * is, if gpiod_get_direction returns either 1 or -EINVAL, change the
8619 +- * direction to output and set the GPIO as active.
8620 +- * Do not force the GPIO to active when it was already output as it
8621 +- * could cause backlight flickering or we would enable the backlight too
8622 +- * early. Leave the decision of the initial backlight state for later.
8623 +- */
8624 +- if (pb->enable_gpio &&
8625 +- gpiod_get_direction(pb->enable_gpio) != 0)
8626 +- gpiod_direction_output(pb->enable_gpio, 1);
8627 +-
8628 + pb->power_supply = devm_regulator_get(&pdev->dev, "power");
8629 + if (IS_ERR(pb->power_supply)) {
8630 + ret = PTR_ERR(pb->power_supply);
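
The reworked handoff above derives one "active" verdict from everything observable (enable GPIO level, regulator state, PWM state) and then synchronizes the GPIO direction to that verdict, rather than forcing the GPIO high during probe. A standalone sketch of the decision itself:

#include <stdbool.h>

/* The backlight counts as active only if every switch in the chain is on. */
static bool bl_active(bool gpio_on, bool regulator_on, bool pwm_on)
{
        return gpio_on && regulator_on && pwm_on;
}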
8631 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
8632 +index 1c855145711ba..63e2f17f3c619 100644
8633 +--- a/drivers/video/fbdev/core/fbmem.c
8634 ++++ b/drivers/video/fbdev/core/fbmem.c
8635 +@@ -962,6 +962,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
8636 + struct fb_var_screeninfo old_var;
8637 + struct fb_videomode mode;
8638 + struct fb_event event;
8639 ++ u32 unused;
8640 +
8641 + if (var->activate & FB_ACTIVATE_INV_MODE) {
8642 + struct fb_videomode mode1, mode2;
8643 +@@ -1008,6 +1009,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
8644 + if (var->xres < 8 || var->yres < 8)
8645 + return -EINVAL;
8646 +
8647 ++ /* Too huge resolution causes multiplication overflow. */
8648 ++ if (check_mul_overflow(var->xres, var->yres, &unused) ||
8649 ++ check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused))
8650 ++ return -EINVAL;
8651 ++
8652 + ret = info->fbops->fb_check_var(var, info);
8653 +
8654 + if (ret)
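
The overflow guard above rejects absurd resolutions before any later xres * yres arithmetic can wrap. check_mul_overflow() is, on current compilers, a wrapper around the GCC/Clang builtin shown in this standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t out;

        /* 70000 * 70000 = 4.9e9 wraps a u32; the builtin reports it */
        printf("%d\n", __builtin_mul_overflow(70000u, 70000u, &out));
        return 0;
}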
8655 +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
8656 +index 9bd03a2310328..171ad8b42107e 100644
8657 +--- a/fs/cifs/cifs_unicode.c
8658 ++++ b/fs/cifs/cifs_unicode.c
8659 +@@ -358,14 +358,9 @@ cifs_strndup_from_utf16(const char *src, const int maxlen,
8660 + if (!dst)
8661 + return NULL;
8662 + cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
8663 +- NO_MAP_UNI_RSVD);
8664 ++ NO_MAP_UNI_RSVD);
8665 + } else {
8666 +- len = strnlen(src, maxlen);
8667 +- len++;
8668 +- dst = kmalloc(len, GFP_KERNEL);
8669 +- if (!dst)
8670 +- return NULL;
8671 +- strlcpy(dst, src, len);
8672 ++ dst = kstrndup(src, maxlen, GFP_KERNEL);
8673 + }
8674 +
8675 + return dst;
8676 +diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
8677 +index eed59bc1d9130..727c8835b2227 100644
8678 +--- a/fs/cifs/fs_context.c
8679 ++++ b/fs/cifs/fs_context.c
8680 +@@ -1266,10 +1266,17 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
8681 + ctx->posix_paths = 1;
8682 + break;
8683 + case Opt_unix:
8684 +- if (result.negated)
8685 ++ if (result.negated) {
8686 ++ if (ctx->linux_ext == 1)
8687 ++ pr_warn_once("conflicting posix mount options specified\n");
8688 + ctx->linux_ext = 0;
8689 +- else
8690 + ctx->no_linux_ext = 1;
8691 ++ } else {
8692 ++ if (ctx->no_linux_ext == 1)
8693 ++ pr_warn_once("conflicting posix mount options specified\n");
8694 ++ ctx->linux_ext = 1;
8695 ++ ctx->no_linux_ext = 0;
8696 ++ }
8697 + break;
8698 + case Opt_nocase:
8699 + ctx->nocase = 1;
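[Editor's note] pr_warn_once() in the hunk above emits its message only on the first call, via a per-callsite static flag. A user-space sketch of that warn-once behavior; the function name is illustrative:

    #include <stdio.h>

    static void warn_conflicting_options_once(void)
    {
        static int warned;

        if (warned)
            return;
        warned = 1;
        fprintf(stderr, "conflicting posix mount options specified\n");
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            warn_conflicting_options_once();   /* prints only once */
        return 0;
    }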
8700 +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
8701 +index bfee176b901d4..54d77c99e21c0 100644
8702 +--- a/fs/cifs/readdir.c
8703 ++++ b/fs/cifs/readdir.c
8704 +@@ -369,7 +369,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
8705 + */
8706 +
8707 + static int
8708 +-initiate_cifs_search(const unsigned int xid, struct file *file,
8709 ++_initiate_cifs_search(const unsigned int xid, struct file *file,
8710 + const char *full_path)
8711 + {
8712 + __u16 search_flags;
8713 +@@ -451,6 +451,27 @@ error_exit:
8714 + return rc;
8715 + }
8716 +
8717 ++static int
8718 ++initiate_cifs_search(const unsigned int xid, struct file *file,
8719 ++ const char *full_path)
8720 ++{
8721 ++ int rc, retry_count = 0;
8722 ++
8723 ++ do {
8724 ++ rc = _initiate_cifs_search(xid, file, full_path);
8725 ++ /*
8726 ++ * If we don't have enough credits to start reading the
8727 ++ * directory, just try again after a short wait.
8728 ++ */
8729 ++ if (rc != -EDEADLK)
8730 ++ break;
8731 ++
8732 ++ usleep_range(512, 2048);
8733 ++ } while (retry_count++ < 5);
8734 ++
8735 ++ return rc;
8736 ++}
8737 ++
8738 + /* return length of unicode string in bytes */
8739 + static int cifs_unicode_bytelen(const char *str)
8740 + {
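[Editor's note] The new initiate_cifs_search() wrapper above retries only the transient -EDEADLK (out of credits) case, sleeping briefly between attempts and giving up after five retries. A user-space sketch of the same bounded-retry pattern; try_search() is a hypothetical stand-in for _initiate_cifs_search():

    #include <errno.h>
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical operation: fails twice with -EDEADLK, then succeeds. */
    static int try_search(void)
    {
        static int calls;
        return ++calls <= 2 ? -EDEADLK : 0;
    }

    static int search_with_retry(void)
    {
        int rc, retry_count = 0;

        do {
            rc = try_search();
            if (rc != -EDEADLK)     /* retry only the transient error */
                break;
            /* ~1 ms pause, standing in for usleep_range(512, 2048) */
            nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
        } while (retry_count++ < 5);

        return rc;
    }

    int main(void)
    {
        printf("search_with_retry() = %d\n", search_with_retry());
        return 0;
    }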
8741 +diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
8742 +index df00231d3ecc9..7d162b0efbf03 100644
8743 +--- a/fs/debugfs/file.c
8744 ++++ b/fs/debugfs/file.c
8745 +@@ -179,8 +179,10 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
8746 + if (!fops_get(real_fops)) {
8747 + #ifdef CONFIG_MODULES
8748 + if (real_fops->owner &&
8749 +- real_fops->owner->state == MODULE_STATE_GOING)
8750 ++ real_fops->owner->state == MODULE_STATE_GOING) {
8751 ++ r = -ENXIO;
8752 + goto out;
8753 ++ }
8754 + #endif
8755 +
8756 + /* Huh? Module did not clean up after itself at exit? */
8757 +@@ -314,8 +316,10 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
8758 + if (!fops_get(real_fops)) {
8759 + #ifdef CONFIG_MODULES
8760 + if (real_fops->owner &&
8761 +- real_fops->owner->state == MODULE_STATE_GOING)
8762 ++ real_fops->owner->state == MODULE_STATE_GOING) {
8763 ++ r = -ENXIO;
8764 + goto out;
8765 ++ }
8766 + #endif
8767 +
8768 + /* Huh? Module did not cleanup after itself at exit? */
8769 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
8770 +index 6afd4562335fc..97d48c5bdebcb 100644
8771 +--- a/fs/f2fs/file.c
8772 ++++ b/fs/f2fs/file.c
8773 +@@ -261,8 +261,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
8774 + };
8775 + unsigned int seq_id = 0;
8776 +
8777 +- if (unlikely(f2fs_readonly(inode->i_sb) ||
8778 +- is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
8779 ++ if (unlikely(f2fs_readonly(inode->i_sb)))
8780 + return 0;
8781 +
8782 + trace_f2fs_sync_file_enter(inode);
8783 +@@ -276,7 +275,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
8784 + ret = file_write_and_wait_range(file, start, end);
8785 + clear_inode_flag(inode, FI_NEED_IPU);
8786 +
8787 +- if (ret) {
8788 ++ if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
8789 + trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
8790 + return ret;
8791 + }
8792 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
8793 +index 8fecd3050ccd4..ce703e6fdafc0 100644
8794 +--- a/fs/f2fs/super.c
8795 ++++ b/fs/f2fs/super.c
8796 +@@ -2039,8 +2039,17 @@ restore_flag:
8797 +
8798 + static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
8799 + {
8800 ++ int retry = DEFAULT_RETRY_IO_COUNT;
8801 ++
8802 + /* we should flush all the data to keep data consistency */
8803 +- sync_inodes_sb(sbi->sb);
8804 ++ do {
8805 ++ sync_inodes_sb(sbi->sb);
8806 ++ cond_resched();
8807 ++ congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
8808 ++ } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
8809 ++
8810 ++ if (unlikely(retry < 0))
8811 ++ f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
8812 +
8813 + down_write(&sbi->gc_lock);
8814 + f2fs_dirty_to_prefree(sbi);
8815 +diff --git a/fs/fcntl.c b/fs/fcntl.c
8816 +index f946bec8f1f1b..68added37c15f 100644
8817 +--- a/fs/fcntl.c
8818 ++++ b/fs/fcntl.c
8819 +@@ -150,7 +150,8 @@ void f_delown(struct file *filp)
8820 + pid_t f_getown(struct file *filp)
8821 + {
8822 + pid_t pid = 0;
8823 +- read_lock(&filp->f_owner.lock);
8824 ++
8825 ++ read_lock_irq(&filp->f_owner.lock);
8826 + rcu_read_lock();
8827 + if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
8828 + pid = pid_vnr(filp->f_owner.pid);
8829 +@@ -158,7 +159,7 @@ pid_t f_getown(struct file *filp)
8830 + pid = -pid;
8831 + }
8832 + rcu_read_unlock();
8833 +- read_unlock(&filp->f_owner.lock);
8834 ++ read_unlock_irq(&filp->f_owner.lock);
8835 + return pid;
8836 + }
8837 +
8838 +@@ -208,7 +209,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
8839 + struct f_owner_ex owner = {};
8840 + int ret = 0;
8841 +
8842 +- read_lock(&filp->f_owner.lock);
8843 ++ read_lock_irq(&filp->f_owner.lock);
8844 + rcu_read_lock();
8845 + if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
8846 + owner.pid = pid_vnr(filp->f_owner.pid);
8847 +@@ -231,7 +232,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
8848 + ret = -EINVAL;
8849 + break;
8850 + }
8851 +- read_unlock(&filp->f_owner.lock);
8852 ++ read_unlock_irq(&filp->f_owner.lock);
8853 +
8854 + if (!ret) {
8855 + ret = copy_to_user(owner_p, &owner, sizeof(owner));
8856 +@@ -249,10 +250,10 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
8857 + uid_t src[2];
8858 + int err;
8859 +
8860 +- read_lock(&filp->f_owner.lock);
8861 ++ read_lock_irq(&filp->f_owner.lock);
8862 + src[0] = from_kuid(user_ns, filp->f_owner.uid);
8863 + src[1] = from_kuid(user_ns, filp->f_owner.euid);
8864 +- read_unlock(&filp->f_owner.lock);
8865 ++ read_unlock_irq(&filp->f_owner.lock);
8866 +
8867 + err = put_user(src[0], &dst[0]);
8868 + err |= put_user(src[1], &dst[1]);
8869 +@@ -1003,13 +1004,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
8870 + {
8871 + while (fa) {
8872 + struct fown_struct *fown;
8873 ++ unsigned long flags;
8874 +
8875 + if (fa->magic != FASYNC_MAGIC) {
8876 + printk(KERN_ERR "kill_fasync: bad magic number in "
8877 + "fasync_struct!\n");
8878 + return;
8879 + }
8880 +- read_lock(&fa->fa_lock);
8881 ++ read_lock_irqsave(&fa->fa_lock, flags);
8882 + if (fa->fa_file) {
8883 + fown = &fa->fa_file->f_owner;
8884 + /* Don't send SIGURG to processes which have not set a
8885 +@@ -1018,7 +1020,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
8886 + if (!(sig == SIGURG && fown->signum == 0))
8887 + send_sigio(fown, fa->fa_fd, band);
8888 + }
8889 +- read_unlock(&fa->fa_lock);
8890 ++ read_unlock_irqrestore(&fa->fa_lock, flags);
8891 + fa = rcu_dereference(fa->fa_next);
8892 + }
8893 + }
8894 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
8895 +index 97f860cfc195f..2bca7edfc9f69 100644
8896 +--- a/fs/fuse/file.c
8897 ++++ b/fs/fuse/file.c
8898 +@@ -198,12 +198,11 @@ void fuse_finish_open(struct inode *inode, struct file *file)
8899 + struct fuse_file *ff = file->private_data;
8900 + struct fuse_conn *fc = get_fuse_conn(inode);
8901 +
8902 +- if (!(ff->open_flags & FOPEN_KEEP_CACHE))
8903 +- invalidate_inode_pages2(inode->i_mapping);
8904 + if (ff->open_flags & FOPEN_STREAM)
8905 + stream_open(inode, file);
8906 + else if (ff->open_flags & FOPEN_NONSEEKABLE)
8907 + nonseekable_open(inode, file);
8908 ++
8909 + if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
8910 + struct fuse_inode *fi = get_fuse_inode(inode);
8911 +
8912 +@@ -211,10 +210,14 @@ void fuse_finish_open(struct inode *inode, struct file *file)
8913 + fi->attr_version = atomic64_inc_return(&fc->attr_version);
8914 + i_size_write(inode, 0);
8915 + spin_unlock(&fi->lock);
8916 ++ truncate_pagecache(inode, 0);
8917 + fuse_invalidate_attr(inode);
8918 + if (fc->writeback_cache)
8919 + file_update_time(file);
8920 ++ } else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) {
8921 ++ invalidate_inode_pages2(inode->i_mapping);
8922 + }
8923 ++
8924 + if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
8925 + fuse_link_write_file(file);
8926 + }
8927 +@@ -389,6 +392,7 @@ struct fuse_writepage_args {
8928 + struct list_head queue_entry;
8929 + struct fuse_writepage_args *next;
8930 + struct inode *inode;
8931 ++ struct fuse_sync_bucket *bucket;
8932 + };
8933 +
8934 + static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
8935 +@@ -1608,6 +1612,9 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa)
8936 + struct fuse_args_pages *ap = &wpa->ia.ap;
8937 + int i;
8938 +
8939 ++ if (wpa->bucket)
8940 ++ fuse_sync_bucket_dec(wpa->bucket);
8941 ++
8942 + for (i = 0; i < ap->num_pages; i++)
8943 + __free_page(ap->pages[i]);
8944 +
8945 +@@ -1871,6 +1878,20 @@ static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
8946 +
8947 + }
8948 +
8949 ++static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
8950 ++ struct fuse_writepage_args *wpa)
8951 ++{
8952 ++ if (!fc->sync_fs)
8953 ++ return;
8954 ++
8955 ++ rcu_read_lock();
8956 ++ /* Prevent resurrection of a dead bucket in an unlikely race with syncfs */
8957 ++ do {
8958 ++ wpa->bucket = rcu_dereference(fc->curr_bucket);
8959 ++ } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
8960 ++ rcu_read_unlock();
8961 ++}
8962 ++
8963 + static int fuse_writepage_locked(struct page *page)
8964 + {
8965 + struct address_space *mapping = page->mapping;
8966 +@@ -1898,6 +1919,7 @@ static int fuse_writepage_locked(struct page *page)
8967 + if (!wpa->ia.ff)
8968 + goto err_nofile;
8969 +
8970 ++ fuse_writepage_add_to_bucket(fc, wpa);
8971 + fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);
8972 +
8973 + copy_highpage(tmp_page, page);
8974 +@@ -2148,6 +2170,8 @@ static int fuse_writepages_fill(struct page *page,
8975 + __free_page(tmp_page);
8976 + goto out_unlock;
8977 + }
8978 ++ fuse_writepage_add_to_bucket(fc, wpa);
8979 ++
8980 + data->max_pages = 1;
8981 +
8982 + ap = &wpa->ia.ap;
8983 +@@ -2881,7 +2905,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
8984 +
8985 + static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
8986 + {
8987 +- int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
8988 ++ int err = filemap_write_and_wait_range(inode->i_mapping, start, -1);
8989 +
8990 + if (!err)
8991 + fuse_sync_writes(inode);
8992 +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
8993 +index 07829ce78695b..a1cd598860776 100644
8994 +--- a/fs/fuse/fuse_i.h
8995 ++++ b/fs/fuse/fuse_i.h
8996 +@@ -515,6 +515,13 @@ struct fuse_fs_context {
8997 + void **fudptr;
8998 + };
8999 +
9000 ++struct fuse_sync_bucket {
9001 ++ /* count is a possible scalability bottleneck */
9002 ++ atomic_t count;
9003 ++ wait_queue_head_t waitq;
9004 ++ struct rcu_head rcu;
9005 ++};
9006 ++
9007 + /**
9008 + * A Fuse connection.
9009 + *
9010 +@@ -807,6 +814,9 @@ struct fuse_conn {
9011 +
9012 + /** List of filesystems using this connection */
9013 + struct list_head mounts;
9014 ++
9015 ++ /* New writepages go into this bucket */
9016 ++ struct fuse_sync_bucket __rcu *curr_bucket;
9017 + };
9018 +
9019 + /*
9020 +@@ -910,6 +920,15 @@ static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
9021 + descs[i].length = PAGE_SIZE - descs[i].offset;
9022 + }
9023 +
9024 ++static inline void fuse_sync_bucket_dec(struct fuse_sync_bucket *bucket)
9025 ++{
9026 ++ /* Need RCU protection to prevent use-after-free once the count drops */
9027 ++ rcu_read_lock();
9028 ++ if (atomic_dec_and_test(&bucket->count))
9029 ++ wake_up(&bucket->waitq);
9030 ++ rcu_read_unlock();
9031 ++}
9032 ++
9033 + /** Device operations */
9034 + extern const struct file_operations fuse_dev_operations;
9035 +
9036 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
9037 +index b9beb39a4a181..be7378c4f47ca 100644
9038 +--- a/fs/fuse/inode.c
9039 ++++ b/fs/fuse/inode.c
9040 +@@ -506,6 +506,57 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
9041 + return err;
9042 + }
9043 +
9044 ++static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void)
9045 ++{
9046 ++ struct fuse_sync_bucket *bucket;
9047 ++
9048 ++ bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
9049 ++ if (bucket) {
9050 ++ init_waitqueue_head(&bucket->waitq);
9051 ++ /* Initial active count */
9052 ++ atomic_set(&bucket->count, 1);
9053 ++ }
9054 ++ return bucket;
9055 ++}
9056 ++
9057 ++static void fuse_sync_fs_writes(struct fuse_conn *fc)
9058 ++{
9059 ++ struct fuse_sync_bucket *bucket, *new_bucket;
9060 ++ int count;
9061 ++
9062 ++ new_bucket = fuse_sync_bucket_alloc();
9063 ++ spin_lock(&fc->lock);
9064 ++ bucket = rcu_dereference_protected(fc->curr_bucket, 1);
9065 ++ count = atomic_read(&bucket->count);
9066 ++ WARN_ON(count < 1);
9067 ++ /* No outstanding writes? */
9068 ++ if (count == 1) {
9069 ++ spin_unlock(&fc->lock);
9070 ++ kfree(new_bucket);
9071 ++ return;
9072 ++ }
9073 ++
9074 ++ /*
9075 ++ * Completion of new bucket depends on completion of this bucket, so add
9076 ++ * one more count.
9077 ++ */
9078 ++ atomic_inc(&new_bucket->count);
9079 ++ rcu_assign_pointer(fc->curr_bucket, new_bucket);
9080 ++ spin_unlock(&fc->lock);
9081 ++ /*
9082 ++ * Drop initial active count. At this point if all writes in this and
9083 ++ * ancestor buckets complete, the count will go to zero and this task
9084 ++ * will be woken up.
9085 ++ */
9086 ++ atomic_dec(&bucket->count);
9087 ++
9088 ++ wait_event(bucket->waitq, atomic_read(&bucket->count) == 0);
9089 ++
9090 ++ /* Drop temp count on descendant bucket */
9091 ++ fuse_sync_bucket_dec(new_bucket);
9092 ++ kfree_rcu(bucket, rcu);
9093 ++}
9094 ++
9095 + static int fuse_sync_fs(struct super_block *sb, int wait)
9096 + {
9097 + struct fuse_mount *fm = get_fuse_mount_super(sb);
9098 +@@ -528,6 +579,8 @@ static int fuse_sync_fs(struct super_block *sb, int wait)
9099 + if (!fc->sync_fs)
9100 + return 0;
9101 +
9102 ++ fuse_sync_fs_writes(fc);
9103 ++
9104 + memset(&inarg, 0, sizeof(inarg));
9105 + args.in_numargs = 1;
9106 + args.in_args[0].size = sizeof(inarg);
9107 +@@ -763,6 +816,7 @@ void fuse_conn_put(struct fuse_conn *fc)
9108 + {
9109 + if (refcount_dec_and_test(&fc->count)) {
9110 + struct fuse_iqueue *fiq = &fc->iq;
9111 ++ struct fuse_sync_bucket *bucket;
9112 +
9113 + if (IS_ENABLED(CONFIG_FUSE_DAX))
9114 + fuse_dax_conn_free(fc);
9115 +@@ -770,6 +824,11 @@ void fuse_conn_put(struct fuse_conn *fc)
9116 + fiq->ops->release(fiq);
9117 + put_pid_ns(fc->pid_ns);
9118 + put_user_ns(fc->user_ns);
9119 ++ bucket = rcu_dereference_protected(fc->curr_bucket, 1);
9120 ++ if (bucket) {
9121 ++ WARN_ON(atomic_read(&bucket->count) != 1);
9122 ++ kfree(bucket);
9123 ++ }
9124 + fc->release(fc);
9125 + }
9126 + }
9127 +@@ -1418,6 +1477,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
9128 + if (sb->s_flags & SB_MANDLOCK)
9129 + goto err;
9130 +
9131 ++ rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc());
9132 + fuse_sb_defaults(sb);
9133 +
9134 + if (ctx->is_bdev) {
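[Editor's note] The fuse_sync_bucket scheme above gives each bucket an initial active count of one; every in-flight writepage takes an extra reference, and syncfs drops the initial one and waits for the count to hit zero. A minimal user-space sketch with C11 atomics (the kernel wakes a waitqueue at zero; that part is elided here):

    #include <stdatomic.h>
    #include <stdio.h>

    struct bucket { atomic_int count; };

    static void bucket_get(struct bucket *b)
    {
        atomic_fetch_add(&b->count, 1);
    }

    /* returns 1 when the last reference is dropped (waiter may proceed) */
    static int bucket_put(struct bucket *b)
    {
        return atomic_fetch_sub(&b->count, 1) == 1;
    }

    int main(void)
    {
        struct bucket b = { .count = 1 };   /* initial active count */

        bucket_get(&b);                     /* write #1 starts */
        bucket_get(&b);                     /* write #2 starts */
        bucket_put(&b);                     /* syncer drops initial count */
        bucket_put(&b);                     /* write #1 completes */
        if (bucket_put(&b))                 /* write #2 completes -> zero */
            puts("all writes drained; syncfs may proceed");
        return 0;
    }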
9135 +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
9136 +index 5f4504dd0875a..ca76e3b8792ce 100644
9137 +--- a/fs/gfs2/ops_fstype.c
9138 ++++ b/fs/gfs2/ops_fstype.c
9139 +@@ -677,6 +677,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
9140 + error = PTR_ERR(lsi->si_sc_inode);
9141 + fs_err(sdp, "can't find local \"sc\" file#%u: %d\n",
9142 + jd->jd_jid, error);
9143 ++ kfree(lsi);
9144 + goto free_local;
9145 + }
9146 + lsi->si_jid = jd->jd_jid;
9147 +@@ -1088,6 +1089,34 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
9148 + kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
9149 + }
9150 +
9151 ++static int init_threads(struct gfs2_sbd *sdp)
9152 ++{
9153 ++ struct task_struct *p;
9154 ++ int error = 0;
9155 ++
9156 ++ p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
9157 ++ if (IS_ERR(p)) {
9158 ++ error = PTR_ERR(p);
9159 ++ fs_err(sdp, "can't start logd thread: %d\n", error);
9160 ++ return error;
9161 ++ }
9162 ++ sdp->sd_logd_process = p;
9163 ++
9164 ++ p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
9165 ++ if (IS_ERR(p)) {
9166 ++ error = PTR_ERR(p);
9167 ++ fs_err(sdp, "can't start quotad thread: %d\n", error);
9168 ++ goto fail;
9169 ++ }
9170 ++ sdp->sd_quotad_process = p;
9171 ++ return 0;
9172 ++
9173 ++fail:
9174 ++ kthread_stop(sdp->sd_logd_process);
9175 ++ sdp->sd_logd_process = NULL;
9176 ++ return error;
9177 ++}
9178 ++
9179 + /**
9180 + * gfs2_fill_super - Read in superblock
9181 + * @sb: The VFS superblock
9182 +@@ -1216,6 +1245,14 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
9183 + goto fail_per_node;
9184 + }
9185 +
9186 ++ if (!sb_rdonly(sb)) {
9187 ++ error = init_threads(sdp);
9188 ++ if (error) {
9189 ++ gfs2_withdraw_delayed(sdp);
9190 ++ goto fail_per_node;
9191 ++ }
9192 ++ }
9193 ++
9194 + error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
9195 + if (error)
9196 + goto fail_per_node;
9197 +@@ -1225,6 +1262,12 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
9198 +
9199 + gfs2_freeze_unlock(&freeze_gh);
9200 + if (error) {
9201 ++ if (sdp->sd_quotad_process)
9202 ++ kthread_stop(sdp->sd_quotad_process);
9203 ++ sdp->sd_quotad_process = NULL;
9204 ++ if (sdp->sd_logd_process)
9205 ++ kthread_stop(sdp->sd_logd_process);
9206 ++ sdp->sd_logd_process = NULL;
9207 + fs_err(sdp, "can't make FS RW: %d\n", error);
9208 + goto fail_per_node;
9209 + }
9210 +diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
9211 +index 4d4ceb0b69031..2bdbba5ea8d79 100644
9212 +--- a/fs/gfs2/super.c
9213 ++++ b/fs/gfs2/super.c
9214 +@@ -119,34 +119,6 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
9215 + return 0;
9216 + }
9217 +
9218 +-static int init_threads(struct gfs2_sbd *sdp)
9219 +-{
9220 +- struct task_struct *p;
9221 +- int error = 0;
9222 +-
9223 +- p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
9224 +- if (IS_ERR(p)) {
9225 +- error = PTR_ERR(p);
9226 +- fs_err(sdp, "can't start logd thread: %d\n", error);
9227 +- return error;
9228 +- }
9229 +- sdp->sd_logd_process = p;
9230 +-
9231 +- p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
9232 +- if (IS_ERR(p)) {
9233 +- error = PTR_ERR(p);
9234 +- fs_err(sdp, "can't start quotad thread: %d\n", error);
9235 +- goto fail;
9236 +- }
9237 +- sdp->sd_quotad_process = p;
9238 +- return 0;
9239 +-
9240 +-fail:
9241 +- kthread_stop(sdp->sd_logd_process);
9242 +- sdp->sd_logd_process = NULL;
9243 +- return error;
9244 +-}
9245 +-
9246 + /**
9247 + * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
9248 + * @sdp: the filesystem
9249 +@@ -161,26 +133,17 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
9250 + struct gfs2_log_header_host head;
9251 + int error;
9252 +
9253 +- error = init_threads(sdp);
9254 +- if (error) {
9255 +- gfs2_withdraw_delayed(sdp);
9256 +- return error;
9257 +- }
9258 +-
9259 + j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
9260 +- if (gfs2_withdrawn(sdp)) {
9261 +- error = -EIO;
9262 +- goto fail;
9263 +- }
9264 ++ if (gfs2_withdrawn(sdp))
9265 ++ return -EIO;
9266 +
9267 + error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
9268 + if (error || gfs2_withdrawn(sdp))
9269 +- goto fail;
9270 ++ return error;
9271 +
9272 + if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
9273 + gfs2_consist(sdp);
9274 +- error = -EIO;
9275 +- goto fail;
9276 ++ return -EIO;
9277 + }
9278 +
9279 + /* Initialize some head of the log stuff */
9280 +@@ -188,20 +151,8 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
9281 + gfs2_log_pointers_init(sdp, head.lh_blkno);
9282 +
9283 + error = gfs2_quota_init(sdp);
9284 +- if (error || gfs2_withdrawn(sdp))
9285 +- goto fail;
9286 +-
9287 +- set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
9288 +-
9289 +- return 0;
9290 +-
9291 +-fail:
9292 +- if (sdp->sd_quotad_process)
9293 +- kthread_stop(sdp->sd_quotad_process);
9294 +- sdp->sd_quotad_process = NULL;
9295 +- if (sdp->sd_logd_process)
9296 +- kthread_stop(sdp->sd_logd_process);
9297 +- sdp->sd_logd_process = NULL;
9298 ++ if (!error && !gfs2_withdrawn(sdp))
9299 ++ set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
9300 + return error;
9301 + }
9302 +
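[Editor's note] init_threads(), moved into ops_fstype.c above, starts two kernel threads and, if the second fails, stops the first before reporting the error. A user-space sketch of that start-or-unwind sequence using pthreads; logd/quotad are trivial placeholders, and pthread_cancel()/pthread_join() stand in for kthread_stop():

    #include <pthread.h>
    #include <stdio.h>

    static void *logd(void *arg)   { return arg; }
    static void *quotad(void *arg) { return arg; }

    static int start_threads(pthread_t *t1, pthread_t *t2)
    {
        int err = pthread_create(t1, NULL, logd, NULL);
        if (err)
            return err;

        err = pthread_create(t2, NULL, quotad, NULL);
        if (err) {
            /* unwind: stop the thread that did start */
            pthread_cancel(*t1);
            pthread_join(*t1, NULL);
            return err;
        }
        return 0;
    }

    int main(void)
    {
        pthread_t logd_thr, quotad_thr;

        if (start_threads(&logd_thr, &quotad_thr) == 0) {
            pthread_join(logd_thr, NULL);
            pthread_join(quotad_thr, NULL);
            puts("both threads ran");
        }
        return 0;
    }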
9303 +diff --git a/fs/io-wq.c b/fs/io-wq.c
9304 +index 7d2ed8c7dd312..2cc7f75ff24d7 100644
9305 +--- a/fs/io-wq.c
9306 ++++ b/fs/io-wq.c
9307 +@@ -51,6 +51,10 @@ struct io_worker {
9308 +
9309 + struct completion ref_done;
9310 +
9311 ++ unsigned long create_state;
9312 ++ struct callback_head create_work;
9313 ++ int create_index;
9314 ++
9315 + struct rcu_head rcu;
9316 + };
9317 +
9318 +@@ -272,24 +276,18 @@ static void io_wqe_inc_running(struct io_worker *worker)
9319 + atomic_inc(&acct->nr_running);
9320 + }
9321 +
9322 +-struct create_worker_data {
9323 +- struct callback_head work;
9324 +- struct io_wqe *wqe;
9325 +- int index;
9326 +-};
9327 +-
9328 + static void create_worker_cb(struct callback_head *cb)
9329 + {
9330 +- struct create_worker_data *cwd;
9331 ++ struct io_worker *worker;
9332 + struct io_wq *wq;
9333 + struct io_wqe *wqe;
9334 + struct io_wqe_acct *acct;
9335 + bool do_create = false, first = false;
9336 +
9337 +- cwd = container_of(cb, struct create_worker_data, work);
9338 +- wqe = cwd->wqe;
9339 ++ worker = container_of(cb, struct io_worker, create_work);
9340 ++ wqe = worker->wqe;
9341 + wq = wqe->wq;
9342 +- acct = &wqe->acct[cwd->index];
9343 ++ acct = &wqe->acct[worker->create_index];
9344 + raw_spin_lock_irq(&wqe->lock);
9345 + if (acct->nr_workers < acct->max_workers) {
9346 + if (!acct->nr_workers)
9347 +@@ -299,33 +297,42 @@ static void create_worker_cb(struct callback_head *cb)
9348 + }
9349 + raw_spin_unlock_irq(&wqe->lock);
9350 + if (do_create) {
9351 +- create_io_worker(wq, wqe, cwd->index, first);
9352 ++ create_io_worker(wq, wqe, worker->create_index, first);
9353 + } else {
9354 + atomic_dec(&acct->nr_running);
9355 + io_worker_ref_put(wq);
9356 + }
9357 +- kfree(cwd);
9358 ++ clear_bit_unlock(0, &worker->create_state);
9359 ++ io_worker_release(worker);
9360 + }
9361 +
9362 +-static void io_queue_worker_create(struct io_wqe *wqe, struct io_wqe_acct *acct)
9363 ++static void io_queue_worker_create(struct io_wqe *wqe, struct io_worker *worker,
9364 ++ struct io_wqe_acct *acct)
9365 + {
9366 +- struct create_worker_data *cwd;
9367 + struct io_wq *wq = wqe->wq;
9368 +
9369 + /* raced with exit, just ignore create call */
9370 + if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
9371 + goto fail;
9372 ++ if (!io_worker_get(worker))
9373 ++ goto fail;
9374 ++ /*
9375 ++ * create_state manages ownership of create_work/index. We should
9376 ++ * only need one entry per worker, as the worker going to sleep
9377 ++ * will trigger the condition, and waking will clear it once it
9378 ++ * runs the task_work.
9379 ++ */
9380 ++ if (test_bit(0, &worker->create_state) ||
9381 ++ test_and_set_bit_lock(0, &worker->create_state))
9382 ++ goto fail_release;
9383 +
9384 +- cwd = kmalloc(sizeof(*cwd), GFP_ATOMIC);
9385 +- if (cwd) {
9386 +- init_task_work(&cwd->work, create_worker_cb);
9387 +- cwd->wqe = wqe;
9388 +- cwd->index = acct->index;
9389 +- if (!task_work_add(wq->task, &cwd->work, TWA_SIGNAL))
9390 +- return;
9391 +-
9392 +- kfree(cwd);
9393 +- }
9394 ++ init_task_work(&worker->create_work, create_worker_cb);
9395 ++ worker->create_index = acct->index;
9396 ++ if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL))
9397 ++ return;
9398 ++ clear_bit_unlock(0, &worker->create_state);
9399 ++fail_release:
9400 ++ io_worker_release(worker);
9401 + fail:
9402 + atomic_dec(&acct->nr_running);
9403 + io_worker_ref_put(wq);
9404 +@@ -343,7 +350,7 @@ static void io_wqe_dec_running(struct io_worker *worker)
9405 + if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe)) {
9406 + atomic_inc(&acct->nr_running);
9407 + atomic_inc(&wqe->wq->worker_refs);
9408 +- io_queue_worker_create(wqe, acct);
9409 ++ io_queue_worker_create(wqe, worker, acct);
9410 + }
9411 + }
9412 +
9413 +@@ -416,7 +423,28 @@ static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
9414 + spin_unlock(&wq->hash->wait.lock);
9415 + }
9416 +
9417 +-static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
9418 ++/*
9419 ++ * We can always run the work if the worker is currently the same type as
9420 ++ * the work (e.g. both are bound, or both are unbound). If they are not the
9421 ++ * same, only allow it if incrementing the worker count would be allowed.
9422 ++ */
9423 ++static bool io_worker_can_run_work(struct io_worker *worker,
9424 ++ struct io_wq_work *work)
9425 ++{
9426 ++ struct io_wqe_acct *acct;
9427 ++
9428 ++ if (!(worker->flags & IO_WORKER_F_BOUND) !=
9429 ++ !(work->flags & IO_WQ_WORK_UNBOUND))
9430 ++ return true;
9431 ++
9432 ++ /* not the same type, check if we'd go over the limit */
9433 ++ acct = io_work_get_acct(worker->wqe, work);
9434 ++ return acct->nr_workers < acct->max_workers;
9435 ++}
9436 ++
9437 ++static struct io_wq_work *io_get_next_work(struct io_wqe *wqe,
9438 ++ struct io_worker *worker,
9439 ++ bool *stalled)
9440 + __must_hold(wqe->lock)
9441 + {
9442 + struct io_wq_work_node *node, *prev;
9443 +@@ -428,6 +456,9 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
9444 +
9445 + work = container_of(node, struct io_wq_work, list);
9446 +
9447 ++ if (!io_worker_can_run_work(worker, work))
9448 ++ break;
9449 ++
9450 + /* not hashed, can run anytime */
9451 + if (!io_wq_is_hashed(work)) {
9452 + wq_list_del(&wqe->work_list, node, prev);
9453 +@@ -454,6 +485,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
9454 + raw_spin_unlock(&wqe->lock);
9455 + io_wait_on_hash(wqe, stall_hash);
9456 + raw_spin_lock(&wqe->lock);
9457 ++ *stalled = true;
9458 + }
9459 +
9460 + return NULL;
9461 +@@ -493,6 +525,7 @@ static void io_worker_handle_work(struct io_worker *worker)
9462 +
9463 + do {
9464 + struct io_wq_work *work;
9465 ++ bool stalled;
9466 + get_next:
9467 + /*
9468 + * If we got some work, mark us as busy. If we didn't, but
9469 +@@ -501,10 +534,11 @@ get_next:
9470 + * can't make progress, any work completion or insertion will
9471 + * clear the stalled flag.
9472 + */
9473 +- work = io_get_next_work(wqe);
9474 ++ stalled = false;
9475 ++ work = io_get_next_work(wqe, worker, &stalled);
9476 + if (work)
9477 + __io_worker_busy(wqe, worker, work);
9478 +- else if (!wq_list_empty(&wqe->work_list))
9479 ++ else if (stalled)
9480 + wqe->flags |= IO_WQE_FLAG_STALLED;
9481 +
9482 + raw_spin_unlock_irq(&wqe->lock);
9483 +@@ -1004,12 +1038,12 @@ err_wq:
9484 +
9485 + static bool io_task_work_match(struct callback_head *cb, void *data)
9486 + {
9487 +- struct create_worker_data *cwd;
9488 ++ struct io_worker *worker;
9489 +
9490 + if (cb->func != create_worker_cb)
9491 + return false;
9492 +- cwd = container_of(cb, struct create_worker_data, work);
9493 +- return cwd->wqe->wq == data;
9494 ++ worker = container_of(cb, struct io_worker, create_work);
9495 ++ return worker->wqe->wq == data;
9496 + }
9497 +
9498 + void io_wq_exit_start(struct io_wq *wq)
9499 +@@ -1026,12 +1060,13 @@ static void io_wq_exit_workers(struct io_wq *wq)
9500 + return;
9501 +
9502 + while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
9503 +- struct create_worker_data *cwd;
9504 ++ struct io_worker *worker;
9505 +
9506 +- cwd = container_of(cb, struct create_worker_data, work);
9507 +- atomic_dec(&cwd->wqe->acct[cwd->index].nr_running);
9508 ++ worker = container_of(cb, struct io_worker, create_work);
9509 ++ atomic_dec(&worker->wqe->acct[worker->create_index].nr_running);
9510 + io_worker_ref_put(wq);
9511 +- kfree(cwd);
9512 ++ clear_bit_unlock(0, &worker->create_state);
9513 ++ io_worker_release(worker);
9514 + }
9515 +
9516 + rcu_read_lock();
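[Editor's note] The create_state bit above is taken with test_and_set_bit_lock() so at most one pending create request exists per worker, and is released once the task_work runs. A user-space sketch of that single-slot ownership using a C11 atomic_flag; names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct worker { atomic_flag create_pending; };

    static bool queue_create(struct worker *w)
    {
        /* test-and-set returns the previous value: true means the slot
         * is already owned, so this request is dropped */
        if (atomic_flag_test_and_set(&w->create_pending))
            return false;
        /* ... arm the deferred work here ... */
        return true;
    }

    static void create_done(struct worker *w)
    {
        atomic_flag_clear(&w->create_pending);  /* release ownership */
    }

    int main(void)
    {
        struct worker w = { ATOMIC_FLAG_INIT };

        printf("first:  %s\n", queue_create(&w) ? "queued" : "busy");
        printf("second: %s\n", queue_create(&w) ? "queued" : "busy");
        create_done(&w);
        printf("third:  %s\n", queue_create(&w) ? "queued" : "busy");
        return 0;
    }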
9517 +diff --git a/fs/io_uring.c b/fs/io_uring.c
9518 +index a2e20a6fbfed8..14bebc62db2d4 100644
9519 +--- a/fs/io_uring.c
9520 ++++ b/fs/io_uring.c
9521 +@@ -1001,6 +1001,7 @@ static const struct io_op_def io_op_defs[] = {
9522 + },
9523 + [IORING_OP_WRITE] = {
9524 + .needs_file = 1,
9525 ++ .hash_reg_file = 1,
9526 + .unbound_nonreg_file = 1,
9527 + .pollout = 1,
9528 + .plug = 1,
9529 +@@ -1328,6 +1329,8 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
9530 + struct io_timeout_data *io = req->async_data;
9531 +
9532 + if (hrtimer_try_to_cancel(&io->timer) != -1) {
9533 ++ if (status)
9534 ++ req_set_fail(req);
9535 + atomic_set(&req->ctx->cq_timeouts,
9536 + atomic_read(&req->ctx->cq_timeouts) + 1);
9537 + list_del_init(&req->timeout.list);
9538 +@@ -7722,6 +7725,8 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
9539 + return -EINVAL;
9540 + if (nr_args > IORING_MAX_FIXED_FILES)
9541 + return -EMFILE;
9542 ++ if (nr_args > rlimit(RLIMIT_NOFILE))
9543 ++ return -EMFILE;
9544 + ret = io_rsrc_node_switch_start(ctx);
9545 + if (ret)
9546 + return ret;
9547 +diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
9548 +index 6250ca6a1f851..4ecf4e1f68ef9 100644
9549 +--- a/fs/iomap/swapfile.c
9550 ++++ b/fs/iomap/swapfile.c
9551 +@@ -31,11 +31,16 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
9552 + {
9553 + struct iomap *iomap = &isi->iomap;
9554 + unsigned long nr_pages;
9555 ++ unsigned long max_pages;
9556 + uint64_t first_ppage;
9557 + uint64_t first_ppage_reported;
9558 + uint64_t next_ppage;
9559 + int error;
9560 +
9561 ++ if (unlikely(isi->nr_pages >= isi->sis->max))
9562 ++ return 0;
9563 ++ max_pages = isi->sis->max - isi->nr_pages;
9564 ++
9565 + /*
9566 + * Round the start up and the end down so that the physical
9567 + * extent aligns to a page boundary.
9568 +@@ -48,6 +53,7 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
9569 + if (first_ppage >= next_ppage)
9570 + return 0;
9571 + nr_pages = next_ppage - first_ppage;
9572 ++ nr_pages = min(nr_pages, max_pages);
9573 +
9574 + /*
9575 + * Calculate how much swap space we're adding; the first page contains
9576 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
9577 +index 21edc423b79fa..678e2c51b855c 100644
9578 +--- a/fs/isofs/inode.c
9579 ++++ b/fs/isofs/inode.c
9580 +@@ -155,7 +155,6 @@ struct iso9660_options{
9581 + unsigned int overriderockperm:1;
9582 + unsigned int uid_set:1;
9583 + unsigned int gid_set:1;
9584 +- unsigned int utf8:1;
9585 + unsigned char map;
9586 + unsigned char check;
9587 + unsigned int blocksize;
9588 +@@ -356,7 +355,6 @@ static int parse_options(char *options, struct iso9660_options *popt)
9589 + popt->gid = GLOBAL_ROOT_GID;
9590 + popt->uid = GLOBAL_ROOT_UID;
9591 + popt->iocharset = NULL;
9592 +- popt->utf8 = 0;
9593 + popt->overriderockperm = 0;
9594 + popt->session=-1;
9595 + popt->sbsector=-1;
9596 +@@ -389,10 +387,13 @@ static int parse_options(char *options, struct iso9660_options *popt)
9597 + case Opt_cruft:
9598 + popt->cruft = 1;
9599 + break;
9600 ++#ifdef CONFIG_JOLIET
9601 + case Opt_utf8:
9602 +- popt->utf8 = 1;
9603 ++ kfree(popt->iocharset);
9604 ++ popt->iocharset = kstrdup("utf8", GFP_KERNEL);
9605 ++ if (!popt->iocharset)
9606 ++ return 0;
9607 + break;
9608 +-#ifdef CONFIG_JOLIET
9609 + case Opt_iocharset:
9610 + kfree(popt->iocharset);
9611 + popt->iocharset = match_strdup(&args[0]);
9612 +@@ -495,7 +496,6 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
9613 + if (sbi->s_nocompress) seq_puts(m, ",nocompress");
9614 + if (sbi->s_overriderockperm) seq_puts(m, ",overriderockperm");
9615 + if (sbi->s_showassoc) seq_puts(m, ",showassoc");
9616 +- if (sbi->s_utf8) seq_puts(m, ",utf8");
9617 +
9618 + if (sbi->s_check) seq_printf(m, ",check=%c", sbi->s_check);
9619 + if (sbi->s_mapping) seq_printf(m, ",map=%c", sbi->s_mapping);
9620 +@@ -518,9 +518,10 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
9621 + seq_printf(m, ",fmode=%o", sbi->s_fmode);
9622 +
9623 + #ifdef CONFIG_JOLIET
9624 +- if (sbi->s_nls_iocharset &&
9625 +- strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
9626 ++ if (sbi->s_nls_iocharset)
9627 + seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
9628 ++ else
9629 ++ seq_puts(m, ",iocharset=utf8");
9630 + #endif
9631 + return 0;
9632 + }
9633 +@@ -863,14 +864,13 @@ root_found:
9634 + sbi->s_nls_iocharset = NULL;
9635 +
9636 + #ifdef CONFIG_JOLIET
9637 +- if (joliet_level && opt.utf8 == 0) {
9638 ++ if (joliet_level) {
9639 + char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
9640 +- sbi->s_nls_iocharset = load_nls(p);
9641 +- if (! sbi->s_nls_iocharset) {
9642 +- /* Fail only if explicit charset specified */
9643 +- if (opt.iocharset)
9644 ++ if (strcmp(p, "utf8") != 0) {
9645 ++ sbi->s_nls_iocharset = opt.iocharset ?
9646 ++ load_nls(opt.iocharset) : load_nls_default();
9647 ++ if (!sbi->s_nls_iocharset)
9648 + goto out_freesbi;
9649 +- sbi->s_nls_iocharset = load_nls_default();
9650 + }
9651 + }
9652 + #endif
9653 +@@ -886,7 +886,6 @@ root_found:
9654 + sbi->s_gid = opt.gid;
9655 + sbi->s_uid_set = opt.uid_set;
9656 + sbi->s_gid_set = opt.gid_set;
9657 +- sbi->s_utf8 = opt.utf8;
9658 + sbi->s_nocompress = opt.nocompress;
9659 + sbi->s_overriderockperm = opt.overriderockperm;
9660 + /*
9661 +diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
9662 +index 055ec6c586f7f..dcdc191ed1834 100644
9663 +--- a/fs/isofs/isofs.h
9664 ++++ b/fs/isofs/isofs.h
9665 +@@ -44,7 +44,6 @@ struct isofs_sb_info {
9666 + unsigned char s_session;
9667 + unsigned int s_high_sierra:1;
9668 + unsigned int s_rock:2;
9669 +- unsigned int s_utf8:1;
9670 + unsigned int s_cruft:1; /* Broken disks with high byte of length
9671 + * containing junk */
9672 + unsigned int s_nocompress:1;
9673 +diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c
9674 +index be8b6a9d0b926..c0f04a1e7f695 100644
9675 +--- a/fs/isofs/joliet.c
9676 ++++ b/fs/isofs/joliet.c
9677 +@@ -41,14 +41,12 @@ uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
9678 + int
9679 + get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
9680 + {
9681 +- unsigned char utf8;
9682 + struct nls_table *nls;
9683 + unsigned char len = 0;
9684 +
9685 +- utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
9686 + nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
9687 +
9688 +- if (utf8) {
9689 ++ if (!nls) {
9690 + len = utf16s_to_utf8s((const wchar_t *) de->name,
9691 + de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
9692 + outname, PAGE_SIZE);
9693 +diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
9694 +index 61d3cc2283dc8..498cb70c2c0d0 100644
9695 +--- a/fs/lockd/svclock.c
9696 ++++ b/fs/lockd/svclock.c
9697 +@@ -634,7 +634,7 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
9698 + conflock->caller = "somehost"; /* FIXME */
9699 + conflock->len = strlen(conflock->caller);
9700 + conflock->oh.len = 0; /* don't return OH info */
9701 +- conflock->svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
9702 ++ conflock->svid = lock->fl.fl_pid;
9703 + conflock->fl.fl_type = lock->fl.fl_type;
9704 + conflock->fl.fl_start = lock->fl.fl_start;
9705 + conflock->fl.fl_end = lock->fl.fl_end;
9706 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
9707 +index fa67ecd5fe63f..2bedc7839ec56 100644
9708 +--- a/fs/nfsd/nfs4state.c
9709 ++++ b/fs/nfsd/nfs4state.c
9710 +@@ -2687,9 +2687,9 @@ static void force_expire_client(struct nfs4_client *clp)
9711 +
9712 + trace_nfsd_clid_admin_expired(&clp->cl_clientid);
9713 +
9714 +- spin_lock(&clp->cl_lock);
9715 ++ spin_lock(&nn->client_lock);
9716 + clp->cl_time = 0;
9717 +- spin_unlock(&clp->cl_lock);
9718 ++ spin_unlock(&nn->client_lock);
9719 +
9720 + wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
9721 + spin_lock(&nn->client_lock);
9722 +diff --git a/fs/udf/misc.c b/fs/udf/misc.c
9723 +index eab94527340dc..1614d308d0f06 100644
9724 +--- a/fs/udf/misc.c
9725 ++++ b/fs/udf/misc.c
9726 +@@ -173,13 +173,22 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
9727 + else
9728 + offset = le32_to_cpu(eahd->appAttrLocation);
9729 +
9730 +- while (offset < iinfo->i_lenEAttr) {
9731 ++ while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) {
9732 ++ uint32_t attrLength;
9733 ++
9734 + gaf = (struct genericFormat *)&ea[offset];
9735 ++ attrLength = le32_to_cpu(gaf->attrLength);
9736 ++
9737 ++ /* Detect undersized elements and buffer overflows */
9738 ++ if ((attrLength < sizeof(*gaf)) ||
9739 ++ (attrLength > (iinfo->i_lenEAttr - offset)))
9740 ++ break;
9741 ++
9742 + if (le32_to_cpu(gaf->attrType) == type &&
9743 + gaf->attrSubtype == subtype)
9744 + return gaf;
9745 + else
9746 +- offset += le32_to_cpu(gaf->attrLength);
9747 ++ offset += attrLength;
9748 + }
9749 + }
9750 +
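[Editor's note] The hardened loop above walks length-prefixed attribute records and bails out on entries that are undersized or would overrun the buffer. A user-space sketch of the same bounds-checked walk; the record layout here is illustrative, not the on-disk UDF format:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct rec_hdr { uint32_t type; uint32_t len; /* len includes header */ };

    static const struct rec_hdr *find_rec(const uint8_t *buf, size_t buflen,
                                          uint32_t want)
    {
        size_t off = 0;

        while (off + sizeof(struct rec_hdr) <= buflen) {
            struct rec_hdr h;

            memcpy(&h, buf + off, sizeof(h));
            /* reject undersized records and records overrunning the buffer */
            if (h.len < sizeof(h) || h.len > buflen - off)
                return NULL;
            if (h.type == want)
                return (const struct rec_hdr *)(buf + off);
            off += h.len;
        }
        return NULL;
    }

    int main(void)
    {
        uint8_t buf[16] = { 0 };
        struct rec_hdr h = { .type = 7, .len = sizeof(h) };

        memcpy(buf, &h, sizeof(h));
        printf("found: %s\n", find_rec(buf, sizeof(buf), 7) ? "yes" : "no");
        return 0;
    }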
9751 +diff --git a/fs/udf/super.c b/fs/udf/super.c
9752 +index 2f83c1204e20c..b2d7c57d06881 100644
9753 +--- a/fs/udf/super.c
9754 ++++ b/fs/udf/super.c
9755 +@@ -108,16 +108,10 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
9756 + return NULL;
9757 + lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
9758 + partnum = le32_to_cpu(lvid->numOfPartitions);
9759 +- if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
9760 +- offsetof(struct logicalVolIntegrityDesc, impUse)) /
9761 +- (2 * sizeof(uint32_t)) < partnum) {
9762 +- udf_err(sb, "Logical volume integrity descriptor corrupted "
9763 +- "(numOfPartitions = %u)!\n", partnum);
9764 +- return NULL;
9765 +- }
9766 + /* The offset is to skip freeSpaceTable and sizeTable arrays */
9767 + offset = partnum * 2 * sizeof(uint32_t);
9768 +- return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
9769 ++ return (struct logicalVolIntegrityDescImpUse *)
9770 ++ (((uint8_t *)(lvid + 1)) + offset);
9771 + }
9772 +
9773 + /* UDF filesystem type */
9774 +@@ -349,10 +343,10 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
9775 + seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
9776 + if (sbi->s_anchor != 0)
9777 + seq_printf(seq, ",anchor=%u", sbi->s_anchor);
9778 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
9779 +- seq_puts(seq, ",utf8");
9780 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
9781 ++ if (sbi->s_nls_map)
9782 + seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
9783 ++ else
9784 ++ seq_puts(seq, ",iocharset=utf8");
9785 +
9786 + return 0;
9787 + }
9788 +@@ -558,19 +552,24 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
9789 + /* Ignored (never implemented properly) */
9790 + break;
9791 + case Opt_utf8:
9792 +- uopt->flags |= (1 << UDF_FLAG_UTF8);
9793 ++ if (!remount) {
9794 ++ unload_nls(uopt->nls_map);
9795 ++ uopt->nls_map = NULL;
9796 ++ }
9797 + break;
9798 + case Opt_iocharset:
9799 + if (!remount) {
9800 +- if (uopt->nls_map)
9801 +- unload_nls(uopt->nls_map);
9802 +- /*
9803 +- * load_nls() failure is handled later in
9804 +- * udf_fill_super() after all options are
9805 +- * parsed.
9806 +- */
9807 ++ unload_nls(uopt->nls_map);
9808 ++ uopt->nls_map = NULL;
9809 ++ }
9810 ++ /* When no nls_map is loaded, UTF-8 is used */
9811 ++ if (!remount && strcmp(args[0].from, "utf8") != 0) {
9812 + uopt->nls_map = load_nls(args[0].from);
9813 +- uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
9814 ++ if (!uopt->nls_map) {
9815 ++ pr_err("iocharset %s not found\n",
9816 ++ args[0].from);
9817 ++ return 0;
9818 ++ }
9819 + }
9820 + break;
9821 + case Opt_uforget:
9822 +@@ -1542,6 +1541,7 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
9823 + struct udf_sb_info *sbi = UDF_SB(sb);
9824 + struct logicalVolIntegrityDesc *lvid;
9825 + int indirections = 0;
9826 ++ u32 parts, impuselen;
9827 +
9828 + while (++indirections <= UDF_MAX_LVID_NESTING) {
9829 + final_bh = NULL;
9830 +@@ -1568,15 +1568,27 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
9831 +
9832 + lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
9833 + if (lvid->nextIntegrityExt.extLength == 0)
9834 +- return;
9835 ++ goto check;
9836 +
9837 + loc = leea_to_cpu(lvid->nextIntegrityExt);
9838 + }
9839 +
9840 + udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
9841 + UDF_MAX_LVID_NESTING);
9842 ++out_err:
9843 + brelse(sbi->s_lvid_bh);
9844 + sbi->s_lvid_bh = NULL;
9845 ++ return;
9846 ++check:
9847 ++ parts = le32_to_cpu(lvid->numOfPartitions);
9848 ++ impuselen = le32_to_cpu(lvid->lengthOfImpUse);
9849 ++ if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
9850 ++ sizeof(struct logicalVolIntegrityDesc) + impuselen +
9851 ++ 2 * parts * sizeof(u32) > sb->s_blocksize) {
9852 ++ udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
9853 ++ "ignoring.\n", parts, impuselen);
9854 ++ goto out_err;
9855 ++ }
9856 + }
9857 +
9858 + /*
9859 +@@ -2139,21 +2151,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
9860 + if (!udf_parse_options((char *)options, &uopt, false))
9861 + goto parse_options_failure;
9862 +
9863 +- if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
9864 +- uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
9865 +- udf_err(sb, "utf8 cannot be combined with iocharset\n");
9866 +- goto parse_options_failure;
9867 +- }
9868 +- if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
9869 +- uopt.nls_map = load_nls_default();
9870 +- if (!uopt.nls_map)
9871 +- uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
9872 +- else
9873 +- udf_debug("Using default NLS map\n");
9874 +- }
9875 +- if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
9876 +- uopt.flags |= (1 << UDF_FLAG_UTF8);
9877 +-
9878 + fileset.logicalBlockNum = 0xFFFFFFFF;
9879 + fileset.partitionReferenceNum = 0xFFFF;
9880 +
9881 +@@ -2308,8 +2305,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
9882 + error_out:
9883 + iput(sbi->s_vat_inode);
9884 + parse_options_failure:
9885 +- if (uopt.nls_map)
9886 +- unload_nls(uopt.nls_map);
9887 ++ unload_nls(uopt.nls_map);
9888 + if (lvid_open)
9889 + udf_close_lvid(sb);
9890 + brelse(sbi->s_lvid_bh);
9891 +@@ -2359,8 +2355,7 @@ static void udf_put_super(struct super_block *sb)
9892 + sbi = UDF_SB(sb);
9893 +
9894 + iput(sbi->s_vat_inode);
9895 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
9896 +- unload_nls(sbi->s_nls_map);
9897 ++ unload_nls(sbi->s_nls_map);
9898 + if (!sb_rdonly(sb))
9899 + udf_close_lvid(sb);
9900 + brelse(sbi->s_lvid_bh);
9901 +diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
9902 +index 758efe557a199..4fa620543d302 100644
9903 +--- a/fs/udf/udf_sb.h
9904 ++++ b/fs/udf/udf_sb.h
9905 +@@ -20,8 +20,6 @@
9906 + #define UDF_FLAG_UNDELETE 6
9907 + #define UDF_FLAG_UNHIDE 7
9908 + #define UDF_FLAG_VARCONV 8
9909 +-#define UDF_FLAG_NLS_MAP 9
9910 +-#define UDF_FLAG_UTF8 10
9911 + #define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */
9912 + #define UDF_FLAG_GID_FORGET 12
9913 + #define UDF_FLAG_UID_SET 13
9914 +diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
9915 +index 5fcfa96463ebb..622569007b530 100644
9916 +--- a/fs/udf/unicode.c
9917 ++++ b/fs/udf/unicode.c
9918 +@@ -177,7 +177,7 @@ static int udf_name_from_CS0(struct super_block *sb,
9919 + return 0;
9920 + }
9921 +
9922 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
9923 ++ if (UDF_SB(sb)->s_nls_map)
9924 + conv_f = UDF_SB(sb)->s_nls_map->uni2char;
9925 + else
9926 + conv_f = NULL;
9927 +@@ -285,7 +285,7 @@ static int udf_name_to_CS0(struct super_block *sb,
9928 + if (ocu_max_len <= 0)
9929 + return 0;
9930 +
9931 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
9932 ++ if (UDF_SB(sb)->s_nls_map)
9933 + conv_f = UDF_SB(sb)->s_nls_map->char2uni;
9934 + else
9935 + conv_f = NULL;
9936 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
9937 +index d3afea47ade67..4b0f8bb0671d1 100644
9938 +--- a/include/linux/blkdev.h
9939 ++++ b/include/linux/blkdev.h
9940 +@@ -1521,6 +1521,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
9941 + return offset << SECTOR_SHIFT;
9942 + }
9943 +
9944 ++/*
9945 ++ * Two cases of handling DISCARD merge:
9946 ++ * If max_discard_segments > 1, the driver takes every bio
9947 ++ * as a range and sends them to the controller together. The ranges
9948 ++ * need not be contiguous.
9949 ++ * Otherwise, the bios/requests are handled the same as the
9950 ++ * others, which should be contiguous.
9951 ++ */
9952 ++static inline bool blk_discard_mergable(struct request *req)
9953 ++{
9954 ++ if (req_op(req) == REQ_OP_DISCARD &&
9955 ++ queue_max_discard_segments(req->q) > 1)
9956 ++ return true;
9957 ++ return false;
9958 ++}
9959 ++
9960 + static inline int bdev_discard_alignment(struct block_device *bdev)
9961 + {
9962 + struct request_queue *q = bdev_get_queue(bdev);
9963 +diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
9964 +index 3f221dbf5f95d..1834752c56175 100644
9965 +--- a/include/linux/energy_model.h
9966 ++++ b/include/linux/energy_model.h
9967 +@@ -53,6 +53,22 @@ struct em_perf_domain {
9968 + #ifdef CONFIG_ENERGY_MODEL
9969 + #define EM_MAX_POWER 0xFFFF
9970 +
9971 ++/*
9972 ++ * Increase resolution of energy estimation calculations for 64-bit
9973 ++ * architectures. The extra resolution improves decisions made by EAS for
9974 ++ * task placement when two Performance Domains might provide similar energy
9975 ++ * estimation values (without better resolution the values could be equal).
9976 ++ *
9977 ++ * We increase resolution only if we have enough bits to allow this increased
9978 ++ * resolution (i.e. 64-bit). The costs of increasing resolution on 32-bit
9979 ++ * are high and the returns do not justify the increased costs.
9980 ++ */
9981 ++#ifdef CONFIG_64BIT
9982 ++#define em_scale_power(p) ((p) * 1000)
9983 ++#else
9984 ++#define em_scale_power(p) (p)
9985 ++#endif
9986 ++
9987 + struct em_data_callback {
9988 + /**
9989 + * active_power() - Provide power at the next performance state of
9990 +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
9991 +index bb5e7b0a42746..77295af724264 100644
9992 +--- a/include/linux/hrtimer.h
9993 ++++ b/include/linux/hrtimer.h
9994 +@@ -318,16 +318,12 @@ struct clock_event_device;
9995 +
9996 + extern void hrtimer_interrupt(struct clock_event_device *dev);
9997 +
9998 +-extern void clock_was_set_delayed(void);
9999 +-
10000 + extern unsigned int hrtimer_resolution;
10001 +
10002 + #else
10003 +
10004 + #define hrtimer_resolution (unsigned int)LOW_RES_NSEC
10005 +
10006 +-static inline void clock_was_set_delayed(void) { }
10007 +-
10008 + #endif
10009 +
10010 + static inline ktime_t
10011 +@@ -351,7 +347,6 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
10012 + timer->base->get_time());
10013 + }
10014 +
10015 +-extern void clock_was_set(void);
10016 + #ifdef CONFIG_TIMERFD
10017 + extern void timerfd_clock_was_set(void);
10018 + #else
10019 +diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
10020 +index ded90b097e6e8..3f02b818625ef 100644
10021 +--- a/include/linux/local_lock_internal.h
10022 ++++ b/include/linux/local_lock_internal.h
10023 +@@ -14,29 +14,14 @@ typedef struct {
10024 + } local_lock_t;
10025 +
10026 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
10027 +-# define LL_DEP_MAP_INIT(lockname) \
10028 ++# define LOCAL_LOCK_DEBUG_INIT(lockname) \
10029 + .dep_map = { \
10030 + .name = #lockname, \
10031 + .wait_type_inner = LD_WAIT_CONFIG, \
10032 +- .lock_type = LD_LOCK_PERCPU, \
10033 +- }
10034 +-#else
10035 +-# define LL_DEP_MAP_INIT(lockname)
10036 +-#endif
10037 +-
10038 +-#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) }
10039 +-
10040 +-#define __local_lock_init(lock) \
10041 +-do { \
10042 +- static struct lock_class_key __key; \
10043 +- \
10044 +- debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
10045 +- lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
10046 +- LD_WAIT_CONFIG, LD_WAIT_INV, \
10047 +- LD_LOCK_PERCPU); \
10048 +-} while (0)
10049 ++ .lock_type = LD_LOCK_PERCPU, \
10050 ++ }, \
10051 ++ .owner = NULL,
10052 +
10053 +-#ifdef CONFIG_DEBUG_LOCK_ALLOC
10054 + static inline void local_lock_acquire(local_lock_t *l)
10055 + {
10056 + lock_map_acquire(&l->dep_map);
10057 +@@ -51,11 +36,30 @@ static inline void local_lock_release(local_lock_t *l)
10058 + lock_map_release(&l->dep_map);
10059 + }
10060 +
10061 ++static inline void local_lock_debug_init(local_lock_t *l)
10062 ++{
10063 ++ l->owner = NULL;
10064 ++}
10065 + #else /* CONFIG_DEBUG_LOCK_ALLOC */
10066 ++# define LOCAL_LOCK_DEBUG_INIT(lockname)
10067 + static inline void local_lock_acquire(local_lock_t *l) { }
10068 + static inline void local_lock_release(local_lock_t *l) { }
10069 ++static inline void local_lock_debug_init(local_lock_t *l) { }
10070 + #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
10071 +
10072 ++#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
10073 ++
10074 ++#define __local_lock_init(lock) \
10075 ++do { \
10076 ++ static struct lock_class_key __key; \
10077 ++ \
10078 ++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
10079 ++ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
10080 ++ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
10081 ++ LD_LOCK_PERCPU); \
10082 ++ local_lock_debug_init(lock); \
10083 ++} while (0)
10084 ++
10085 + #define __local_lock(lock) \
10086 + do { \
10087 + preempt_disable(); \
10088 +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
10089 +index b0009aa3647f4..6bbae0c3bc0b9 100644
10090 +--- a/include/linux/mlx5/mlx5_ifc.h
10091 ++++ b/include/linux/mlx5/mlx5_ifc.h
10092 +@@ -921,7 +921,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
10093 + u8 scatter_fcs[0x1];
10094 + u8 enhanced_multi_pkt_send_wqe[0x1];
10095 + u8 tunnel_lso_const_out_ip_id[0x1];
10096 +- u8 reserved_at_1c[0x2];
10097 ++ u8 tunnel_lro_gre[0x1];
10098 ++ u8 tunnel_lro_vxlan[0x1];
10099 + u8 tunnel_stateless_gre[0x1];
10100 + u8 tunnel_stateless_vxlan[0x1];
10101 +
10102 +diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
10103 +index d55c746ac56e2..e00ad1cfb1f1d 100644
10104 +--- a/include/linux/power/max17042_battery.h
10105 ++++ b/include/linux/power/max17042_battery.h
10106 +@@ -69,7 +69,7 @@ enum max17042_register {
10107 + MAX17042_RelaxCFG = 0x2A,
10108 + MAX17042_MiscCFG = 0x2B,
10109 + MAX17042_TGAIN = 0x2C,
10110 +- MAx17042_TOFF = 0x2D,
10111 ++ MAX17042_TOFF = 0x2D,
10112 + MAX17042_CGAIN = 0x2E,
10113 + MAX17042_COFF = 0x2F,
10114 +
10115 +diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
10116 +index e91d51ea028bb..65185d1e07ea6 100644
10117 +--- a/include/linux/sunrpc/svc.h
10118 ++++ b/include/linux/sunrpc/svc.h
10119 +@@ -523,6 +523,7 @@ void svc_wake_up(struct svc_serv *);
10120 + void svc_reserve(struct svc_rqst *rqstp, int space);
10121 + struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
10122 + char * svc_print_addr(struct svc_rqst *, char *, size_t);
10123 ++const char * svc_proc_name(const struct svc_rqst *rqstp);
10124 + int svc_encode_result_payload(struct svc_rqst *rqstp,
10125 + unsigned int offset,
10126 + unsigned int length);
10127 +diff --git a/include/linux/time64.h b/include/linux/time64.h
10128 +index 5117cb5b56561..81b9686a20799 100644
10129 +--- a/include/linux/time64.h
10130 ++++ b/include/linux/time64.h
10131 +@@ -25,7 +25,9 @@ struct itimerspec64 {
10132 + #define TIME64_MIN (-TIME64_MAX - 1)
10133 +
10134 + #define KTIME_MAX ((s64)~((u64)1 << 63))
10135 ++#define KTIME_MIN (-KTIME_MAX - 1)
10136 + #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
10137 ++#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)
10138 +
10139 + /*
10140 + * Limits for settimeofday():
10141 +@@ -124,10 +126,13 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts)
10142 + */
10143 + static inline s64 timespec64_to_ns(const struct timespec64 *ts)
10144 + {
10145 +- /* Prevent multiplication overflow */
10146 +- if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
10147 ++ /* Prevent multiplication overflow / underflow */
10148 ++ if (ts->tv_sec >= KTIME_SEC_MAX)
10149 + return KTIME_MAX;
10150 +
10151 ++ if (ts->tv_sec <= KTIME_SEC_MIN)
10152 ++ return KTIME_MIN;
10153 ++
10154 + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
10155 + }
10156 +
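[Editor's note] timespec64_to_ns() now saturates in both directions instead of letting the multiplication wrap. A user-space sketch of the clamped conversion, with the KTIME_* limits numerically matching the definitions in the hunk above:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC  1000000000LL
    #define KTIME_MAX     INT64_MAX
    #define KTIME_MIN     INT64_MIN
    #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
    #define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)

    /* Clamp instead of overflowing/underflowing the multiplication. */
    static int64_t ts_to_ns(int64_t sec, int64_t nsec)
    {
        if (sec >= KTIME_SEC_MAX)
            return KTIME_MAX;
        if (sec <= KTIME_SEC_MIN)
            return KTIME_MIN;
        return sec * NSEC_PER_SEC + nsec;
    }

    int main(void)
    {
        printf("%lld\n", (long long)ts_to_ns(INT64_MAX / 2, 0)); /* saturates */
        printf("%lld\n", (long long)ts_to_ns(1, 500000000));     /* 1500000000 */
        return 0;
    }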
10157 +diff --git a/include/net/dsa.h b/include/net/dsa.h
10158 +index 33f40c1ec379f..048d297623c9a 100644
10159 +--- a/include/net/dsa.h
10160 ++++ b/include/net/dsa.h
10161 +@@ -699,8 +699,6 @@ struct dsa_switch_ops {
10162 + int (*port_bridge_flags)(struct dsa_switch *ds, int port,
10163 + struct switchdev_brport_flags flags,
10164 + struct netlink_ext_ack *extack);
10165 +- int (*port_set_mrouter)(struct dsa_switch *ds, int port, bool mrouter,
10166 +- struct netlink_ext_ack *extack);
10167 +
10168 + /*
10169 + * VLAN support
10170 +diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
10171 +index 298a8d10168b6..fb34b66aefa73 100644
10172 +--- a/include/net/pkt_cls.h
10173 ++++ b/include/net/pkt_cls.h
10174 +@@ -824,10 +824,9 @@ enum tc_htb_command {
10175 + struct tc_htb_qopt_offload {
10176 + struct netlink_ext_ack *extack;
10177 + enum tc_htb_command command;
10178 +- u16 classid;
10179 + u32 parent_classid;
10180 ++ u16 classid;
10181 + u16 qid;
10182 +- u16 moved_qid;
10183 + u64 rate;
10184 + u64 ceil;
10185 + };
10186 +diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
10187 +index e4e44a2b4aa91..0dd30de00e5b4 100644
10188 +--- a/include/trace/events/io_uring.h
10189 ++++ b/include/trace/events/io_uring.h
10190 +@@ -295,14 +295,14 @@ TRACE_EVENT(io_uring_fail_link,
10191 + */
10192 + TRACE_EVENT(io_uring_complete,
10193 +
10194 +- TP_PROTO(void *ctx, u64 user_data, long res, unsigned cflags),
10195 ++ TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags),
10196 +
10197 + TP_ARGS(ctx, user_data, res, cflags),
10198 +
10199 + TP_STRUCT__entry (
10200 + __field( void *, ctx )
10201 + __field( u64, user_data )
10202 +- __field( long, res )
10203 ++ __field( int, res )
10204 + __field( unsigned, cflags )
10205 + ),
10206 +
10207 +@@ -313,7 +313,7 @@ TRACE_EVENT(io_uring_complete,
10208 + __entry->cflags = cflags;
10209 + ),
10210 +
10211 +- TP_printk("ring %p, user_data 0x%llx, result %ld, cflags %x",
10212 ++ TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x",
10213 + __entry->ctx, (unsigned long long)__entry->user_data,
10214 + __entry->res, __entry->cflags)
10215 + );
10216 +diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
10217 +index 861f199896c6a..d323f5a049c88 100644
10218 +--- a/include/trace/events/sunrpc.h
10219 ++++ b/include/trace/events/sunrpc.h
10220 +@@ -1642,7 +1642,7 @@ TRACE_EVENT(svc_process,
10221 + __field(u32, vers)
10222 + __field(u32, proc)
10223 + __string(service, name)
10224 +- __string(procedure, rqst->rq_procinfo->pc_name)
10225 ++ __string(procedure, svc_proc_name(rqst))
10226 + __string(addr, rqst->rq_xprt ?
10227 + rqst->rq_xprt->xpt_remotebuf : "(null)")
10228 + ),
10229 +@@ -1652,7 +1652,7 @@ TRACE_EVENT(svc_process,
10230 + __entry->vers = rqst->rq_vers;
10231 + __entry->proc = rqst->rq_proc;
10232 + __assign_str(service, name);
10233 +- __assign_str(procedure, rqst->rq_procinfo->pc_name);
10234 ++ __assign_str(procedure, svc_proc_name(rqst));
10235 + __assign_str(addr, rqst->rq_xprt ?
10236 + rqst->rq_xprt->xpt_remotebuf : "(null)");
10237 + ),
10238 +@@ -1918,7 +1918,7 @@ TRACE_EVENT(svc_stats_latency,
10239 + TP_STRUCT__entry(
10240 + __field(u32, xid)
10241 + __field(unsigned long, execute)
10242 +- __string(procedure, rqst->rq_procinfo->pc_name)
10243 ++ __string(procedure, svc_proc_name(rqst))
10244 + __string(addr, rqst->rq_xprt->xpt_remotebuf)
10245 + ),
10246 +
10247 +@@ -1926,7 +1926,7 @@ TRACE_EVENT(svc_stats_latency,
10248 + __entry->xid = be32_to_cpu(rqst->rq_xid);
10249 + __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
10250 + rqst->rq_stime));
10251 +- __assign_str(procedure, rqst->rq_procinfo->pc_name);
10252 ++ __assign_str(procedure, svc_proc_name(rqst));
10253 + __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
10254 + ),
10255 +
10256 +diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
10257 +index bf9252c7381e8..5cdff1631608c 100644
10258 +--- a/include/uapi/linux/bpf.h
10259 ++++ b/include/uapi/linux/bpf.h
10260 +@@ -3249,7 +3249,7 @@ union bpf_attr {
10261 + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
10262 + * Description
10263 + * Select a **SO_REUSEPORT** socket from a
10264 +- * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
10265 ++ * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
10266 + * It checks the selected socket is matching the incoming
10267 + * request in the socket buffer.
10268 + * Return
10269 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
10270 +index 49f07e2bf23b9..9d94ac6ff50c4 100644
10271 +--- a/kernel/bpf/verifier.c
10272 ++++ b/kernel/bpf/verifier.c
10273 +@@ -11414,10 +11414,11 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
10274 + * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
10275 + * [0, off) and [off, end) to new locations, so the patched range stays zero
10276 + */
10277 +-static int adjust_insn_aux_data(struct bpf_verifier_env *env,
10278 +- struct bpf_prog *new_prog, u32 off, u32 cnt)
10279 ++static void adjust_insn_aux_data(struct bpf_verifier_env *env,
10280 ++ struct bpf_insn_aux_data *new_data,
10281 ++ struct bpf_prog *new_prog, u32 off, u32 cnt)
10282 + {
10283 +- struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
10284 ++ struct bpf_insn_aux_data *old_data = env->insn_aux_data;
10285 + struct bpf_insn *insn = new_prog->insnsi;
10286 + u32 old_seen = old_data[off].seen;
10287 + u32 prog_len;
10288 +@@ -11430,12 +11431,9 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
10289 + old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
10290 +
10291 + if (cnt == 1)
10292 +- return 0;
10293 ++ return;
10294 + prog_len = new_prog->len;
10295 +- new_data = vzalloc(array_size(prog_len,
10296 +- sizeof(struct bpf_insn_aux_data)));
10297 +- if (!new_data)
10298 +- return -ENOMEM;
10299 ++
10300 + memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
10301 + memcpy(new_data + off + cnt - 1, old_data + off,
10302 + sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
10303 +@@ -11446,7 +11444,6 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
10304 + }
10305 + env->insn_aux_data = new_data;
10306 + vfree(old_data);
10307 +- return 0;
10308 + }
10309 +
10310 + static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
10311 +@@ -11481,6 +11478,14 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
10312 + const struct bpf_insn *patch, u32 len)
10313 + {
10314 + struct bpf_prog *new_prog;
10315 ++ struct bpf_insn_aux_data *new_data = NULL;
10316 ++
10317 ++ if (len > 1) {
10318 ++ new_data = vzalloc(array_size(env->prog->len + len - 1,
10319 ++ sizeof(struct bpf_insn_aux_data)));
10320 ++ if (!new_data)
10321 ++ return NULL;
10322 ++ }
10323 +
10324 + new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
10325 + if (IS_ERR(new_prog)) {
10326 +@@ -11488,10 +11493,10 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
10327 + verbose(env,
10328 + "insn %d cannot be patched due to 16-bit range\n",
10329 + env->insn_aux_data[off].orig_idx);
10330 ++ vfree(new_data);
10331 + return NULL;
10332 + }
10333 +- if (adjust_insn_aux_data(env, new_prog, off, len))
10334 +- return NULL;
10335 ++ adjust_insn_aux_data(env, new_data, new_prog, off, len);
10336 + adjust_subprog_starts(env, off, len);
10337 + adjust_poke_descs(new_prog, off, len);
10338 + return new_prog;
10339 +@@ -12008,6 +12013,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
10340 + if (is_narrower_load && size < target_size) {
10341 + u8 shift = bpf_ctx_narrow_access_offset(
10342 + off, size, size_default) * 8;
10343 ++ if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
10344 ++ verbose(env, "bpf verifier narrow ctx load misconfigured\n");
10345 ++ return -EINVAL;
10346 ++ }
10347 + if (ctx_field_size <= 4) {
10348 + if (shift)
10349 + insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
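
The verifier change above reorders allocation and patching: bpf_patch_insn_data() now vzalloc()s the enlarged insn_aux_data array before calling bpf_patch_insn_single(), so an allocation failure bails out before anything hard to undo has happened, and adjust_insn_aux_data() itself can no longer fail. A generic sketch of the same allocate-before-mutate pattern, in plain userspace C rather than the verifier's own types:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Acquire the new buffer *before* the hard-to-undo step, so the only
     * failure path is a simple return instead of unwinding a patched
     * program. */
    static int grow_and_transform(int **data, size_t *len, size_t extra)
    {
        int *bigger = calloc(*len + extra, sizeof(**data));
        if (!bigger)
            return -1;                /* nothing to undo yet */

        /* The "hard" step happens only after allocation succeeded. */
        memcpy(bigger, *data, *len * sizeof(**data));
        free(*data);
        *data = bigger;
        *len += extra;
        return 0;
    }

    int main(void)
    {
        size_t len = 4;
        int *data = calloc(len, sizeof(*data));

        if (!data || grow_and_transform(&data, &len, 3) != 0)
            return 1;
        printf("len now %zu\n", len);
        free(data);
        return 0;
    }
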
10350 +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
10351 +index adb5190c44296..13b5be6df4da2 100644
10352 +--- a/kernel/cgroup/cpuset.c
10353 ++++ b/kernel/cgroup/cpuset.c
10354 +@@ -1114,7 +1114,7 @@ enum subparts_cmd {
10355 + * cpus_allowed can be granted or an error code will be returned.
10356 + *
10357 + * For partcmd_disable, the cpuset is being transformed from a partition
10358 +- * root back to a non-partition root. any CPUs in cpus_allowed that are in
10359 ++ * root back to a non-partition root. Any CPUs in cpus_allowed that are in
10360 + * parent's subparts_cpus will be taken away from that cpumask and put back
10361 + * into parent's effective_cpus. 0 should always be returned.
10362 + *
10363 +@@ -1148,6 +1148,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
10364 + struct cpuset *parent = parent_cs(cpuset);
10365 + int adding; /* Moving cpus from effective_cpus to subparts_cpus */
10366 + int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
10367 ++ int new_prs;
10368 + bool part_error = false; /* Partition error? */
10369 +
10370 + percpu_rwsem_assert_held(&cpuset_rwsem);
10371 +@@ -1183,6 +1184,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
10372 + * A cpumask update cannot make parent's effective_cpus become empty.
10373 + */
10374 + adding = deleting = false;
10375 ++ new_prs = cpuset->partition_root_state;
10376 + if (cmd == partcmd_enable) {
10377 + cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
10378 + adding = true;
10379 +@@ -1225,7 +1227,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
10380 + /*
10381 + * partcmd_update w/o newmask:
10382 + *
10383 +- * addmask = cpus_allowed & parent->effectiveb_cpus
10384 ++ * addmask = cpus_allowed & parent->effective_cpus
10385 + *
10386 + * Note that parent's subparts_cpus may have been
10387 + * pre-shrunk in case there is a change in the cpu list.
10388 +@@ -1247,11 +1249,11 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
10389 + switch (cpuset->partition_root_state) {
10390 + case PRS_ENABLED:
10391 + if (part_error)
10392 +- cpuset->partition_root_state = PRS_ERROR;
10393 ++ new_prs = PRS_ERROR;
10394 + break;
10395 + case PRS_ERROR:
10396 + if (!part_error)
10397 +- cpuset->partition_root_state = PRS_ENABLED;
10398 ++ new_prs = PRS_ENABLED;
10399 + break;
10400 + }
10401 + /*
10402 +@@ -1260,10 +1262,10 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
10403 + part_error = (prev_prs == PRS_ERROR);
10404 + }
10405 +
10406 +- if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
10407 ++ if (!part_error && (new_prs == PRS_ERROR))
10408 + return 0; /* Nothing needs to be done */
10409 +
10410 +- if (cpuset->partition_root_state == PRS_ERROR) {
10411 ++ if (new_prs == PRS_ERROR) {
10412 + /*
10413 + * Remove all its cpus from parent's subparts_cpus.
10414 + */
10415 +@@ -1272,7 +1274,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
10416 + parent->subparts_cpus);
10417 + }
10418 +
10419 +- if (!adding && !deleting)
10420 ++ if (!adding && !deleting && (new_prs == cpuset->partition_root_state))
10421 + return 0;
10422 +
10423 + /*
10424 +@@ -1299,6 +1301,9 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
10425 + }
10426 +
10427 + parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
10428 ++
10429 ++ if (cpuset->partition_root_state != new_prs)
10430 ++ cpuset->partition_root_state = new_prs;
10431 + spin_unlock_irq(&callback_lock);
10432 +
10433 + return cmd == partcmd_update;
10434 +@@ -1321,6 +1326,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
10435 + struct cpuset *cp;
10436 + struct cgroup_subsys_state *pos_css;
10437 + bool need_rebuild_sched_domains = false;
10438 ++ int new_prs;
10439 +
10440 + rcu_read_lock();
10441 + cpuset_for_each_descendant_pre(cp, pos_css, cs) {
10442 +@@ -1360,17 +1366,18 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
10443 + * update_tasks_cpumask() again for tasks in the parent
10444 + * cpuset if the parent's subparts_cpus changes.
10445 + */
10446 +- if ((cp != cs) && cp->partition_root_state) {
10447 ++ new_prs = cp->partition_root_state;
10448 ++ if ((cp != cs) && new_prs) {
10449 + switch (parent->partition_root_state) {
10450 + case PRS_DISABLED:
10451 + /*
10452 + * If parent is not a partition root or an
10453 +- * invalid partition root, clear the state
10454 +- * state and the CS_CPU_EXCLUSIVE flag.
10455 ++ * invalid partition root, clear its state
10456 ++ * and its CS_CPU_EXCLUSIVE flag.
10457 + */
10458 + WARN_ON_ONCE(cp->partition_root_state
10459 + != PRS_ERROR);
10460 +- cp->partition_root_state = 0;
10461 ++ new_prs = PRS_DISABLED;
10462 +
10463 + /*
10464 + * clear_bit() is an atomic operation and
10465 +@@ -1391,11 +1398,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
10466 + /*
10467 + * When parent is invalid, it has to be too.
10468 + */
10469 +- cp->partition_root_state = PRS_ERROR;
10470 +- if (cp->nr_subparts_cpus) {
10471 +- cp->nr_subparts_cpus = 0;
10472 +- cpumask_clear(cp->subparts_cpus);
10473 +- }
10474 ++ new_prs = PRS_ERROR;
10475 + break;
10476 + }
10477 + }
10478 +@@ -1407,8 +1410,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
10479 + spin_lock_irq(&callback_lock);
10480 +
10481 + cpumask_copy(cp->effective_cpus, tmp->new_cpus);
10482 +- if (cp->nr_subparts_cpus &&
10483 +- (cp->partition_root_state != PRS_ENABLED)) {
10484 ++ if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
10485 + cp->nr_subparts_cpus = 0;
10486 + cpumask_clear(cp->subparts_cpus);
10487 + } else if (cp->nr_subparts_cpus) {
10488 +@@ -1435,6 +1437,10 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
10489 + = cpumask_weight(cp->subparts_cpus);
10490 + }
10491 + }
10492 ++
10493 ++ if (new_prs != cp->partition_root_state)
10494 ++ cp->partition_root_state = new_prs;
10495 ++
10496 + spin_unlock_irq(&callback_lock);
10497 +
10498 + WARN_ON(!is_in_v2_mode() &&
10499 +@@ -1937,34 +1943,32 @@ out:
10500 +
10501 + /*
10502 + * update_prstate - update partition_root_state
10503 +- * cs: the cpuset to update
10504 +- * val: 0 - disabled, 1 - enabled
10505 ++ * cs: the cpuset to update
10506 ++ * new_prs: new partition root state
10507 + *
10508 + * Call with cpuset_mutex held.
10509 + */
10510 +-static int update_prstate(struct cpuset *cs, int val)
10511 ++static int update_prstate(struct cpuset *cs, int new_prs)
10512 + {
10513 +- int err;
10514 ++ int err, old_prs = cs->partition_root_state;
10515 + struct cpuset *parent = parent_cs(cs);
10516 +- struct tmpmasks tmp;
10517 ++ struct tmpmasks tmpmask;
10518 +
10519 +- if ((val != 0) && (val != 1))
10520 +- return -EINVAL;
10521 +- if (val == cs->partition_root_state)
10522 ++ if (old_prs == new_prs)
10523 + return 0;
10524 +
10525 + /*
10526 + * Cannot force a partial or invalid partition root to a full
10527 + * partition root.
10528 + */
10529 +- if (val && cs->partition_root_state)
10530 ++ if (new_prs && (old_prs == PRS_ERROR))
10531 + return -EINVAL;
10532 +
10533 +- if (alloc_cpumasks(NULL, &tmp))
10534 ++ if (alloc_cpumasks(NULL, &tmpmask))
10535 + return -ENOMEM;
10536 +
10537 + err = -EINVAL;
10538 +- if (!cs->partition_root_state) {
10539 ++ if (!old_prs) {
10540 + /*
10541 + * Turning on partition root requires setting the
10542 + * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
10543 +@@ -1978,31 +1982,27 @@ static int update_prstate(struct cpuset *cs, int val)
10544 + goto out;
10545 +
10546 + err = update_parent_subparts_cpumask(cs, partcmd_enable,
10547 +- NULL, &tmp);
10548 ++ NULL, &tmpmask);
10549 + if (err) {
10550 + update_flag(CS_CPU_EXCLUSIVE, cs, 0);
10551 + goto out;
10552 + }
10553 +- cs->partition_root_state = PRS_ENABLED;
10554 + } else {
10555 + /*
10556 + * Turning off partition root will clear the
10557 + * CS_CPU_EXCLUSIVE bit.
10558 + */
10559 +- if (cs->partition_root_state == PRS_ERROR) {
10560 +- cs->partition_root_state = 0;
10561 ++ if (old_prs == PRS_ERROR) {
10562 + update_flag(CS_CPU_EXCLUSIVE, cs, 0);
10563 + err = 0;
10564 + goto out;
10565 + }
10566 +
10567 + err = update_parent_subparts_cpumask(cs, partcmd_disable,
10568 +- NULL, &tmp);
10569 ++ NULL, &tmpmask);
10570 + if (err)
10571 + goto out;
10572 +
10573 +- cs->partition_root_state = 0;
10574 +-
10575 + /* Turning off CS_CPU_EXCLUSIVE will not return error */
10576 + update_flag(CS_CPU_EXCLUSIVE, cs, 0);
10577 + }
10578 +@@ -2015,11 +2015,17 @@ static int update_prstate(struct cpuset *cs, int val)
10579 + update_tasks_cpumask(parent);
10580 +
10581 + if (parent->child_ecpus_count)
10582 +- update_sibling_cpumasks(parent, cs, &tmp);
10583 ++ update_sibling_cpumasks(parent, cs, &tmpmask);
10584 +
10585 + rebuild_sched_domains_locked();
10586 + out:
10587 +- free_cpumasks(NULL, &tmp);
10588 ++ if (!err) {
10589 ++ spin_lock_irq(&callback_lock);
10590 ++ cs->partition_root_state = new_prs;
10591 ++ spin_unlock_irq(&callback_lock);
10592 ++ }
10593 ++
10594 ++ free_cpumasks(NULL, &tmpmask);
10595 + return err;
10596 + }
10597 +
10598 +@@ -3060,7 +3066,7 @@ retry:
10599 + goto retry;
10600 + }
10601 +
10602 +- parent = parent_cs(cs);
10603 ++ parent = parent_cs(cs);
10604 + compute_effective_cpumask(&new_cpus, cs, parent);
10605 + nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
10606 +
10607 +@@ -3082,8 +3088,10 @@ retry:
10608 + if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
10609 + (parent->partition_root_state == PRS_ERROR))) {
10610 + if (cs->nr_subparts_cpus) {
10611 ++ spin_lock_irq(&callback_lock);
10612 + cs->nr_subparts_cpus = 0;
10613 + cpumask_clear(cs->subparts_cpus);
10614 ++ spin_unlock_irq(&callback_lock);
10615 + compute_effective_cpumask(&new_cpus, cs, parent);
10616 + }
10617 +
10618 +@@ -3097,7 +3105,9 @@ retry:
10619 + cpumask_empty(&new_cpus)) {
10620 + update_parent_subparts_cpumask(cs, partcmd_disable,
10621 + NULL, tmp);
10622 ++ spin_lock_irq(&callback_lock);
10623 + cs->partition_root_state = PRS_ERROR;
10624 ++ spin_unlock_irq(&callback_lock);
10625 + }
10626 + cpuset_force_rebuild();
10627 + }
10628 +@@ -3168,6 +3178,13 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
10629 + cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
10630 + mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
10631 +
10632 ++ /*
10633 ++ * In the rare case that hotplug removes all the cpus in subparts_cpus,
10634 ++ * assume that the cpus have been updated.
10635 ++ */
10636 ++ if (!cpus_updated && top_cpuset.nr_subparts_cpus)
10637 ++ cpus_updated = true;
10638 ++
10639 + /* synchronize cpus_allowed to cpu_active_mask */
10640 + if (cpus_updated) {
10641 + spin_lock_irq(&callback_lock);
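
A common thread in the cpuset hunks: the next partition_root_state is computed into a local new_prs and published only under callback_lock, instead of being written in place at each decision point. A condensed sketch of that compute-then-commit pattern, with a pthread spinlock standing in for callback_lock and made-up PRS values:

    #include <pthread.h>
    #include <stdio.h>

    #define PRS_ENABLED  1        /* stand-in values, not the kernel's */
    #define PRS_ERROR   -1

    static pthread_spinlock_t state_lock;    /* stands in for callback_lock */
    static int partition_state = PRS_ENABLED;

    static void update_state(int part_error)
    {
        int new_prs = partition_state;        /* work on a local copy */

        if (part_error)
            new_prs = PRS_ERROR;

        /* ... further decisions would refine new_prs here ... */

        if (new_prs == partition_state)
            return;                           /* nothing to publish */

        pthread_spin_lock(&state_lock);
        partition_state = new_prs;            /* one atomic publish point */
        pthread_spin_unlock(&state_lock);
    }

    int main(void)
    {
        pthread_spin_init(&state_lock, PTHREAD_PROCESS_PRIVATE);
        update_state(1);
        printf("state=%d\n", partition_state);
        return 0;
    }
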
10642 +diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
10643 +index f7e1d0eccdbc6..246efc74e3f34 100644
10644 +--- a/kernel/cpu_pm.c
10645 ++++ b/kernel/cpu_pm.c
10646 +@@ -13,19 +13,32 @@
10647 + #include <linux/spinlock.h>
10648 + #include <linux/syscore_ops.h>
10649 +
10650 +-static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
10651 ++/*
10652 ++ * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
10653 ++ * Notifications for cpu_pm will be issued by the idle task itself, which can
10654 ++ * never block, IOW it requires using a raw_spinlock_t.
10655 ++ */
10656 ++static struct {
10657 ++ struct raw_notifier_head chain;
10658 ++ raw_spinlock_t lock;
10659 ++} cpu_pm_notifier = {
10660 ++ .chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
10661 ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
10662 ++};
10663 +
10664 + static int cpu_pm_notify(enum cpu_pm_event event)
10665 + {
10666 + int ret;
10667 +
10668 + /*
10669 +- * atomic_notifier_call_chain has a RCU read critical section, which
10670 +- * could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let
10671 +- * RCU know this.
10672 ++ * This introduces an RCU read critical section, which could be
10673 ++ * dysfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
10674 ++ * this.
10675 + */
10676 + rcu_irq_enter_irqson();
10677 +- ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL);
10678 ++ rcu_read_lock();
10679 ++ ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
10680 ++ rcu_read_unlock();
10681 + rcu_irq_exit_irqson();
10682 +
10683 + return notifier_to_errno(ret);
10684 +@@ -33,10 +46,13 @@ static int cpu_pm_notify(enum cpu_pm_event event)
10685 +
10686 + static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
10687 + {
10688 ++ unsigned long flags;
10689 + int ret;
10690 +
10691 + rcu_irq_enter_irqson();
10692 +- ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL);
10693 ++ raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
10694 ++ ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
10695 ++ raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
10696 + rcu_irq_exit_irqson();
10697 +
10698 + return notifier_to_errno(ret);
10699 +@@ -49,12 +65,17 @@ static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event ev
10700 + * Add a driver to a list of drivers that are notified about
10701 + * CPU and CPU cluster low power entry and exit.
10702 + *
10703 +- * This function may sleep, and has the same return conditions as
10704 +- * raw_notifier_chain_register.
10705 ++ * This function has the same return conditions as raw_notifier_chain_register.
10706 + */
10707 + int cpu_pm_register_notifier(struct notifier_block *nb)
10708 + {
10709 +- return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
10710 ++ unsigned long flags;
10711 ++ int ret;
10712 ++
10713 ++ raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
10714 ++ ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
10715 ++ raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
10716 ++ return ret;
10717 + }
10718 + EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
10719 +
10720 +@@ -64,12 +85,17 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
10721 + *
10722 + * Remove a driver from the CPU PM notifier list.
10723 + *
10724 +- * This function may sleep, and has the same return conditions as
10725 +- * raw_notifier_chain_unregister.
10726 ++ * This function has the same return conditions as raw_notifier_chain_unregister.
10727 + */
10728 + int cpu_pm_unregister_notifier(struct notifier_block *nb)
10729 + {
10730 +- return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
10731 ++ unsigned long flags;
10732 ++ int ret;
10733 ++
10734 ++ raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
10735 ++ ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
10736 ++ raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
10737 ++ return ret;
10738 + }
10739 + EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
10740 +
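
The cpu_pm conversion swaps the atomic notifier (whose internal spinlock_t becomes sleepable under PREEMPT_RT) for a raw_notifier_head guarded by a raw_spinlock_t, so the idle task can post notifications without ever blocking. A userspace sketch of the shape of it: a callback chain whose mutation is guarded by a plain spinlock. The kernel additionally walks the chain under RCU, which this sketch omits:

    #include <pthread.h>
    #include <stdio.h>

    struct notifier {
        int (*fn)(unsigned long event);
        struct notifier *next;
    };

    static pthread_spinlock_t chain_lock;
    static struct notifier *chain_head;

    /* Only registration/unregistration takes the lock; callers never
     * sleep while holding it. */
    static void register_notifier(struct notifier *nb)
    {
        pthread_spin_lock(&chain_lock);
        nb->next = chain_head;
        chain_head = nb;
        pthread_spin_unlock(&chain_lock);
    }

    static int call_chain(unsigned long event)
    {
        for (struct notifier *nb = chain_head; nb; nb = nb->next)
            if (nb->fn(event))
                return -1;
        return 0;
    }

    static int hello(unsigned long event)
    {
        printf("event %lu\n", event);
        return 0;
    }

    int main(void)
    {
        struct notifier nb = { .fn = hello };

        pthread_spin_init(&chain_lock, PTHREAD_PROCESS_PRIVATE);
        register_notifier(&nb);
        return call_chain(1);
    }
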
10741 +diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
10742 +index 4d2a702d7aa95..c43e2ac2f8def 100644
10743 +--- a/kernel/irq/timings.c
10744 ++++ b/kernel/irq/timings.c
10745 +@@ -799,12 +799,14 @@ static int __init irq_timings_test_irqs(struct timings_intervals *ti)
10746 +
10747 + __irq_timings_store(irq, irqs, ti->intervals[i]);
10748 + if (irqs->circ_timings[i & IRQ_TIMINGS_MASK] != index) {
10749 ++ ret = -EBADSLT;
10750 + pr_err("Failed to store in the circular buffer\n");
10751 + goto out;
10752 + }
10753 + }
10754 +
10755 + if (irqs->count != ti->count) {
10756 ++ ret = -ERANGE;
10757 + pr_err("Count differs\n");
10758 + goto out;
10759 + }
10760 +diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
10761 +index d2df5e68b5039..fb30e1436dfb3 100644
10762 +--- a/kernel/locking/mutex.c
10763 ++++ b/kernel/locking/mutex.c
10764 +@@ -928,7 +928,6 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
10765 + struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
10766 + {
10767 + struct mutex_waiter waiter;
10768 +- bool first = false;
10769 + struct ww_mutex *ww;
10770 + int ret;
10771 +
10772 +@@ -1007,6 +1006,8 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
10773 +
10774 + set_current_state(state);
10775 + for (;;) {
10776 ++ bool first;
10777 ++
10778 + /*
10779 + * Once we hold wait_lock, we're serialized against
10780 + * mutex_unlock() handing the lock off to us, do a trylock
10781 +@@ -1035,15 +1036,9 @@ __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
10782 + spin_unlock(&lock->wait_lock);
10783 + schedule_preempt_disabled();
10784 +
10785 +- /*
10786 +- * ww_mutex needs to always recheck its position since its waiter
10787 +- * list is not FIFO ordered.
10788 +- */
10789 +- if (ww_ctx || !first) {
10790 +- first = __mutex_waiter_is_first(lock, &waiter);
10791 +- if (first)
10792 +- __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
10793 +- }
10794 ++ first = __mutex_waiter_is_first(lock, &waiter);
10795 ++ if (first)
10796 ++ __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
10797 +
10798 + set_current_state(state);
10799 + /*
10800 +diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
10801 +index 0f4530b3a8cd9..a332ccd829e24 100644
10802 +--- a/kernel/power/energy_model.c
10803 ++++ b/kernel/power/energy_model.c
10804 +@@ -170,7 +170,9 @@ static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
10805 + /* Compute the cost of each performance state. */
10806 + fmax = (u64) table[nr_states - 1].frequency;
10807 + for (i = 0; i < nr_states; i++) {
10808 +- table[i].cost = div64_u64(fmax * table[i].power,
10809 ++ unsigned long power_res = em_scale_power(table[i].power);
10810 ++
10811 ++ table[i].cost = div64_u64(fmax * power_res,
10812 + table[i].frequency);
10813 + }
10814 +
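
The energy-model fix runs each state's power value through em_scale_power() before the 64-bit division, presumably to bring abstract and microwatt power units onto a common scale. A toy computation of the cost formula itself, cost[i] = fmax * power[i] / freq[i], with made-up numbers; the scaling step is omitted since its exact factor is an assumption:

    #include <stdio.h>
    #include <stdint.h>

    /* 64-bit arithmetic keeps the fmax * power product from overflowing. */
    int main(void)
    {
        const uint64_t freq[]  = { 500000, 1000000, 2000000 };  /* kHz */
        const uint64_t power[] = { 100000, 300000, 900000 };    /* uW  */
        const uint64_t fmax = freq[2];

        for (int i = 0; i < 3; i++) {
            uint64_t cost = fmax * power[i] / freq[i];
            printf("state %d: cost=%llu\n", i, (unsigned long long)cost);
        }
        return 0;
    }
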
10815 +diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
10816 +index 6c76988cc019f..0e7a60706d1c0 100644
10817 +--- a/kernel/rcu/tree_stall.h
10818 ++++ b/kernel/rcu/tree_stall.h
10819 +@@ -7,6 +7,8 @@
10820 + * Author: Paul E. McKenney <paulmck@×××××××××.com>
10821 + */
10822 +
10823 ++#include <linux/kvm_para.h>
10824 ++
10825 + //////////////////////////////////////////////////////////////////////////////
10826 + //
10827 + // Controlling CPU stall warnings, including delay calculation.
10828 +@@ -267,8 +269,10 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
10829 + struct task_struct *ts[8];
10830 +
10831 + lockdep_assert_irqs_disabled();
10832 +- if (!rcu_preempt_blocked_readers_cgp(rnp))
10833 ++ if (!rcu_preempt_blocked_readers_cgp(rnp)) {
10834 ++ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
10835 + return 0;
10836 ++ }
10837 + pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
10838 + rnp->level, rnp->grplo, rnp->grphi);
10839 + t = list_entry(rnp->gp_tasks->prev,
10840 +@@ -280,8 +284,8 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
10841 + break;
10842 + }
10843 + raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
10844 +- for (i--; i; i--) {
10845 +- t = ts[i];
10846 ++ while (i) {
10847 ++ t = ts[--i];
10848 + if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
10849 + pr_cont(" P%d", t->pid);
10850 + else
10851 +@@ -696,6 +700,14 @@ static void check_cpu_stall(struct rcu_data *rdp)
10852 + (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
10853 + cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
10854 +
10855 ++ /*
10856 ++ * If a virtual machine is stopped by the host it can look to
10857 ++ * the watchdog like an RCU stall. Check to see if the host
10858 ++ * stopped the vm.
10859 ++ */
10860 ++ if (kvm_check_and_clear_guest_paused())
10861 ++ return;
10862 ++
10863 + /* We haven't checked in, so go dump stack. */
10864 + print_cpu_stall(gps);
10865 + if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
10866 +@@ -705,6 +717,14 @@ static void check_cpu_stall(struct rcu_data *rdp)
10867 + ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
10868 + cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
10869 +
10870 ++ /*
10871 ++ * If a virtual machine is stopped by the host it can look to
10872 ++ * the watchdog like an RCU stall. Check to see if the host
10873 ++ * stopped the vm.
10874 ++ */
10875 ++ if (kvm_check_and_clear_guest_paused())
10876 ++ return;
10877 ++
10878 + /* They had a few time units to dump stack, so complain. */
10879 + print_other_cpu_stall(gs2, gps);
10880 + if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
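
Besides the kvm_check_and_clear_guest_paused() guards (so a guest paused by its host is not misreported as an RCU stall), the hunk above fixes an off-by-one in the stalled-task printout: with i collected entries, the old loop never visited ts[0]. A stand-alone demonstration of the two loop shapes:

    #include <stdio.h>

    int main(void)
    {
        int ts[] = { 10, 11, 12 };
        int i;

        i = 3;
        printf("old loop visits:");
        for (i--; i; i--)
            printf(" ts[%d]", i);    /* indices 2, 1 -- misses 0 */
        printf("\n");

        i = 3;
        printf("new loop visits:");
        while (i) {
            --i;
            printf(" ts[%d]", i);    /* indices 2, 1, 0 */
        }
        printf("\n");
        return 0;
    }
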
10881 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
10882 +index f3b27c6c51535..a2403432f3abb 100644
10883 +--- a/kernel/sched/core.c
10884 ++++ b/kernel/sched/core.c
10885 +@@ -1633,6 +1633,23 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
10886 + uclamp_rq_dec_id(rq, p, clamp_id);
10887 + }
10888 +
10889 ++static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
10890 ++ enum uclamp_id clamp_id)
10891 ++{
10892 ++ if (!p->uclamp[clamp_id].active)
10893 ++ return;
10894 ++
10895 ++ uclamp_rq_dec_id(rq, p, clamp_id);
10896 ++ uclamp_rq_inc_id(rq, p, clamp_id);
10897 ++
10898 ++ /*
10899 ++ * Make sure to clear the idle flag if we've transiently reached 0
10900 ++ * active tasks on rq.
10901 ++ */
10902 ++ if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
10903 ++ rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
10904 ++}
10905 ++
10906 + static inline void
10907 + uclamp_update_active(struct task_struct *p)
10908 + {
10909 +@@ -1656,12 +1673,8 @@ uclamp_update_active(struct task_struct *p)
10910 + * affecting a valid clamp bucket, the next time it's enqueued,
10911 + * it will already see the updated clamp bucket value.
10912 + */
10913 +- for_each_clamp_id(clamp_id) {
10914 +- if (p->uclamp[clamp_id].active) {
10915 +- uclamp_rq_dec_id(rq, p, clamp_id);
10916 +- uclamp_rq_inc_id(rq, p, clamp_id);
10917 +- }
10918 +- }
10919 ++ for_each_clamp_id(clamp_id)
10920 ++ uclamp_rq_reinc_id(rq, p, clamp_id);
10921 +
10922 + task_rq_unlock(rq, p, &rf);
10923 + }
10924 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
10925 +index aaacd6cfd42f0..e94314633b39d 100644
10926 +--- a/kernel/sched/deadline.c
10927 ++++ b/kernel/sched/deadline.c
10928 +@@ -1733,6 +1733,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
10929 + */
10930 + raw_spin_rq_lock(rq);
10931 + if (p->dl.dl_non_contending) {
10932 ++ update_rq_clock(rq);
10933 + sub_running_bw(&p->dl, &rq->dl);
10934 + p->dl.dl_non_contending = 0;
10935 + /*
10936 +@@ -2741,7 +2742,7 @@ void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
10937 + dl_se->dl_runtime = attr->sched_runtime;
10938 + dl_se->dl_deadline = attr->sched_deadline;
10939 + dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
10940 +- dl_se->flags = attr->sched_flags;
10941 ++ dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
10942 + dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
10943 + dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
10944 + }
10945 +@@ -2754,7 +2755,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
10946 + attr->sched_runtime = dl_se->dl_runtime;
10947 + attr->sched_deadline = dl_se->dl_deadline;
10948 + attr->sched_period = dl_se->dl_period;
10949 +- attr->sched_flags = dl_se->flags;
10950 ++ attr->sched_flags &= ~SCHED_DL_FLAGS;
10951 ++ attr->sched_flags |= dl_se->flags;
10952 + }
10953 +
10954 + /*
10955 +@@ -2851,7 +2853,7 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
10956 + if (dl_se->dl_runtime != attr->sched_runtime ||
10957 + dl_se->dl_deadline != attr->sched_deadline ||
10958 + dl_se->dl_period != attr->sched_period ||
10959 +- dl_se->flags != attr->sched_flags)
10960 ++ dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
10961 + return true;
10962 +
10963 + return false;
10964 +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
10965 +index 0c5ec2776ddf0..7e08e3d947c20 100644
10966 +--- a/kernel/sched/debug.c
10967 ++++ b/kernel/sched/debug.c
10968 +@@ -388,6 +388,13 @@ void update_sched_domain_debugfs(void)
10969 + {
10970 + int cpu, i;
10971 +
10972 ++ /*
10973 ++ * This can unfortunately be invoked before sched_debug_init() creates
10974 ++ * the debug directory. Don't touch sd_sysctl_cpus until then.
10975 ++ */
10976 ++ if (!debugfs_sched)
10977 ++ return;
10978 ++
10979 + if (!cpumask_available(sd_sysctl_cpus)) {
10980 + if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
10981 + return;
10982 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
10983 +index 44c452072a1b0..30a6984a58f71 100644
10984 +--- a/kernel/sched/fair.c
10985 ++++ b/kernel/sched/fair.c
10986 +@@ -1486,7 +1486,7 @@ static inline bool is_core_idle(int cpu)
10987 + if (cpu == sibling)
10988 + continue;
10989 +
10990 +- if (!idle_cpu(cpu))
10991 ++ if (!idle_cpu(sibling))
10992 + return false;
10993 + }
10994 + #endif
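
The one-character fair.c fix is a classic wrong-loop-variable bug: while iterating the SMT siblings of cpu, the idleness test has to inspect the sibling, not the anchor cpu, which the loop explicitly skips. A minimal stand-alone rendering of the pattern with a fake 4-CPU core:

    #include <stdio.h>

    static int idle[4] = { 1, 0, 1, 1 };    /* cpu1 is busy */

    /* Fixed form: test the sibling being iterated. The buggy version
     * tested idle[cpu] each time, i.e. a CPU the loop skips anyway. */
    static int is_core_idle(int cpu)
    {
        for (int sibling = 0; sibling < 4; sibling++) {
            if (sibling == cpu)
                continue;
            if (!idle[sibling])
                return 0;
        }
        return 1;
    }

    int main(void)
    {
        printf("core idle, anchored at cpu0: %d\n", is_core_idle(0)); /* 0 */
        printf("core idle, anchored at cpu1: %d\n", is_core_idle(1)); /* 1 */
        return 0;
    }
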
10995 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
10996 +index ddefb0419d7ae..d53d197708666 100644
10997 +--- a/kernel/sched/sched.h
10998 ++++ b/kernel/sched/sched.h
10999 +@@ -227,6 +227,8 @@ static inline void update_avg(u64 *avg, u64 sample)
11000 + */
11001 + #define SCHED_FLAG_SUGOV 0x10000000
11002 +
11003 ++#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
11004 ++
11005 + static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
11006 + {
11007 + #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
11008 +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
11009 +index b77ad49dc14f6..4e8698e62f075 100644
11010 +--- a/kernel/sched/topology.c
11011 ++++ b/kernel/sched/topology.c
11012 +@@ -1482,6 +1482,8 @@ int sched_max_numa_distance;
11013 + static int *sched_domains_numa_distance;
11014 + static struct cpumask ***sched_domains_numa_masks;
11015 + int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
11016 ++
11017 ++static unsigned long __read_mostly *sched_numa_onlined_nodes;
11018 + #endif
11019 +
11020 + /*
11021 +@@ -1833,6 +1835,16 @@ void sched_init_numa(void)
11022 + sched_domains_numa_masks[i][j] = mask;
11023 +
11024 + for_each_node(k) {
11025 ++ /*
11026 ++ * Distance information can be unreliable for
11027 ++ * offline nodes, so defer building a node's
11028 ++ * masks until it is brought up.
11029 ++ * This relies on all unique distance values
11030 ++ * still being visible at init time.
11031 ++ */
11032 ++ if (!node_online(j))
11033 ++ continue;
11034 ++
11035 + if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
11036 + sched_numa_warn("Node-distance not symmetric");
11037 +
11038 +@@ -1886,6 +1898,53 @@ void sched_init_numa(void)
11039 + sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];
11040 +
11041 + init_numa_topology_type();
11042 ++
11043 ++ sched_numa_onlined_nodes = bitmap_alloc(nr_node_ids, GFP_KERNEL);
11044 ++ if (!sched_numa_onlined_nodes)
11045 ++ return;
11046 ++
11047 ++ bitmap_zero(sched_numa_onlined_nodes, nr_node_ids);
11048 ++ for_each_online_node(i)
11049 ++ bitmap_set(sched_numa_onlined_nodes, i, 1);
11050 ++}
11051 ++
11052 ++static void __sched_domains_numa_masks_set(unsigned int node)
11053 ++{
11054 ++ int i, j;
11055 ++
11056 ++ /*
11057 ++ * NUMA masks are not built for offline nodes in sched_init_numa().
11058 ++ * Thus, when a CPU of a never-onlined-before node gets plugged in,
11059 ++ * adding that new CPU to the right NUMA masks is not sufficient: the
11060 ++ * masks of that CPU's node must also be updated.
11061 ++ */
11062 ++ if (test_bit(node, sched_numa_onlined_nodes))
11063 ++ return;
11064 ++
11065 ++ bitmap_set(sched_numa_onlined_nodes, node, 1);
11066 ++
11067 ++ for (i = 0; i < sched_domains_numa_levels; i++) {
11068 ++ for (j = 0; j < nr_node_ids; j++) {
11069 ++ if (!node_online(j) || node == j)
11070 ++ continue;
11071 ++
11072 ++ if (node_distance(j, node) > sched_domains_numa_distance[i])
11073 ++ continue;
11074 ++
11075 ++ /* Add remote nodes in our masks */
11076 ++ cpumask_or(sched_domains_numa_masks[i][node],
11077 ++ sched_domains_numa_masks[i][node],
11078 ++ sched_domains_numa_masks[0][j]);
11079 ++ }
11080 ++ }
11081 ++
11082 ++ /*
11083 ++ * A new node has been brought up, potentially changing the topology
11084 ++ * classification.
11085 ++ *
11086 ++ * Note that this is racy vs any use of sched_numa_topology_type :/
11087 ++ */
11088 ++ init_numa_topology_type();
11089 + }
11090 +
11091 + void sched_domains_numa_masks_set(unsigned int cpu)
11092 +@@ -1893,8 +1952,14 @@ void sched_domains_numa_masks_set(unsigned int cpu)
11093 + int node = cpu_to_node(cpu);
11094 + int i, j;
11095 +
11096 ++ __sched_domains_numa_masks_set(node);
11097 ++
11098 + for (i = 0; i < sched_domains_numa_levels; i++) {
11099 + for (j = 0; j < nr_node_ids; j++) {
11100 ++ if (!node_online(j))
11101 ++ continue;
11102 ++
11103 ++ /* Set ourselves in the remote node's masks */
11104 + if (node_distance(j, node) <= sched_domains_numa_distance[i])
11105 + cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
11106 + }
11107 +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
11108 +index 4a66725b1d4ac..5af7584734888 100644
11109 +--- a/kernel/time/hrtimer.c
11110 ++++ b/kernel/time/hrtimer.c
11111 +@@ -758,22 +758,6 @@ static void hrtimer_switch_to_hres(void)
11112 + retrigger_next_event(NULL);
11113 + }
11114 +
11115 +-static void clock_was_set_work(struct work_struct *work)
11116 +-{
11117 +- clock_was_set();
11118 +-}
11119 +-
11120 +-static DECLARE_WORK(hrtimer_work, clock_was_set_work);
11121 +-
11122 +-/*
11123 +- * Called from timekeeping and resume code to reprogram the hrtimer
11124 +- * interrupt device on all cpus.
11125 +- */
11126 +-void clock_was_set_delayed(void)
11127 +-{
11128 +- schedule_work(&hrtimer_work);
11129 +-}
11130 +-
11131 + #else
11132 +
11133 + static inline int hrtimer_is_hres_enabled(void) { return 0; }
11134 +@@ -891,6 +875,22 @@ void clock_was_set(void)
11135 + timerfd_clock_was_set();
11136 + }
11137 +
11138 ++static void clock_was_set_work(struct work_struct *work)
11139 ++{
11140 ++ clock_was_set();
11141 ++}
11142 ++
11143 ++static DECLARE_WORK(hrtimer_work, clock_was_set_work);
11144 ++
11145 ++/*
11146 ++ * Called from timekeeping and resume code to reprogram the hrtimer
11147 ++ * interrupt device on all cpus and to notify timerfd.
11148 ++ */
11149 ++void clock_was_set_delayed(void)
11150 ++{
11151 ++ schedule_work(&hrtimer_work);
11152 ++}
11153 ++
11154 + /*
11155 + * During resume we might have to reprogram the high resolution timer
11156 + * interrupt on all online CPUs. However, all other CPUs will be
11157 +@@ -1030,12 +1030,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
11158 + * remove hrtimer, called with base lock held
11159 + */
11160 + static inline int
11161 +-remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
11162 ++remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
11163 ++ bool restart, bool keep_local)
11164 + {
11165 + u8 state = timer->state;
11166 +
11167 + if (state & HRTIMER_STATE_ENQUEUED) {
11168 +- int reprogram;
11169 ++ bool reprogram;
11170 +
11171 + /*
11172 + * Remove the timer and force reprogramming when high
11173 +@@ -1048,8 +1049,16 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
11174 + debug_deactivate(timer);
11175 + reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
11176 +
11177 ++ /*
11178 ++ * If the timer is not restarted then reprogramming is
11179 ++ * required if the timer is local. If it is local and about
11180 ++ * to be restarted, avoid programming it twice (on removal
11181 ++ * and a moment later when it's requeued).
11182 ++ */
11183 + if (!restart)
11184 + state = HRTIMER_STATE_INACTIVE;
11185 ++ else
11186 ++ reprogram &= !keep_local;
11187 +
11188 + __remove_hrtimer(timer, base, state, reprogram);
11189 + return 1;
11190 +@@ -1103,9 +1112,31 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
11191 + struct hrtimer_clock_base *base)
11192 + {
11193 + struct hrtimer_clock_base *new_base;
11194 ++ bool force_local, first;
11195 ++
11196 ++ /*
11197 ++ * If the timer is on the local cpu base and is the first expiring
11198 ++ * timer then this might end up reprogramming the hardware twice
11199 ++ * (on removal and on enqueue). To avoid that, prevent the
11200 ++ * reprogram on removal, keep the timer local to the current CPU
11201 ++ * and enforce reprogramming after it is queued no matter whether
11202 ++ * it is the new first expiring timer again or not.
11203 ++ */
11204 ++ force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
11205 ++ force_local &= base->cpu_base->next_timer == timer;
11206 +
11207 +- /* Remove an active timer from the queue: */
11208 +- remove_hrtimer(timer, base, true);
11209 ++ /*
11210 ++ * Remove an active timer from the queue. In case it is not queued
11211 ++ * on the current CPU, make sure that remove_hrtimer() updates the
11212 ++ * remote data correctly.
11213 ++ *
11214 ++ * If it's on the current CPU and the first expiring timer, then
11215 ++ * skip reprogramming, keep the timer local and enforce
11216 ++ * reprogramming later if it was the first expiring timer. This
11217 ++ * avoids programming the underlying clock event twice (once at
11218 ++ * removal and once after enqueue).
11219 ++ */
11220 ++ remove_hrtimer(timer, base, true, force_local);
11221 +
11222 + if (mode & HRTIMER_MODE_REL)
11223 + tim = ktime_add_safe(tim, base->get_time());
11224 +@@ -1115,9 +1146,24 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
11225 + hrtimer_set_expires_range_ns(timer, tim, delta_ns);
11226 +
11227 + /* Switch the timer base, if necessary: */
11228 +- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
11229 ++ if (!force_local) {
11230 ++ new_base = switch_hrtimer_base(timer, base,
11231 ++ mode & HRTIMER_MODE_PINNED);
11232 ++ } else {
11233 ++ new_base = base;
11234 ++ }
11235 ++
11236 ++ first = enqueue_hrtimer(timer, new_base, mode);
11237 ++ if (!force_local)
11238 ++ return first;
11239 +
11240 +- return enqueue_hrtimer(timer, new_base, mode);
11241 ++ /*
11242 ++ * Timer was forced to stay on the current CPU to avoid
11243 ++ * reprogramming on removal and enqueue. Force reprogram the
11244 ++ * hardware by evaluating the new first expiring timer.
11245 ++ */
11246 ++ hrtimer_force_reprogram(new_base->cpu_base, 1);
11247 ++ return 0;
11248 + }
11249 +
11250 + /**
11251 +@@ -1183,7 +1229,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
11252 + base = lock_hrtimer_base(timer, &flags);
11253 +
11254 + if (!hrtimer_callback_running(timer))
11255 +- ret = remove_hrtimer(timer, base, false);
11256 ++ ret = remove_hrtimer(timer, base, false, false);
11257 +
11258 + unlock_hrtimer_base(timer, &flags);
11259 +
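
The hrtimer rework above is about not programming the clock event device twice when the first-expiring timer on the local CPU is restarted: removal skips the reprogram (the keep_local/force_local plumbing), the timer is kept from migrating, and one hrtimer_force_reprogram() after the enqueue settles the hardware. A condensed sketch of that policy; the predicates and counter are stand-ins, not the kernel structures:

    #include <stdio.h>
    #include <stdbool.h>

    struct timer { int id; };

    /* Hard-wired stand-ins for the real cpu_base checks. */
    static bool on_local_cpu(const struct timer *t)     { (void)t; return true; }
    static bool is_next_expiring(const struct timer *t) { (void)t; return true; }

    static int hw_programs;    /* counts clock-event reprogram operations */

    static void restart_timer(struct timer *t)
    {
        bool force_local = on_local_cpu(t) && is_next_expiring(t);

        /* removal step: reprogram only when the timer may migrate away */
        if (!force_local)
            hw_programs++;

        /* ... expiry updated, timer re-enqueued (locally if force_local) ... */

        /* a single reprogram after enqueue covers the local-restart case */
        hw_programs++;
    }

    int main(void)
    {
        struct timer t = { .id = 1 };

        restart_timer(&t);
        printf("hardware programmed %d time(s)\n", hw_programs); /* 1 */
        return 0;
    }
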
11260 +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
11261 +index 517be7fd175ef..a002685f688d6 100644
11262 +--- a/kernel/time/posix-cpu-timers.c
11263 ++++ b/kernel/time/posix-cpu-timers.c
11264 +@@ -1346,8 +1346,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
11265 + }
11266 + }
11267 +
11268 +- if (!*newval)
11269 +- return;
11270 + *newval += now;
11271 + }
11272 +
11273 +diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
11274 +index 6a742a29e545f..cd610faa25235 100644
11275 +--- a/kernel/time/tick-internal.h
11276 ++++ b/kernel/time/tick-internal.h
11277 +@@ -165,3 +165,6 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
11278 +
11279 + extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
11280 + void timer_clear_idle(void);
11281 ++
11282 ++void clock_was_set(void);
11283 ++void clock_was_set_delayed(void);
11284 +diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
11285 +index 9a75ca3f7edf9..bc81419f400c5 100644
11286 +--- a/lib/mpi/mpiutil.c
11287 ++++ b/lib/mpi/mpiutil.c
11288 +@@ -148,7 +148,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
11289 + return 0; /* no need to do it */
11290 +
11291 + if (a->d) {
11292 +- p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
11293 ++ p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
11294 + if (!p)
11295 + return -ENOMEM;
11296 + memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
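
mpi_resize() copies only the old a->alloced limbs into the grown array, so with kmalloc_array() the tail limbs were left holding stale heap contents; kcalloc() zeroes the whole allocation up front. A small userspace demonstration of why the zeroing matters for this copy pattern:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        size_t alloced = 2, nlimbs = 4;
        unsigned long old[2] = { 0xAAAA, 0xBBBB };

        unsigned long *p = calloc(nlimbs, sizeof(*p));  /* ~kcalloc */
        if (!p)
            return 1;
        memcpy(p, old, alloced * sizeof(*p));   /* only the first limbs */

        for (size_t i = 0; i < nlimbs; i++)
            printf("limb[%zu] = %#lx\n", i, p[i]);  /* tail is 0, not junk */
        free(p);
        return 0;
    }
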
11297 +diff --git a/lib/test_scanf.c b/lib/test_scanf.c
11298 +index 84fe09eaf55e7..abae88848972f 100644
11299 +--- a/lib/test_scanf.c
11300 ++++ b/lib/test_scanf.c
11301 +@@ -271,7 +271,7 @@ static u32 __init next_test_random(u32 max_bits)
11302 + {
11303 + u32 n_bits = hweight32(prandom_u32_state(&rnd_state)) % (max_bits + 1);
11304 +
11305 +- return prandom_u32_state(&rnd_state) & (UINT_MAX >> (32 - n_bits));
11306 ++ return prandom_u32_state(&rnd_state) & GENMASK(n_bits, 0);
11307 + }
11308 +
11309 + static unsigned long long __init next_test_random_ull(void)
11310 +@@ -280,7 +280,7 @@ static unsigned long long __init next_test_random_ull(void)
11311 + u32 n_bits = (hweight32(rand1) * 3) % 64;
11312 + u64 val = (u64)prandom_u32_state(&rnd_state) * rand1;
11313 +
11314 +- return val & (ULLONG_MAX >> (64 - n_bits));
11315 ++ return val & GENMASK_ULL(n_bits, 0);
11316 + }
11317 +
11318 + #define random_for_type(T) \
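
The test_scanf change sidesteps undefined behaviour: when n_bits is 0, the old expressions shift a full-width value by 32 (or 64) bits, which C leaves undefined. Note that GENMASK(h, l) spans bits h down to l inclusive, so GENMASK(n_bits, 0) keeps n_bits + 1 bits. A simplified 32-bit userspace rendering of the mask construction; the kernel's GENMASK macro is built differently:

    #include <stdio.h>

    /* Bits hi..lo set, for hi < 32; every shift stays strictly below 32. */
    static unsigned genmask(unsigned hi, unsigned lo)
    {
        return ((~0u) >> (31 - hi)) & ((~0u) << lo);
    }

    int main(void)
    {
        unsigned n_bits = 0;

        /* printf("%x\n", ~0u >> (32 - n_bits));  <- shift by 32: undefined */
        printf("%x\n", genmask(n_bits, 0));       /* well-defined: 0x1 */
        printf("%x\n", genmask(7, 0));            /* 0xff */
        return 0;
    }
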
11319 +diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c
11320 +index 1c140af06d527..600b9563bfc53 100644
11321 +--- a/net/6lowpan/debugfs.c
11322 ++++ b/net/6lowpan/debugfs.c
11323 +@@ -170,7 +170,8 @@ static void lowpan_dev_debugfs_ctx_init(struct net_device *dev,
11324 + struct dentry *root;
11325 + char buf[32];
11326 +
11327 +- WARN_ON_ONCE(id > LOWPAN_IPHC_CTX_TABLE_SIZE);
11328 ++ if (WARN_ON_ONCE(id >= LOWPAN_IPHC_CTX_TABLE_SIZE))
11329 ++ return;
11330 +
11331 + sprintf(buf, "%d", id);
11332 +
11333 +diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
11334 +index c32638dddbf94..f6b9dc4e408f2 100644
11335 +--- a/net/bluetooth/cmtp/cmtp.h
11336 ++++ b/net/bluetooth/cmtp/cmtp.h
11337 +@@ -26,7 +26,7 @@
11338 + #include <linux/types.h>
11339 + #include <net/bluetooth/bluetooth.h>
11340 +
11341 +-#define BTNAMSIZ 18
11342 ++#define BTNAMSIZ 21
11343 +
11344 + /* CMTP ioctl defines */
11345 + #define CMTPCONNADD _IOW('C', 200, int)
11346 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
11347 +index e1a545c8a69f8..4c25bcd1ac4c0 100644
11348 +--- a/net/bluetooth/hci_core.c
11349 ++++ b/net/bluetooth/hci_core.c
11350 +@@ -1343,6 +1343,12 @@ int hci_inquiry(void __user *arg)
11351 + goto done;
11352 + }
11353 +
11354 ++ /* Restrict maximum inquiry length to 60 seconds */
11355 ++ if (ir.length > 60) {
11356 ++ err = -EINVAL;
11357 ++ goto done;
11358 ++ }
11359 ++
11360 + hci_dev_lock(hdev);
11361 + if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
11362 + inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
11363 +@@ -1727,6 +1733,14 @@ int hci_dev_do_close(struct hci_dev *hdev)
11364 + hci_request_cancel_all(hdev);
11365 + hci_req_sync_lock(hdev);
11366 +
11367 ++ if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
11368 ++ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
11369 ++ test_bit(HCI_UP, &hdev->flags)) {
11370 ++ /* Execute vendor specific shutdown routine */
11371 ++ if (hdev->shutdown)
11372 ++ hdev->shutdown(hdev);
11373 ++ }
11374 ++
11375 + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
11376 + cancel_delayed_work_sync(&hdev->cmd_timer);
11377 + hci_req_sync_unlock(hdev);
11378 +@@ -1798,14 +1812,6 @@ int hci_dev_do_close(struct hci_dev *hdev)
11379 + clear_bit(HCI_INIT, &hdev->flags);
11380 + }
11381 +
11382 +- if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
11383 +- !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
11384 +- test_bit(HCI_UP, &hdev->flags)) {
11385 +- /* Execute vendor specific shutdown routine */
11386 +- if (hdev->shutdown)
11387 +- hdev->shutdown(hdev);
11388 +- }
11389 +-
11390 + /* flush cmd work */
11391 + flush_work(&hdev->cmd_work);
11392 +
11393 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
11394 +index 3663f880df110..1e21e014efd22 100644
11395 +--- a/net/bluetooth/mgmt.c
11396 ++++ b/net/bluetooth/mgmt.c
11397 +@@ -7725,7 +7725,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
11398 + * advertising.
11399 + */
11400 + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
11401 +- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
11402 ++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
11403 + MGMT_STATUS_NOT_SUPPORTED);
11404 +
11405 + if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
11406 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
11407 +index d9a4e88dacbb7..b5ab842c7c4a8 100644
11408 +--- a/net/bluetooth/sco.c
11409 ++++ b/net/bluetooth/sco.c
11410 +@@ -85,7 +85,6 @@ static void sco_sock_timeout(struct timer_list *t)
11411 + sk->sk_state_change(sk);
11412 + bh_unlock_sock(sk);
11413 +
11414 +- sco_sock_kill(sk);
11415 + sock_put(sk);
11416 + }
11417 +
11418 +@@ -177,7 +176,6 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
11419 + sco_sock_clear_timer(sk);
11420 + sco_chan_del(sk, err);
11421 + bh_unlock_sock(sk);
11422 +- sco_sock_kill(sk);
11423 + sock_put(sk);
11424 + }
11425 +
11426 +@@ -394,8 +392,7 @@ static void sco_sock_cleanup_listen(struct sock *parent)
11427 + */
11428 + static void sco_sock_kill(struct sock *sk)
11429 + {
11430 +- if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
11431 +- sock_flag(sk, SOCK_DEAD))
11432 ++ if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
11433 + return;
11434 +
11435 + BT_DBG("sk %p state %d", sk, sk->sk_state);
11436 +@@ -447,7 +444,6 @@ static void sco_sock_close(struct sock *sk)
11437 + lock_sock(sk);
11438 + __sco_sock_close(sk);
11439 + release_sock(sk);
11440 +- sco_sock_kill(sk);
11441 + }
11442 +
11443 + static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg,
11444 +@@ -773,6 +769,11 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
11445 + cp.max_latency = cpu_to_le16(0xffff);
11446 + cp.retrans_effort = 0xff;
11447 + break;
11448 ++ default:
11449 ++ /* use CVSD settings as fallback */
11450 ++ cp.max_latency = cpu_to_le16(0xffff);
11451 ++ cp.retrans_effort = 0xff;
11452 ++ break;
11453 + }
11454 +
11455 + hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
11456 +diff --git a/net/core/devlink.c b/net/core/devlink.c
11457 +index 85032626de248..5a85a7b0feb25 100644
11458 +--- a/net/core/devlink.c
11459 ++++ b/net/core/devlink.c
11460 +@@ -3801,10 +3801,12 @@ static void devlink_param_notify(struct devlink *devlink,
11461 + struct devlink_param_item *param_item,
11462 + enum devlink_command cmd);
11463 +
11464 +-static void devlink_reload_netns_change(struct devlink *devlink,
11465 +- struct net *dest_net)
11466 ++static void devlink_ns_change_notify(struct devlink *devlink,
11467 + * When the parent is invalid, the child has to be invalid too.
11468 ++ bool new)
11469 + {
11470 + struct devlink_param_item *param_item;
11471 ++ enum devlink_command cmd;
11472 +
11473 + /* Userspace needs to be notified about devlink objects
11474 + * removed from original and entering new network namespace.
11475 +@@ -3812,17 +3814,18 @@ static void devlink_reload_netns_change(struct devlink *devlink,
11476 + * reload process so the notifications are generated separately.
11477 + */
11478 +
11479 +- list_for_each_entry(param_item, &devlink->param_list, list)
11480 +- devlink_param_notify(devlink, 0, param_item,
11481 +- DEVLINK_CMD_PARAM_DEL);
11482 +- devlink_notify(devlink, DEVLINK_CMD_DEL);
11483 ++ if (!dest_net || net_eq(dest_net, curr_net))
11484 ++ return;
11485 +
11486 +- __devlink_net_set(devlink, dest_net);
11487 ++ if (new)
11488 ++ devlink_notify(devlink, DEVLINK_CMD_NEW);
11489 +
11490 +- devlink_notify(devlink, DEVLINK_CMD_NEW);
11491 ++ cmd = new ? DEVLINK_CMD_PARAM_NEW : DEVLINK_CMD_PARAM_DEL;
11492 + list_for_each_entry(param_item, &devlink->param_list, list)
11493 +- devlink_param_notify(devlink, 0, param_item,
11494 +- DEVLINK_CMD_PARAM_NEW);
11495 ++ devlink_param_notify(devlink, 0, param_item, cmd);
11496 ++
11497 ++ if (!new)
11498 ++ devlink_notify(devlink, DEVLINK_CMD_DEL);
11499 + }
11500 +
11501 + static bool devlink_reload_supported(const struct devlink_ops *ops)
11502 +@@ -3902,6 +3905,7 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net,
11503 + u32 *actions_performed, struct netlink_ext_ack *extack)
11504 + {
11505 + u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
11506 ++ struct net *curr_net;
11507 + int err;
11508 +
11509 + if (!devlink->reload_enabled)
11510 +@@ -3909,18 +3913,22 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net,
11511 +
11512 + memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
11513 + sizeof(remote_reload_stats));
11514 ++
11515 ++ curr_net = devlink_net(devlink);
11516 ++ devlink_ns_change_notify(devlink, dest_net, curr_net, false);
11517 + err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
11518 + if (err)
11519 + return err;
11520 +
11521 +- if (dest_net && !net_eq(dest_net, devlink_net(devlink)))
11522 +- devlink_reload_netns_change(devlink, dest_net);
11523 ++ if (dest_net && !net_eq(dest_net, curr_net))
11524 ++ __devlink_net_set(devlink, dest_net);
11525 +
11526 + err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
11527 + devlink_reload_failed_set(devlink, !!err);
11528 + if (err)
11529 + return err;
11530 +
11531 ++ devlink_ns_change_notify(devlink, dest_net, curr_net, true);
11532 + WARN_ON(!(*actions_performed & BIT(action)));
11533 + /* Catch driver on updating the remote action within devlink reload */
11534 + WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
11535 +@@ -4117,7 +4125,7 @@ out_free_msg:
11536 +
11537 + static void devlink_flash_update_begin_notify(struct devlink *devlink)
11538 + {
11539 +- struct devlink_flash_notify params = { 0 };
11540 ++ struct devlink_flash_notify params = {};
11541 +
11542 + __devlink_flash_update_notify(devlink,
11543 + DEVLINK_CMD_FLASH_UPDATE,
11544 +@@ -4126,7 +4134,7 @@ static void devlink_flash_update_begin_notify(struct devlink *devlink)
11545 +
11546 + static void devlink_flash_update_end_notify(struct devlink *devlink)
11547 + {
11548 +- struct devlink_flash_notify params = { 0 };
11549 ++ struct devlink_flash_notify params = {};
11550 +
11551 + __devlink_flash_update_notify(devlink,
11552 + DEVLINK_CMD_FLASH_UPDATE_END,
11553 +diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
11554 +index 00bb89b2d86fc..970906eb5b2cd 100644
11555 +--- a/net/dsa/Kconfig
11556 ++++ b/net/dsa/Kconfig
11557 +@@ -18,16 +18,6 @@ if NET_DSA
11558 +
11559 + # Drivers must select the appropriate tagging format(s)
11560 +
11561 +-config NET_DSA_TAG_8021Q
11562 +- tristate
11563 +- select VLAN_8021Q
11564 +- help
11565 +- Unlike the other tagging protocols, the 802.1Q config option simply
11566 +- provides helpers for other tagging implementations that might rely on
11567 +- VLAN in one way or another. It is not a complete solution.
11568 +-
11569 +- Drivers which use these helpers should select this as dependency.
11570 +-
11571 + config NET_DSA_TAG_AR9331
11572 + tristate "Tag driver for Atheros AR9331 SoC with built-in switch"
11573 + help
11574 +@@ -126,7 +116,6 @@ config NET_DSA_TAG_OCELOT_8021Q
11575 + tristate "Tag driver for Ocelot family of switches, using VLAN"
11576 + depends on MSCC_OCELOT_SWITCH_LIB || \
11577 + (MSCC_OCELOT_SWITCH_LIB=n && COMPILE_TEST)
11578 +- select NET_DSA_TAG_8021Q
11579 + help
11580 + Say Y or M if you want to enable support for tagging frames with a
11581 + custom VLAN-based header. Frames that require timestamping, such as
11582 +@@ -149,7 +138,7 @@ config NET_DSA_TAG_LAN9303
11583 +
11584 + config NET_DSA_TAG_SJA1105
11585 + tristate "Tag driver for NXP SJA1105 switches"
11586 +- select NET_DSA_TAG_8021Q
11587 ++ depends on (NET_DSA_SJA1105 && NET_DSA_SJA1105_PTP) || !NET_DSA_SJA1105 || !NET_DSA_SJA1105_PTP
11588 + select PACKING
11589 + help
11590 + Say Y or M if you want to enable support for tagging frames with the
11591 +diff --git a/net/dsa/Makefile b/net/dsa/Makefile
11592 +index 44bc79952b8b8..67ea009f242cb 100644
11593 +--- a/net/dsa/Makefile
11594 ++++ b/net/dsa/Makefile
11595 +@@ -1,10 +1,9 @@
11596 + # SPDX-License-Identifier: GPL-2.0
11597 + # the core
11598 + obj-$(CONFIG_NET_DSA) += dsa_core.o
11599 +-dsa_core-y += dsa.o dsa2.o master.o port.o slave.o switch.o
11600 ++dsa_core-y += dsa.o dsa2.o master.o port.o slave.o switch.o tag_8021q.o
11601 +
11602 + # tagging formats
11603 +-obj-$(CONFIG_NET_DSA_TAG_8021Q) += tag_8021q.o
11604 + obj-$(CONFIG_NET_DSA_TAG_AR9331) += tag_ar9331.o
11605 + obj-$(CONFIG_NET_DSA_TAG_BRCM_COMMON) += tag_brcm.o
11606 + obj-$(CONFIG_NET_DSA_TAG_DSA_COMMON) += tag_dsa.o
11607 +diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
11608 +index f201c33980bf3..cddf7cb0f398f 100644
11609 +--- a/net/dsa/dsa_priv.h
11610 ++++ b/net/dsa/dsa_priv.h
11611 +@@ -234,8 +234,6 @@ int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
11612 + int dsa_port_bridge_flags(const struct dsa_port *dp,
11613 + struct switchdev_brport_flags flags,
11614 + struct netlink_ext_ack *extack);
11615 +-int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
11616 +- struct netlink_ext_ack *extack);
11617 + int dsa_port_vlan_add(struct dsa_port *dp,
11618 + const struct switchdev_obj_port_vlan *vlan,
11619 + struct netlink_ext_ack *extack);
11620 +diff --git a/net/dsa/port.c b/net/dsa/port.c
11621 +index 28b45b7e66df1..23e30198a90e6 100644
11622 +--- a/net/dsa/port.c
11623 ++++ b/net/dsa/port.c
11624 +@@ -186,10 +186,6 @@ static int dsa_port_switchdev_sync(struct dsa_port *dp,
11625 + if (err && err != -EOPNOTSUPP)
11626 + return err;
11627 +
11628 +- err = dsa_port_mrouter(dp->cpu_dp, br_multicast_router(br), extack);
11629 +- if (err && err != -EOPNOTSUPP)
11630 +- return err;
11631 +-
11632 + err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
11633 + if (err && err != -EOPNOTSUPP)
11634 + return err;
11635 +@@ -272,12 +268,6 @@ static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
11636 +
11637 + /* VLAN filtering is handled by dsa_switch_bridge_leave */
11638 +
11639 +- /* Some drivers treat the notification for having a local multicast
11640 +- * router by allowing multicast to be flooded to the CPU, so we should
11641 +- * allow this in standalone mode too.
11642 +- */
11643 +- dsa_port_mrouter(dp->cpu_dp, true, NULL);
11644 +-
11645 + /* Ageing time may be global to the switch chip, so don't change it
11646 + * here because we have no good reason (or value) to change it to.
11647 + */
11648 +@@ -607,17 +597,6 @@ int dsa_port_bridge_flags(const struct dsa_port *dp,
11649 + return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
11650 + }
11651 +
11652 +-int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
11653 +- struct netlink_ext_ack *extack)
11654 +-{
11655 +- struct dsa_switch *ds = dp->ds;
11656 +-
11657 +- if (!ds->ops->port_set_mrouter)
11658 +- return -EOPNOTSUPP;
11659 +-
11660 +- return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
11661 +-}
11662 +-
11663 + int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
11664 + bool targeted_match)
11665 + {
11666 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
11667 +index 23be8e01026bf..b34116b15d436 100644
11668 +--- a/net/dsa/slave.c
11669 ++++ b/net/dsa/slave.c
11670 +@@ -314,12 +314,6 @@ static int dsa_slave_port_attr_set(struct net_device *dev, const void *ctx,
11671 +
11672 + ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
11673 + break;
11674 +- case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
11675 +- if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
11676 +- return -EOPNOTSUPP;
11677 +-
11678 +- ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, extack);
11679 +- break;
11680 + default:
11681 + ret = -EOPNOTSUPP;
11682 + break;
11683 +diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
11684 +index 4aa29f90eceae..0d1db3e37668d 100644
11685 +--- a/net/dsa/tag_8021q.c
11686 ++++ b/net/dsa/tag_8021q.c
11687 +@@ -493,5 +493,3 @@ void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id,
11688 + skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
11689 + }
11690 + EXPORT_SYMBOL_GPL(dsa_8021q_rcv);
11691 +-
11692 +-MODULE_LICENSE("GPL v2");
11693 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
11694 +index a6f20ee353355..94e33d3eaf621 100644
11695 +--- a/net/ipv4/route.c
11696 ++++ b/net/ipv4/route.c
11697 +@@ -586,18 +586,25 @@ static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
11698 + }
11699 + }
11700 +
11701 +-static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
11702 ++static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
11703 + {
11704 +- struct fib_nh_exception *fnhe, *oldest;
11705 ++ struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
11706 ++ struct fib_nh_exception *fnhe, *oldest = NULL;
11707 +
11708 +- oldest = rcu_dereference(hash->chain);
11709 +- for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
11710 +- fnhe = rcu_dereference(fnhe->fnhe_next)) {
11711 +- if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
11712 ++ for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
11713 ++ fnhe = rcu_dereference_protected(*fnhe_p,
11714 ++ lockdep_is_held(&fnhe_lock));
11715 ++ if (!fnhe)
11716 ++ break;
11717 ++ if (!oldest ||
11718 ++ time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
11719 + oldest = fnhe;
11720 ++ oldest_p = fnhe_p;
11721 ++ }
11722 + }
11723 + fnhe_flush_routes(oldest);
11724 +- return oldest;
11725 ++ *oldest_p = oldest->fnhe_next;
11726 ++ kfree_rcu(oldest, rcu);
11727 + }
11728 +
11729 + static u32 fnhe_hashfun(__be32 daddr)
11730 +@@ -676,16 +683,21 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
11731 + if (rt)
11732 + fill_route_from_fnhe(rt, fnhe);
11733 + } else {
11734 +- if (depth > FNHE_RECLAIM_DEPTH)
11735 +- fnhe = fnhe_oldest(hash);
11736 +- else {
11737 +- fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
11738 +- if (!fnhe)
11739 +- goto out_unlock;
11740 +-
11741 +- fnhe->fnhe_next = hash->chain;
11742 +- rcu_assign_pointer(hash->chain, fnhe);
11743 ++ /* Randomize max depth to avoid some side-channel attacks. */
11744 ++ int max_depth = FNHE_RECLAIM_DEPTH +
11745 ++ prandom_u32_max(FNHE_RECLAIM_DEPTH);
11746 ++
11747 ++ while (depth > max_depth) {
11748 ++ fnhe_remove_oldest(hash);
11749 ++ depth--;
11750 + }
11751 ++
11752 ++ fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
11753 ++ if (!fnhe)
11754 ++ goto out_unlock;
11755 ++
11756 ++ fnhe->fnhe_next = hash->chain;
11757 ++
11758 + fnhe->fnhe_genid = genid;
11759 + fnhe->fnhe_daddr = daddr;
11760 + fnhe->fnhe_gw = gw;
11761 +@@ -693,6 +705,8 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
11762 + fnhe->fnhe_mtu_locked = lock;
11763 + fnhe->fnhe_expires = max(1UL, expires);
11764 +
11765 ++ rcu_assign_pointer(hash->chain, fnhe);
11766 ++
11767 + /* Exception created; mark the cached routes for the nexthop
11768 + * stale, so anyone caching it rechecks if this exception
11769 + * applies to them.
11770 +@@ -3170,7 +3184,7 @@ static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
11771 + udph = skb_put_zero(skb, sizeof(struct udphdr));
11772 + udph->source = sport;
11773 + udph->dest = dport;
11774 +- udph->len = sizeof(struct udphdr);
11775 ++ udph->len = htons(sizeof(struct udphdr));
11776 + udph->check = 0;
11777 + break;
11778 + }
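The net/ipv4/route.c hunks above stop recycling the oldest exception entry in place: fnhe_remove_oldest() now walks the bucket with a pointer-to-pointer, unlinks the oldest node, and frees it via kfree_rcu(), and the insertion path publishes the new entry with rcu_assign_pointer() only after every field is initialised (the hunk also fixes udph->len to be stored in network byte order). A minimal userspace sketch of the same pointer-to-pointer unlink, where the node layout is invented for illustration and free() stands in for kfree_rcu():

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
        long stamp;                     /* smaller value = older entry */
    };

    static void remove_oldest(struct node **head)
    {
        struct node **p, **oldest_p = head;
        struct node *oldest;

        if (!*head)
            return;
        for (p = head; *p; p = &(*p)->next)
            if ((*p)->stamp < (*oldest_p)->stamp)
                oldest_p = p;
        oldest = *oldest_p;
        *oldest_p = oldest->next;       /* unlink without a prev pointer */
        free(oldest);                   /* the kernel defers via kfree_rcu() */
    }

    int main(void)
    {
        struct node *head = NULL, *n;
        long stamps[] = { 3, 1, 4, 2 };

        for (int i = 0; i < 4; i++) {
            n = malloc(sizeof(*n));
            n->stamp = stamps[i];
            n->next = head;
            head = n;
        }
        remove_oldest(&head);           /* drops the node with stamp 1 */
        for (n = head; n; n = n->next)
            printf("%ld ", n->stamp);
        printf("\n");
        while (head) {
            n = head->next;
            free(head);
            head = n;
        }
        return 0;
    }

Keeping a pointer to the link rather than to the node is what lets a singly linked list drop an interior element without tracking a separate "previous" pointer.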
11779 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
11780 +index a692626c19e44..db07c05736b25 100644
11781 +--- a/net/ipv4/tcp_ipv4.c
11782 ++++ b/net/ipv4/tcp_ipv4.c
11783 +@@ -2451,6 +2451,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
11784 + static void *tcp_seek_last_pos(struct seq_file *seq)
11785 + {
11786 + struct tcp_iter_state *st = seq->private;
11787 ++ int bucket = st->bucket;
11788 + int offset = st->offset;
11789 + int orig_num = st->num;
11790 + void *rc = NULL;
11791 +@@ -2461,7 +2462,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
11792 + break;
11793 + st->state = TCP_SEQ_STATE_LISTENING;
11794 + rc = listening_get_next(seq, NULL);
11795 +- while (offset-- && rc)
11796 ++ while (offset-- && rc && bucket == st->bucket)
11797 + rc = listening_get_next(seq, rc);
11798 + if (rc)
11799 + break;
11800 +@@ -2472,7 +2473,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
11801 + if (st->bucket > tcp_hashinfo.ehash_mask)
11802 + break;
11803 + rc = established_get_first(seq);
11804 +- while (offset-- && rc)
11805 ++ while (offset-- && rc && bucket == st->bucket)
11806 + rc = established_get_next(seq, rc);
11807 + }
11808 +
11809 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
11810 +index c5e8ecb96426b..6033403021019 100644
11811 +--- a/net/ipv6/route.c
11812 ++++ b/net/ipv6/route.c
11813 +@@ -1657,6 +1657,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
11814 + struct in6_addr *src_key = NULL;
11815 + struct rt6_exception *rt6_ex;
11816 + struct fib6_nh *nh = res->nh;
11817 ++ int max_depth;
11818 + int err = 0;
11819 +
11820 + spin_lock_bh(&rt6_exception_lock);
11821 +@@ -1711,7 +1712,9 @@ static int rt6_insert_exception(struct rt6_info *nrt,
11822 + bucket->depth++;
11823 + net->ipv6.rt6_stats->fib_rt_cache++;
11824 +
11825 +- if (bucket->depth > FIB6_MAX_DEPTH)
11826 ++ /* Randomize max depth to avoid some side-channel attacks. */
11827 ++ max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
11828 ++ while (bucket->depth > max_depth)
11829 + rt6_exception_remove_oldest(bucket);
11830 +
11831 + out:
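As in the IPv4 hunk, the IPv6 exception cache above now evicts down to a randomized threshold instead of the fixed FIB6_MAX_DEPTH, so an attacker filling a bucket cannot predict exactly when eviction fires. A toy sketch of that threshold computation, where rand() merely stands in for the kernel's prandom_u32_max() and is not suitable for real security use:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define RECLAIM_DEPTH 5

    static unsigned int randomized_max_depth(void)
    {
        /* Uniform in [RECLAIM_DEPTH, 2*RECLAIM_DEPTH), so the eviction
         * point is a moving target from one insertion to the next. */
        return RECLAIM_DEPTH + (unsigned int)(rand() % RECLAIM_DEPTH);
    }

    int main(void)
    {
        srand((unsigned int)time(NULL));
        for (int i = 0; i < 3; i++)
            printf("max_depth = %u\n", randomized_max_depth());
        return 0;
    }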
11832 +diff --git a/net/mac80211/main.c b/net/mac80211/main.c
11833 +index fcae76ddd586c..45fb517591ee9 100644
11834 +--- a/net/mac80211/main.c
11835 ++++ b/net/mac80211/main.c
11836 +@@ -1020,7 +1020,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
11837 +
11838 + iftd = &sband->iftype_data[i];
11839 +
11840 +- supp_he = supp_he || (iftd && iftd->he_cap.has_he);
11841 ++ supp_he = supp_he || iftd->he_cap.has_he;
11842 + }
11843 +
11844 + /* HT, VHT, HE require QoS, thus >= 4 queues */
11845 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
11846 +index 8509778ff31f2..fa09a369214db 100644
11847 +--- a/net/mac80211/tx.c
11848 ++++ b/net/mac80211/tx.c
11849 +@@ -3242,7 +3242,9 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
11850 + if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
11851 + return true;
11852 +
11853 +- if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
11854 ++ if (!ieee80211_amsdu_realloc_pad(local, skb,
11855 ++ sizeof(*amsdu_hdr) +
11856 ++ local->hw.extra_tx_headroom))
11857 + return false;
11858 +
11859 + data = skb_push(skb, sizeof(*amsdu_hdr));
11860 +diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
11861 +index baf235721c43f..000bb3da4f77f 100644
11862 +--- a/net/netlabel/netlabel_cipso_v4.c
11863 ++++ b/net/netlabel/netlabel_cipso_v4.c
11864 +@@ -187,14 +187,14 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
11865 + }
11866 + doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size,
11867 + sizeof(u32),
11868 +- GFP_KERNEL);
11869 ++ GFP_KERNEL | __GFP_NOWARN);
11870 + if (doi_def->map.std->lvl.local == NULL) {
11871 + ret_val = -ENOMEM;
11872 + goto add_std_failure;
11873 + }
11874 + doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size,
11875 + sizeof(u32),
11876 +- GFP_KERNEL);
11877 ++ GFP_KERNEL | __GFP_NOWARN);
11878 + if (doi_def->map.std->lvl.cipso == NULL) {
11879 + ret_val = -ENOMEM;
11880 + goto add_std_failure;
11881 +@@ -263,7 +263,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
11882 + doi_def->map.std->cat.local = kcalloc(
11883 + doi_def->map.std->cat.local_size,
11884 + sizeof(u32),
11885 +- GFP_KERNEL);
11886 ++ GFP_KERNEL | __GFP_NOWARN);
11887 + if (doi_def->map.std->cat.local == NULL) {
11888 + ret_val = -ENOMEM;
11889 + goto add_std_failure;
11890 +@@ -271,7 +271,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
11891 + doi_def->map.std->cat.cipso = kcalloc(
11892 + doi_def->map.std->cat.cipso_size,
11893 + sizeof(u32),
11894 +- GFP_KERNEL);
11895 ++ GFP_KERNEL | __GFP_NOWARN);
11896 + if (doi_def->map.std->cat.cipso == NULL) {
11897 + ret_val = -ENOMEM;
11898 + goto add_std_failure;
11899 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
11900 +index 0c30908628bae..bdbda61db8b96 100644
11901 +--- a/net/qrtr/qrtr.c
11902 ++++ b/net/qrtr/qrtr.c
11903 +@@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
11904 + goto err;
11905 + }
11906 +
11907 +- if (!size || len != ALIGN(size, 4) + hdrlen)
11908 ++ if (!size || size & 3 || len != size + hdrlen)
11909 + goto err;
11910 +
11911 + if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
11912 +@@ -506,8 +506,12 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
11913 +
11914 + if (cb->type == QRTR_TYPE_NEW_SERVER) {
11915 + /* Remote node endpoint can bridge other distant nodes */
11916 +- const struct qrtr_ctrl_pkt *pkt = data + hdrlen;
11917 ++ const struct qrtr_ctrl_pkt *pkt;
11918 +
11919 ++ if (size < sizeof(*pkt))
11920 ++ goto err;
11921 ++
11922 ++ pkt = data + hdrlen;
11923 + qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
11924 + }
11925 +
11926 +diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
11927 +index b79a7e27bb315..38a3a8394bbda 100644
11928 +--- a/net/sched/sch_cbq.c
11929 ++++ b/net/sched/sch_cbq.c
11930 +@@ -1614,7 +1614,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
11931 + err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
11932 + if (err) {
11933 + kfree(cl);
11934 +- return err;
11935 ++ goto failure;
11936 + }
11937 +
11938 + if (tca[TCA_RATE]) {
11939 +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
11940 +index 5f7ac27a52649..f22d26a2c89fa 100644
11941 +--- a/net/sched/sch_htb.c
11942 ++++ b/net/sched/sch_htb.c
11943 +@@ -125,6 +125,7 @@ struct htb_class {
11944 + struct htb_class_leaf {
11945 + int deficit[TC_HTB_MAXDEPTH];
11946 + struct Qdisc *q;
11947 ++ struct netdev_queue *offload_queue;
11948 + } leaf;
11949 + struct htb_class_inner {
11950 + struct htb_prio clprio[TC_HTB_NUMPRIO];
11951 +@@ -1411,24 +1412,47 @@ htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
11952 + return old_q;
11953 + }
11954 +
11955 +-static void htb_offload_move_qdisc(struct Qdisc *sch, u16 qid_old, u16 qid_new)
11956 ++static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
11957 ++{
11958 ++ struct netdev_queue *queue;
11959 ++
11960 ++ queue = cl->leaf.offload_queue;
11961 ++ if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
11962 ++ WARN_ON(cl->leaf.q->dev_queue != queue);
11963 ++
11964 ++ return queue;
11965 ++}
11966 ++
11967 ++static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
11968 ++ struct htb_class *cl_new, bool destroying)
11969 + {
11970 + struct netdev_queue *queue_old, *queue_new;
11971 + struct net_device *dev = qdisc_dev(sch);
11972 +- struct Qdisc *qdisc;
11973 +
11974 +- queue_old = netdev_get_tx_queue(dev, qid_old);
11975 +- queue_new = netdev_get_tx_queue(dev, qid_new);
11976 ++ queue_old = htb_offload_get_queue(cl_old);
11977 ++ queue_new = htb_offload_get_queue(cl_new);
11978 +
11979 +- if (dev->flags & IFF_UP)
11980 +- dev_deactivate(dev);
11981 +- qdisc = dev_graft_qdisc(queue_old, NULL);
11982 +- qdisc->dev_queue = queue_new;
11983 +- qdisc = dev_graft_qdisc(queue_new, qdisc);
11984 +- if (dev->flags & IFF_UP)
11985 +- dev_activate(dev);
11986 ++ if (!destroying) {
11987 ++ struct Qdisc *qdisc;
11988 +
11989 +- WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
11990 ++ if (dev->flags & IFF_UP)
11991 ++ dev_deactivate(dev);
11992 ++ qdisc = dev_graft_qdisc(queue_old, NULL);
11993 ++ WARN_ON(qdisc != cl_old->leaf.q);
11994 ++ }
11995 ++
11996 ++ if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
11997 ++ cl_old->leaf.q->dev_queue = queue_new;
11998 ++ cl_old->leaf.offload_queue = queue_new;
11999 ++
12000 ++ if (!destroying) {
12001 ++ struct Qdisc *qdisc;
12002 ++
12003 ++ qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
12004 ++ if (dev->flags & IFF_UP)
12005 ++ dev_activate(dev);
12006 ++ WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
12007 ++ }
12008 + }
12009 +
12010 + static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
12011 +@@ -1442,10 +1466,8 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
12012 + if (cl->level)
12013 + return -EINVAL;
12014 +
12015 +- if (q->offload) {
12016 +- dev_queue = new->dev_queue;
12017 +- WARN_ON(dev_queue != cl->leaf.q->dev_queue);
12018 +- }
12019 ++ if (q->offload)
12020 ++ dev_queue = htb_offload_get_queue(cl);
12021 +
12022 + if (!new) {
12023 + new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
12024 +@@ -1514,6 +1536,8 @@ static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
12025 + parent->ctokens = parent->cbuffer;
12026 + parent->t_c = ktime_get_ns();
12027 + parent->cmode = HTB_CAN_SEND;
12028 ++ if (q->offload)
12029 ++ parent->leaf.offload_queue = cl->leaf.offload_queue;
12030 + }
12031 +
12032 + static void htb_parent_to_leaf_offload(struct Qdisc *sch,
12033 +@@ -1534,6 +1558,7 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
12034 + struct netlink_ext_ack *extack)
12035 + {
12036 + struct tc_htb_qopt_offload offload_opt;
12037 ++ struct netdev_queue *dev_queue;
12038 + struct Qdisc *q = cl->leaf.q;
12039 + struct Qdisc *old = NULL;
12040 + int err;
12041 +@@ -1542,16 +1567,15 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
12042 + return -EINVAL;
12043 +
12044 + WARN_ON(!q);
12045 +- if (!destroying) {
12046 +- /* On destroy of HTB, two cases are possible:
12047 +- * 1. q is a normal qdisc, but q->dev_queue has noop qdisc.
12048 +- * 2. q is a noop qdisc (for nodes that were inner),
12049 +- * q->dev_queue is noop_netdev_queue.
12050 ++ dev_queue = htb_offload_get_queue(cl);
12051 ++ old = htb_graft_helper(dev_queue, NULL);
12052 ++ if (destroying)
12053 ++ /* Before HTB is destroyed, the kernel grafts noop_qdisc to
12054 ++ * all queues.
12055 + */
12056 +- old = htb_graft_helper(q->dev_queue, NULL);
12057 +- WARN_ON(!old);
12058 ++ WARN_ON(!(old->flags & TCQ_F_BUILTIN));
12059 ++ else
12060 + WARN_ON(old != q);
12061 +- }
12062 +
12063 + if (cl->parent) {
12064 + cl->parent->bstats_bias.bytes += q->bstats.bytes;
12065 +@@ -1570,18 +1594,17 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
12066 + if (!err || destroying)
12067 + qdisc_put(old);
12068 + else
12069 +- htb_graft_helper(q->dev_queue, old);
12070 ++ htb_graft_helper(dev_queue, old);
12071 +
12072 + if (last_child)
12073 + return err;
12074 +
12075 +- if (!err && offload_opt.moved_qid != 0) {
12076 +- if (destroying)
12077 +- q->dev_queue = netdev_get_tx_queue(qdisc_dev(sch),
12078 +- offload_opt.qid);
12079 +- else
12080 +- htb_offload_move_qdisc(sch, offload_opt.moved_qid,
12081 +- offload_opt.qid);
12082 ++ if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
12083 ++ u32 classid = TC_H_MAJ(sch->handle) |
12084 ++ TC_H_MIN(offload_opt.classid);
12085 ++ struct htb_class *moved_cl = htb_find(classid, sch);
12086 ++
12087 ++ htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
12088 + }
12089 +
12090 + return err;
12091 +@@ -1704,9 +1727,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
12092 + }
12093 +
12094 + if (last_child) {
12095 +- struct netdev_queue *dev_queue;
12096 ++ struct netdev_queue *dev_queue = sch->dev_queue;
12097 ++
12098 ++ if (q->offload)
12099 ++ dev_queue = htb_offload_get_queue(cl);
12100 +
12101 +- dev_queue = q->offload ? cl->leaf.q->dev_queue : sch->dev_queue;
12102 + new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
12103 + cl->parent->common.classid,
12104 + NULL);
12105 +@@ -1878,7 +1903,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
12106 + }
12107 + dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
12108 + } else { /* First child. */
12109 +- dev_queue = parent->leaf.q->dev_queue;
12110 ++ dev_queue = htb_offload_get_queue(parent);
12111 + old_q = htb_graft_helper(dev_queue, NULL);
12112 + WARN_ON(old_q != parent->leaf.q);
12113 + offload_opt = (struct tc_htb_qopt_offload) {
12114 +@@ -1935,6 +1960,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
12115 +
12116 + /* leaf (we) needs elementary qdisc */
12117 + cl->leaf.q = new_q ? new_q : &noop_qdisc;
12118 ++ if (q->offload)
12119 ++ cl->leaf.offload_queue = dev_queue;
12120 +
12121 + cl->parent = parent;
12122 +
12123 +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
12124 +index 0de918cb3d90d..a47e290b0668e 100644
12125 +--- a/net/sunrpc/svc.c
12126 ++++ b/net/sunrpc/svc.c
12127 +@@ -1629,6 +1629,21 @@ u32 svc_max_payload(const struct svc_rqst *rqstp)
12128 + }
12129 + EXPORT_SYMBOL_GPL(svc_max_payload);
12130 +
12131 ++/**
12132 ++ * svc_proc_name - Return RPC procedure name in string form
12133 ++ * @rqstp: svc_rqst to operate on
12134 ++ *
12135 ++ * Return value:
12136 ++ * Pointer to a NUL-terminated string
12137 ++ */
12138 ++const char *svc_proc_name(const struct svc_rqst *rqstp)
12139 ++{
12140 ++ if (rqstp && rqstp->rq_procinfo)
12141 ++ return rqstp->rq_procinfo->pc_name;
12142 ++ return "unknown";
12143 ++}
12144 ++
12145 ++
12146 + /**
12147 + * svc_encode_result_payload - mark a range of bytes as a result payload
12148 + * @rqstp: svc_rqst to operate on
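svc_proc_name() added above is a NULL-safe accessor: callers such as tracepoints always get back a printable string even when the request or its procedure info is absent. A minimal analogue of the pattern, with stand-in struct names:

    #include <stdio.h>

    struct procinfo { const char *pc_name; };
    struct rqst { const struct procinfo *rq_procinfo; };

    static const char *proc_name(const struct rqst *rq)
    {
        if (rq && rq->rq_procinfo && rq->rq_procinfo->pc_name)
            return rq->rq_procinfo->pc_name;
        return "unknown";           /* never hand out NULL */
    }

    int main(void)
    {
        struct procinfo pi = { "GETATTR" };
        struct rqst rq = { &pi };

        printf("%s %s\n", proc_name(&rq), proc_name(NULL));
        return 0;
    }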
12149 +diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
12150 +index 5764116125237..c7d7d35867302 100644
12151 +--- a/samples/bpf/xdp_redirect_cpu_user.c
12152 ++++ b/samples/bpf/xdp_redirect_cpu_user.c
12153 +@@ -831,7 +831,7 @@ int main(int argc, char **argv)
12154 + memset(cpu, 0, n_cpus * sizeof(int));
12155 +
12156 + /* Parse commands line args */
12157 +- while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:",
12158 ++ while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:n",
12159 + long_options, &longindex)) != -1) {
12160 + switch (opt) {
12161 + case 'd':
12162 +diff --git a/samples/pktgen/pktgen_sample04_many_flows.sh b/samples/pktgen/pktgen_sample04_many_flows.sh
12163 +index 56c5f5af350f6..cff51f861506d 100755
12164 +--- a/samples/pktgen/pktgen_sample04_many_flows.sh
12165 ++++ b/samples/pktgen/pktgen_sample04_many_flows.sh
12166 +@@ -13,13 +13,15 @@ root_check_run_with_sudo "$@"
12167 + # Parameter parsing via include
12168 + source ${basedir}/parameters.sh
12169 + # Set some default params, if they didn't get set
12170 +-[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
12171 ++if [ -z "$DEST_IP" ]; then
12172 ++ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
12173 ++fi
12174 + [ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
12175 + [ -z "$CLONE_SKB" ] && CLONE_SKB="0"
12176 + [ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
12177 + if [ -n "$DEST_IP" ]; then
12178 +- validate_addr $DEST_IP
12179 +- read -r DST_MIN DST_MAX <<< $(parse_addr $DEST_IP)
12180 ++ validate_addr${IP6} $DEST_IP
12181 ++ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
12182 + fi
12183 + if [ -n "$DST_PORT" ]; then
12184 + read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
12185 +@@ -62,8 +64,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
12186 +
12187 + # Single destination
12188 + pg_set $dev "dst_mac $DST_MAC"
12189 +- pg_set $dev "dst_min $DST_MIN"
12190 +- pg_set $dev "dst_max $DST_MAX"
12191 ++ pg_set $dev "dst${IP6}_min $DST_MIN"
12192 ++ pg_set $dev "dst${IP6}_max $DST_MAX"
12193 +
12194 + if [ -n "$DST_PORT" ]; then
12195 + # Single destination port or random port range
12196 +diff --git a/samples/pktgen/pktgen_sample05_flow_per_thread.sh b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
12197 +index 6e0effabca594..3578d0aa4ac55 100755
12198 +--- a/samples/pktgen/pktgen_sample05_flow_per_thread.sh
12199 ++++ b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
12200 +@@ -17,14 +17,16 @@ root_check_run_with_sudo "$@"
12201 + # Parameter parsing via include
12202 + source ${basedir}/parameters.sh
12203 + # Set some default params, if they didn't get set
12204 +-[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
12205 ++if [ -z "$DEST_IP" ]; then
12206 ++ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
12207 ++fi
12208 + [ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
12209 + [ -z "$CLONE_SKB" ] && CLONE_SKB="0"
12210 + [ -z "$BURST" ] && BURST=32
12211 + [ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
12212 + if [ -n "$DEST_IP" ]; then
12213 +- validate_addr $DEST_IP
12214 +- read -r DST_MIN DST_MAX <<< $(parse_addr $DEST_IP)
12215 ++ validate_addr${IP6} $DEST_IP
12216 ++ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
12217 + fi
12218 + if [ -n "$DST_PORT" ]; then
12219 + read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
12220 +@@ -52,8 +54,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
12221 +
12222 + # Single destination
12223 + pg_set $dev "dst_mac $DST_MAC"
12224 +- pg_set $dev "dst_min $DST_MIN"
12225 +- pg_set $dev "dst_max $DST_MAX"
12226 ++ pg_set $dev "dst${IP6}_min $DST_MIN"
12227 ++ pg_set $dev "dst${IP6}_max $DST_MAX"
12228 +
12229 + if [ -n "$DST_PORT" ]; then
12230 + # Single destination port or random port range
12231 +diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
12232 +index d0ceada99243a..f3a9cc201c8c2 100644
12233 +--- a/security/integrity/ima/Kconfig
12234 ++++ b/security/integrity/ima/Kconfig
12235 +@@ -6,7 +6,6 @@ config IMA
12236 + select SECURITYFS
12237 + select CRYPTO
12238 + select CRYPTO_HMAC
12239 +- select CRYPTO_MD5
12240 + select CRYPTO_SHA1
12241 + select CRYPTO_HASH_INFO
12242 + select TCG_TPM if HAS_IOMEM && !UML
12243 +diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
12244 +index 1e5c019161738..95cc31525c573 100644
12245 +--- a/security/integrity/ima/ima_mok.c
12246 ++++ b/security/integrity/ima/ima_mok.c
12247 +@@ -21,7 +21,7 @@ struct key *ima_blacklist_keyring;
12248 + /*
12249 + * Allocate the IMA blacklist keyring
12250 + */
12251 +-__init int ima_mok_init(void)
12252 ++static __init int ima_mok_init(void)
12253 + {
12254 + struct key_restriction *restriction;
12255 +
12256 +diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
12257 +index 4a56a52adab5d..b9d5d7a0975b3 100644
12258 +--- a/sound/soc/codecs/rt5682-i2c.c
12259 ++++ b/sound/soc/codecs/rt5682-i2c.c
12260 +@@ -117,6 +117,13 @@ static struct snd_soc_dai_driver rt5682_dai[] = {
12261 + },
12262 + };
12263 +
12264 ++static void rt5682_i2c_disable_regulators(void *data)
12265 ++{
12266 ++ struct rt5682_priv *rt5682 = data;
12267 ++
12268 ++ regulator_bulk_disable(ARRAY_SIZE(rt5682->supplies), rt5682->supplies);
12269 ++}
12270 ++
12271 + static int rt5682_i2c_probe(struct i2c_client *i2c,
12272 + const struct i2c_device_id *id)
12273 + {
12274 +@@ -157,6 +164,11 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
12275 + return ret;
12276 + }
12277 +
12278 ++ ret = devm_add_action_or_reset(&i2c->dev, rt5682_i2c_disable_regulators,
12279 ++ rt5682);
12280 ++ if (ret)
12281 ++ return ret;
12282 ++
12283 + ret = regulator_bulk_enable(ARRAY_SIZE(rt5682->supplies),
12284 + rt5682->supplies);
12285 + if (ret) {
12286 +@@ -282,10 +294,7 @@ static void rt5682_i2c_shutdown(struct i2c_client *client)
12287 +
12288 + static int rt5682_i2c_remove(struct i2c_client *client)
12289 + {
12290 +- struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
12291 +-
12292 + rt5682_i2c_shutdown(client);
12293 +- regulator_bulk_disable(ARRAY_SIZE(rt5682->supplies), rt5682->supplies);
12294 +
12295 + return 0;
12296 + }
12297 +diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
12298 +index 2e9175b37dc9c..254a016cb1f36 100644
12299 +--- a/sound/soc/codecs/tlv320aic32x4.c
12300 ++++ b/sound/soc/codecs/tlv320aic32x4.c
12301 +@@ -1131,7 +1131,7 @@ static struct snd_soc_dai_driver aic32x4_tas2505_dai = {
12302 + .playback = {
12303 + .stream_name = "Playback",
12304 + .channels_min = 1,
12305 +- .channels_max = 1,
12306 ++ .channels_max = 2,
12307 + .rates = SNDRV_PCM_RATE_8000_96000,
12308 + .formats = AIC32X4_FORMATS,},
12309 + .ops = &aic32x4_ops,
12310 +diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
12311 +index 86c92e03ea5d4..d885ced34f606 100644
12312 +--- a/sound/soc/codecs/wcd9335.c
12313 ++++ b/sound/soc/codecs/wcd9335.c
12314 +@@ -4076,6 +4076,16 @@ static int wcd9335_setup_irqs(struct wcd9335_codec *wcd)
12315 + return ret;
12316 + }
12317 +
12318 ++static void wcd9335_teardown_irqs(struct wcd9335_codec *wcd)
12319 ++{
12320 ++ int i;
12321 ++
12322 ++ /* disable interrupts on all slave ports */
12323 ++ for (i = 0; i < WCD9335_SLIM_NUM_PORT_REG; i++)
12324 ++ regmap_write(wcd->if_regmap, WCD9335_SLIM_PGD_PORT_INT_EN0 + i,
12325 ++ 0x00);
12326 ++}
12327 ++
12328 + static void wcd9335_cdc_sido_ccl_enable(struct wcd9335_codec *wcd,
12329 + bool ccl_flag)
12330 + {
12331 +@@ -4844,6 +4854,7 @@ static void wcd9335_codec_init(struct snd_soc_component *component)
12332 + static int wcd9335_codec_probe(struct snd_soc_component *component)
12333 + {
12334 + struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
12335 ++ int ret;
12336 + int i;
12337 +
12338 + snd_soc_component_init_regmap(component, wcd->regmap);
12339 +@@ -4861,7 +4872,15 @@ static int wcd9335_codec_probe(struct snd_soc_component *component)
12340 + for (i = 0; i < NUM_CODEC_DAIS; i++)
12341 + INIT_LIST_HEAD(&wcd->dai[i].slim_ch_list);
12342 +
12343 +- return wcd9335_setup_irqs(wcd);
12344 ++ ret = wcd9335_setup_irqs(wcd);
12345 ++ if (ret)
12346 ++ goto free_clsh_ctrl;
12347 ++
12348 ++ return 0;
12349 ++
12350 ++free_clsh_ctrl:
12351 ++ wcd_clsh_ctrl_free(wcd->clsh_ctrl);
12352 ++ return ret;
12353 + }
12354 +
12355 + static void wcd9335_codec_remove(struct snd_soc_component *comp)
12356 +@@ -4869,7 +4888,7 @@ static void wcd9335_codec_remove(struct snd_soc_component *comp)
12357 + struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
12358 +
12359 + wcd_clsh_ctrl_free(wcd->clsh_ctrl);
12360 +- free_irq(regmap_irq_get_virq(wcd->irq_data, WCD9335_IRQ_SLIMBUS), wcd);
12361 ++ wcd9335_teardown_irqs(wcd);
12362 + }
12363 +
12364 + static int wcd9335_codec_set_sysclk(struct snd_soc_component *comp,
12365 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
12366 +index fe15cbc7bcafd..a4d4cbf716a1c 100644
12367 +--- a/sound/soc/codecs/wm_adsp.c
12368 ++++ b/sound/soc/codecs/wm_adsp.c
12369 +@@ -747,6 +747,8 @@ static void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
12370 + static void wm_adsp2_cleanup_debugfs(struct wm_adsp *dsp)
12371 + {
12372 + wm_adsp_debugfs_clear(dsp);
12373 ++ debugfs_remove_recursive(dsp->debugfs_root);
12374 ++ dsp->debugfs_root = NULL;
12375 + }
12376 + #else
12377 + static inline void wm_adsp2_init_debugfs(struct wm_adsp *dsp,
12378 +diff --git a/sound/soc/fsl/fsl_rpmsg.c b/sound/soc/fsl/fsl_rpmsg.c
12379 +index ea5c973e2e846..d60f4dac6c1b3 100644
12380 +--- a/sound/soc/fsl/fsl_rpmsg.c
12381 ++++ b/sound/soc/fsl/fsl_rpmsg.c
12382 +@@ -165,25 +165,25 @@ static int fsl_rpmsg_probe(struct platform_device *pdev)
12383 + }
12384 +
12385 + /* Get the optional clocks */
12386 +- rpmsg->ipg = devm_clk_get(&pdev->dev, "ipg");
12387 ++ rpmsg->ipg = devm_clk_get_optional(&pdev->dev, "ipg");
12388 + if (IS_ERR(rpmsg->ipg))
12389 +- rpmsg->ipg = NULL;
12390 ++ return PTR_ERR(rpmsg->ipg);
12391 +
12392 +- rpmsg->mclk = devm_clk_get(&pdev->dev, "mclk");
12393 ++ rpmsg->mclk = devm_clk_get_optional(&pdev->dev, "mclk");
12394 + if (IS_ERR(rpmsg->mclk))
12395 +- rpmsg->mclk = NULL;
12396 ++ return PTR_ERR(rpmsg->mclk);
12397 +
12398 +- rpmsg->dma = devm_clk_get(&pdev->dev, "dma");
12399 ++ rpmsg->dma = devm_clk_get_optional(&pdev->dev, "dma");
12400 + if (IS_ERR(rpmsg->dma))
12401 +- rpmsg->dma = NULL;
12402 ++ return PTR_ERR(rpmsg->dma);
12403 +
12404 +- rpmsg->pll8k = devm_clk_get(&pdev->dev, "pll8k");
12405 ++ rpmsg->pll8k = devm_clk_get_optional(&pdev->dev, "pll8k");
12406 + if (IS_ERR(rpmsg->pll8k))
12407 +- rpmsg->pll8k = NULL;
12408 ++ return PTR_ERR(rpmsg->pll8k);
12409 +
12410 +- rpmsg->pll11k = devm_clk_get(&pdev->dev, "pll11k");
12411 ++ rpmsg->pll11k = devm_clk_get_optional(&pdev->dev, "pll11k");
12412 + if (IS_ERR(rpmsg->pll11k))
12413 +- rpmsg->pll11k = NULL;
12414 ++ return PTR_ERR(rpmsg->pll11k);
12415 +
12416 + platform_set_drvdata(pdev, rpmsg);
12417 + pm_runtime_enable(&pdev->dev);
12418 +diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
12419 +index a31a7a7bbf667..2b43459adc33a 100644
12420 +--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
12421 ++++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
12422 +@@ -199,7 +199,7 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
12423 + }
12424 + if (!strcmp(codec_dai->component->name, MAX98373_DEV0_NAME)) {
12425 + ret = snd_soc_dai_set_tdm_slot(codec_dai,
12426 +- 0x03, 3, 8, 24);
12427 ++ 0x30, 3, 8, 16);
12428 + if (ret < 0) {
12429 + dev_err(runtime->dev,
12430 + "DEV0 TDM slot err:%d\n", ret);
12431 +@@ -208,10 +208,10 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
12432 + }
12433 + if (!strcmp(codec_dai->component->name, MAX98373_DEV1_NAME)) {
12434 + ret = snd_soc_dai_set_tdm_slot(codec_dai,
12435 +- 0x0C, 3, 8, 24);
12436 ++ 0xC0, 3, 8, 16);
12437 + if (ret < 0) {
12438 + dev_err(runtime->dev,
12439 +- "DEV0 TDM slot err:%d\n", ret);
12440 ++ "DEV1 TDM slot err:%d\n", ret);
12441 + return ret;
12442 + }
12443 + }
12444 +@@ -311,24 +311,6 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
12445 + * The above 2 loops are mutually exclusive based on the stream direction,
12446 + * thus rtd_dpcm variable will never be overwritten
12447 + */
12448 +- /*
12449 +- * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
12450 +- * where as kblda7219m98927 & kblmax98927 supports S16_LE by default.
12451 +- * Skipping the port wise FE and BE configuration for kblda7219m98373 &
12452 +- * kblmax98373 as the topology (FE & BE) supports S24_LE only.
12453 +- */
12454 +-
12455 +- if (!strcmp(rtd->card->name, "kblda7219m98373") ||
12456 +- !strcmp(rtd->card->name, "kblmax98373")) {
12457 +- /* The ADSP will convert the FE rate to 48k, stereo */
12458 +- rate->min = rate->max = 48000;
12459 +- chan->min = chan->max = DUAL_CHANNEL;
12460 +-
12461 +- /* set SSP to 24 bit */
12462 +- snd_mask_none(fmt);
12463 +- snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
12464 +- return 0;
12465 +- }
12466 +
12467 + /*
12468 + * The ADSP will convert the FE rate to 48k, stereo, 24 bit
12469 +@@ -479,31 +461,20 @@ static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
12470 + static int kbl_fe_startup(struct snd_pcm_substream *substream)
12471 + {
12472 + struct snd_pcm_runtime *runtime = substream->runtime;
12473 +- struct snd_soc_pcm_runtime *soc_rt = asoc_substream_to_rtd(substream);
12474 +
12475 + /*
12476 + * On this platform for PCM device we support,
12477 + * 48Khz
12478 + * stereo
12479 ++ * 16 bit audio
12480 + */
12481 +
12482 + runtime->hw.channels_max = DUAL_CHANNEL;
12483 + snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
12484 + &constraints_channels);
12485 +- /*
12486 +- * Setup S24_LE (32 bit container and 24 bit valid data) for
12487 +- * kblda7219m98373 & kblmax98373. For kblda7219m98927 &
12488 +- * kblmax98927 keeping it as 16/16 due to topology FW dependency.
12489 +- */
12490 +- if (!strcmp(soc_rt->card->name, "kblda7219m98373") ||
12491 +- !strcmp(soc_rt->card->name, "kblmax98373")) {
12492 +- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE;
12493 +- snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
12494 +-
12495 +- } else {
12496 +- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
12497 +- snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
12498 +- }
12499 ++
12500 ++ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
12501 ++ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
12502 +
12503 + snd_pcm_hw_constraint_list(runtime, 0,
12504 + SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
12505 +@@ -536,23 +507,11 @@ static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
12506 + static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
12507 + {
12508 + struct snd_pcm_runtime *runtime = substream->runtime;
12509 +- struct snd_soc_pcm_runtime *soc_rt = asoc_substream_to_rtd(substream);
12510 +
12511 + runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
12512 + snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
12513 + &constraints_channels_quad);
12514 +
12515 +- /*
12516 +- * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE.
12517 +- * The DMIC also configured for S24_LE. Forcing the DMIC format to
12518 +- * S24_LE due to the topology FW dependency.
12519 +- */
12520 +- if (!strcmp(soc_rt->card->name, "kblda7219m98373") ||
12521 +- !strcmp(soc_rt->card->name, "kblmax98373")) {
12522 +- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE;
12523 +- snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
12524 +- }
12525 +-
12526 + return snd_pcm_hw_constraint_list(substream->runtime, 0,
12527 + SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
12528 + }
12529 +diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
12530 +index 42ef51c3fb4f4..b591c6fd13fdd 100644
12531 +--- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c
12532 ++++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
12533 +@@ -75,7 +75,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
12534 + },
12535 + {
12536 + .id = "DLGS7219",
12537 +- .drv_name = "cml_da7219_max98357a",
12538 ++ .drv_name = "cml_da7219_mx98357a",
12539 + .machine_quirk = snd_soc_acpi_codec_list,
12540 + .quirk_data = &max98390_spk_codecs,
12541 + .sof_fw_filename = "sof-cml.ri",
12542 +diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
12543 +index ba5ff468c265a..741bf2f9e081f 100644
12544 +--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
12545 ++++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
12546 +@@ -87,7 +87,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
12547 + },
12548 + {
12549 + .id = "DLGS7219",
12550 +- .drv_name = "kbl_da7219_max98357a",
12551 ++ .drv_name = "kbl_da7219_mx98357a",
12552 + .fw_filename = "intel/dsp_fw_kbl.bin",
12553 + .machine_quirk = snd_soc_acpi_codec_list,
12554 + .quirk_data = &kbl_7219_98357_codecs,
12555 +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
12556 +index c0fdab39e7c28..09037d555ec49 100644
12557 +--- a/sound/soc/intel/skylake/skl-topology.c
12558 ++++ b/sound/soc/intel/skylake/skl-topology.c
12559 +@@ -113,7 +113,7 @@ static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
12560 +
12561 + static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
12562 + {
12563 +- struct skl_module_iface *iface = &mcfg->module->formats[0];
12564 ++ struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];
12565 +
12566 + dev_dbg(skl->dev, "Dumping config\n");
12567 + dev_dbg(skl->dev, "Input Format:\n");
12568 +@@ -195,8 +195,8 @@ static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
12569 + struct skl_module_fmt *in_fmt, *out_fmt;
12570 +
12571 + /* Fixups will be applied to pin 0 only */
12572 +- in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
12573 +- out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;
12574 ++ in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
12575 ++ out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;
12576 +
12577 + if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
12578 + if (is_fe) {
12579 +@@ -239,9 +239,9 @@ static void skl_tplg_update_buffer_size(struct skl_dev *skl,
12580 + /* Since fixups is applied to pin 0 only, ibs, obs needs
12581 + * change for pin 0 only
12582 + */
12583 +- res = &mcfg->module->resources[0];
12584 +- in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
12585 +- out_fmt = &mcfg->module->formats[0].outputs[0].fmt;
12586 ++ res = &mcfg->module->resources[mcfg->res_idx];
12587 ++ in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
12588 ++ out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;
12589 +
12590 + if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
12591 + multiplier = 5;
12592 +@@ -1463,12 +1463,6 @@ static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
12593 + struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
12594 +
12595 + if (ac->params) {
12596 +- /*
12597 +- * Widget data is expected to be stripped of T and L
12598 +- */
12599 +- size -= 2 * sizeof(unsigned int);
12600 +- data += 2;
12601 +-
12602 + if (size > ac->max)
12603 + return -EINVAL;
12604 + ac->size = size;
12605 +@@ -1637,11 +1631,12 @@ int skl_tplg_update_pipe_params(struct device *dev,
12606 + struct skl_module_cfg *mconfig,
12607 + struct skl_pipe_params *params)
12608 + {
12609 +- struct skl_module_res *res = &mconfig->module->resources[0];
12610 ++ struct skl_module_res *res;
12611 + struct skl_dev *skl = get_skl_ctx(dev);
12612 + struct skl_module_fmt *format = NULL;
12613 + u8 cfg_idx = mconfig->pipe->cur_config_idx;
12614 +
12615 ++ res = &mconfig->module->resources[mconfig->res_idx];
12616 + skl_tplg_fill_dma_id(mconfig, params);
12617 + mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
12618 + mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
12619 +@@ -1650,9 +1645,9 @@ int skl_tplg_update_pipe_params(struct device *dev,
12620 + return 0;
12621 +
12622 + if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
12623 +- format = &mconfig->module->formats[0].inputs[0].fmt;
12624 ++ format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
12625 + else
12626 +- format = &mconfig->module->formats[0].outputs[0].fmt;
12627 ++ format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;
12628 +
12629 + /* set the hw_params */
12630 + format->s_freq = params->s_freq;
12631 +diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
12632 +index c4a598cbbdaa1..14e77df06b011 100644
12633 +--- a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
12634 ++++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
12635 +@@ -1119,25 +1119,26 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
12636 + afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
12637 + if (IS_ERR(afe->regmap)) {
12638 + dev_err(dev, "could not get regmap from parent\n");
12639 +- return PTR_ERR(afe->regmap);
12640 ++ ret = PTR_ERR(afe->regmap);
12641 ++ goto err_pm_disable;
12642 + }
12643 + ret = regmap_attach_dev(dev, afe->regmap, &mt8183_afe_regmap_config);
12644 + if (ret) {
12645 + dev_warn(dev, "regmap_attach_dev fail, ret %d\n", ret);
12646 +- return ret;
12647 ++ goto err_pm_disable;
12648 + }
12649 +
12650 + rstc = devm_reset_control_get(dev, "audiosys");
12651 + if (IS_ERR(rstc)) {
12652 + ret = PTR_ERR(rstc);
12653 + dev_err(dev, "could not get audiosys reset:%d\n", ret);
12654 +- return ret;
12655 ++ goto err_pm_disable;
12656 + }
12657 +
12658 + ret = reset_control_reset(rstc);
12659 + if (ret) {
12660 + dev_err(dev, "failed to trigger audio reset:%d\n", ret);
12661 +- return ret;
12662 ++ goto err_pm_disable;
12663 + }
12664 +
12665 + /* enable clock for regcache get default value from hw */
12666 +@@ -1147,7 +1148,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
12667 + ret = regmap_reinit_cache(afe->regmap, &mt8183_afe_regmap_config);
12668 + if (ret) {
12669 + dev_err(dev, "regmap_reinit_cache fail, ret %d\n", ret);
12670 +- return ret;
12671 ++ goto err_pm_disable;
12672 + }
12673 +
12674 + pm_runtime_put_sync(&pdev->dev);
12675 +@@ -1160,8 +1161,10 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
12676 + afe->memif_size = MT8183_MEMIF_NUM;
12677 + afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
12678 + GFP_KERNEL);
12679 +- if (!afe->memif)
12680 +- return -ENOMEM;
12681 ++ if (!afe->memif) {
12682 ++ ret = -ENOMEM;
12683 ++ goto err_pm_disable;
12684 ++ }
12685 +
12686 + for (i = 0; i < afe->memif_size; i++) {
12687 + afe->memif[i].data = &memif_data[i];
12688 +@@ -1178,22 +1181,26 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
12689 + afe->irqs_size = MT8183_IRQ_NUM;
12690 + afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
12691 + GFP_KERNEL);
12692 +- if (!afe->irqs)
12693 +- return -ENOMEM;
12694 ++ if (!afe->irqs) {
12695 ++ ret = -ENOMEM;
12696 ++ goto err_pm_disable;
12697 ++ }
12698 +
12699 + for (i = 0; i < afe->irqs_size; i++)
12700 + afe->irqs[i].irq_data = &irq_data[i];
12701 +
12702 + /* request irq */
12703 + irq_id = platform_get_irq(pdev, 0);
12704 +- if (irq_id < 0)
12705 +- return irq_id;
12706 ++ if (irq_id < 0) {
12707 ++ ret = irq_id;
12708 ++ goto err_pm_disable;
12709 ++ }
12710 +
12711 + ret = devm_request_irq(dev, irq_id, mt8183_afe_irq_handler,
12712 + IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
12713 + if (ret) {
12714 + dev_err(dev, "could not request_irq for asys-isr\n");
12715 +- return ret;
12716 ++ goto err_pm_disable;
12717 + }
12718 +
12719 + /* init sub_dais */
12720 +@@ -1204,7 +1211,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
12721 + if (ret) {
12722 + dev_warn(afe->dev, "dai register i %d fail, ret %d\n",
12723 + i, ret);
12724 +- return ret;
12725 ++ goto err_pm_disable;
12726 + }
12727 + }
12728 +
12729 +@@ -1213,7 +1220,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
12730 + if (ret) {
12731 + dev_warn(afe->dev, "mtk_afe_combine_sub_dai fail, ret %d\n",
12732 + ret);
12733 +- return ret;
12734 ++ goto err_pm_disable;
12735 + }
12736 +
12737 + afe->mtk_afe_hardware = &mt8183_afe_hardware;
12738 +@@ -1229,7 +1236,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
12739 + NULL, 0);
12740 + if (ret) {
12741 + dev_warn(dev, "err_platform\n");
12742 +- return ret;
12743 ++ goto err_pm_disable;
12744 + }
12745 +
12746 + ret = devm_snd_soc_register_component(afe->dev,
12747 +@@ -1238,10 +1245,14 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
12748 + afe->num_dai_drivers);
12749 + if (ret) {
12750 + dev_warn(dev, "err_dai_component\n");
12751 +- return ret;
12752 ++ goto err_pm_disable;
12753 + }
12754 +
12755 + return ret;
12756 ++
12757 ++err_pm_disable:
12758 ++ pm_runtime_disable(&pdev->dev);
12759 ++ return ret;
12760 + }
12761 +
12762 + static int mt8183_afe_pcm_dev_remove(struct platform_device *pdev)
12763 +diff --git a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
12764 +index 7a1724f5ff4c6..31c280339c503 100644
12765 +--- a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
12766 ++++ b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
12767 +@@ -2229,12 +2229,13 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
12768 + afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
12769 + if (IS_ERR(afe->regmap)) {
12770 + dev_err(dev, "could not get regmap from parent\n");
12771 +- return PTR_ERR(afe->regmap);
12772 ++ ret = PTR_ERR(afe->regmap);
12773 ++ goto err_pm_disable;
12774 + }
12775 + ret = regmap_attach_dev(dev, afe->regmap, &mt8192_afe_regmap_config);
12776 + if (ret) {
12777 + dev_warn(dev, "regmap_attach_dev fail, ret %d\n", ret);
12778 +- return ret;
12779 ++ goto err_pm_disable;
12780 + }
12781 +
12782 + /* enable clock for regcache get default value from hw */
12783 +@@ -2244,7 +2245,7 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
12784 + ret = regmap_reinit_cache(afe->regmap, &mt8192_afe_regmap_config);
12785 + if (ret) {
12786 + dev_err(dev, "regmap_reinit_cache fail, ret %d\n", ret);
12787 +- return ret;
12788 ++ goto err_pm_disable;
12789 + }
12790 +
12791 + pm_runtime_put_sync(&pdev->dev);
12792 +@@ -2257,8 +2258,10 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
12793 + afe->memif_size = MT8192_MEMIF_NUM;
12794 + afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
12795 + GFP_KERNEL);
12796 +- if (!afe->memif)
12797 +- return -ENOMEM;
12798 ++ if (!afe->memif) {
12799 ++ ret = -ENOMEM;
12800 ++ goto err_pm_disable;
12801 ++ }
12802 +
12803 + for (i = 0; i < afe->memif_size; i++) {
12804 + afe->memif[i].data = &memif_data[i];
12805 +@@ -2272,22 +2275,26 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
12806 + afe->irqs_size = MT8192_IRQ_NUM;
12807 + afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
12808 + GFP_KERNEL);
12809 +- if (!afe->irqs)
12810 +- return -ENOMEM;
12811 ++ if (!afe->irqs) {
12812 ++ ret = -ENOMEM;
12813 ++ goto err_pm_disable;
12814 ++ }
12815 +
12816 + for (i = 0; i < afe->irqs_size; i++)
12817 + afe->irqs[i].irq_data = &irq_data[i];
12818 +
12819 + /* request irq */
12820 + irq_id = platform_get_irq(pdev, 0);
12821 +- if (irq_id < 0)
12822 +- return irq_id;
12823 ++ if (irq_id < 0) {
12824 ++ ret = irq_id;
12825 ++ goto err_pm_disable;
12826 ++ }
12827 +
12828 + ret = devm_request_irq(dev, irq_id, mt8192_afe_irq_handler,
12829 + IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
12830 + if (ret) {
12831 + dev_err(dev, "could not request_irq for Afe_ISR_Handle\n");
12832 +- return ret;
12833 ++ goto err_pm_disable;
12834 + }
12835 +
12836 + /* init sub_dais */
12837 +diff --git a/sound/usb/card.c b/sound/usb/card.c
12838 +index a1f8c3a026f57..6abfc9d079e7c 100644
12839 +--- a/sound/usb/card.c
12840 ++++ b/sound/usb/card.c
12841 +@@ -68,6 +68,7 @@ static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
12842 + static int device_setup[SNDRV_CARDS]; /* device parameter for this card */
12843 + static bool ignore_ctl_error;
12844 + static bool autoclock = true;
12845 ++static bool lowlatency = true;
12846 + static char *quirk_alias[SNDRV_CARDS];
12847 + static char *delayed_register[SNDRV_CARDS];
12848 + static bool implicit_fb[SNDRV_CARDS];
12849 +@@ -92,6 +93,8 @@ MODULE_PARM_DESC(ignore_ctl_error,
12850 + "Ignore errors from USB controller for mixer interfaces.");
12851 + module_param(autoclock, bool, 0444);
12852 + MODULE_PARM_DESC(autoclock, "Enable auto-clock selection for UAC2 devices (default: yes).");
12853 ++module_param(lowlatency, bool, 0444);
12854 ++MODULE_PARM_DESC(lowlatency, "Enable low latency playback (default: yes).");
12855 + module_param_array(quirk_alias, charp, NULL, 0444);
12856 + MODULE_PARM_DESC(quirk_alias, "Quirk aliases, e.g. 0123abcd:5678beef.");
12857 + module_param_array(delayed_register, charp, NULL, 0444);
12858 +@@ -599,6 +602,7 @@ static int snd_usb_audio_create(struct usb_interface *intf,
12859 + chip->setup = device_setup[idx];
12860 + chip->generic_implicit_fb = implicit_fb[idx];
12861 + chip->autoclock = autoclock;
12862 ++ chip->lowlatency = lowlatency;
12863 + atomic_set(&chip->active, 1); /* avoid autopm during probing */
12864 + atomic_set(&chip->usage_count, 0);
12865 + atomic_set(&chip->shutdown, 0);
12866 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
12867 +index f5cbf61ac366e..5dc9266180e37 100644
12868 +--- a/sound/usb/pcm.c
12869 ++++ b/sound/usb/pcm.c
12870 +@@ -617,7 +617,8 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
12871 + /* check whether early start is needed for playback stream */
12872 + subs->early_playback_start =
12873 + subs->direction == SNDRV_PCM_STREAM_PLAYBACK &&
12874 +- subs->data_endpoint->nominal_queue_size >= subs->buffer_bytes;
12875 ++ (!chip->lowlatency ||
12876 ++ (subs->data_endpoint->nominal_queue_size >= subs->buffer_bytes));
12877 +
12878 + if (subs->early_playback_start)
12879 + ret = start_endpoints(subs);
12880 +diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
12881 +index 538831cbd9254..8b70c9ea91b96 100644
12882 +--- a/sound/usb/usbaudio.h
12883 ++++ b/sound/usb/usbaudio.h
12884 +@@ -57,6 +57,7 @@ struct snd_usb_audio {
12885 + bool generic_implicit_fb; /* from the 'implicit_fb' module param */
12886 + bool autoclock; /* from the 'autoclock' module param */
12887 +
12888 ++ bool lowlatency; /* from the 'lowlatency' module param */
12889 + struct usb_host_interface *ctrl_intf; /* the audio control interface */
12890 + struct media_device *media_dev;
12891 + struct media_intf_devnode *ctl_intf_media_devnode;
12892 +diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
12893 +index f45fa992e01d3..fd67496a947f3 100644
12894 +--- a/tools/bootconfig/main.c
12895 ++++ b/tools/bootconfig/main.c
12896 +@@ -111,9 +111,11 @@ static void xbc_show_list(void)
12897 + char key[XBC_KEYLEN_MAX];
12898 + struct xbc_node *leaf;
12899 + const char *val;
12900 ++ int ret;
12901 +
12902 + xbc_for_each_key_value(leaf, val) {
12903 +- if (xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX) < 0) {
12904 ++ ret = xbc_node_compose_key(leaf, key, XBC_KEYLEN_MAX);
12905 ++ if (ret < 0) {
12906 + fprintf(stderr, "Failed to compose key %d\n", ret);
12907 + break;
12908 + }
12909 +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
12910 +index cc48726740ade..9d709b4276655 100644
12911 +--- a/tools/bpf/bpftool/prog.c
12912 ++++ b/tools/bpf/bpftool/prog.c
12913 +@@ -781,6 +781,8 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
12914 + kernel_syms_destroy(&dd);
12915 + }
12916 +
12917 ++ btf__free(btf);
12918 ++
12919 + return 0;
12920 + }
12921 +
12922 +@@ -2002,8 +2004,8 @@ static char *profile_target_name(int tgt_fd)
12923 + struct bpf_prog_info_linear *info_linear;
12924 + struct bpf_func_info *func_info;
12925 + const struct btf_type *t;
12926 ++ struct btf *btf = NULL;
12927 + char *name = NULL;
12928 +- struct btf *btf;
12929 +
12930 + info_linear = bpf_program__get_prog_info_linear(
12931 + tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
12932 +@@ -2027,6 +2029,7 @@ static char *profile_target_name(int tgt_fd)
12933 + }
12934 + name = strdup(btf__name_by_offset(btf, t->name_off));
12935 + out:
12936 ++ btf__free(btf);
12937 + free(info_linear);
12938 + return name;
12939 + }
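Both bpftool hunks above fix the same leak pattern: the btf handle is initialised to NULL and released with btf__free() on the common exit path, so early "goto out" exits no longer skip the free (btf__free(), like free(), is a no-op on NULL). A small sketch of the NULL-init plus single-cleanup-label idiom, using invented helpers in place of the btf calls:

    #include <stdlib.h>
    #include <string.h>

    struct handle { char *buf; };

    static struct handle *get_handle(int ok)
    {
        struct handle *h;

        if (!ok)
            return NULL;
        h = calloc(1, sizeof(*h));
        if (h)
            h->buf = strdup("data");
        return h;
    }

    static void put_handle(struct handle *h)    /* NULL-safe, like btf__free() */
    {
        if (!h)
            return;
        free(h->buf);
        free(h);
    }

    static char *target_name(int ok)
    {
        struct handle *h = NULL;    /* NULL-init keeps the cleanup valid */
        char *name = NULL;

        h = get_handle(ok);
        if (!h)
            goto out;               /* early exit still reaches put_handle() */
        name = strdup(h->buf);
    out:
        put_handle(h);
        return name;
    }

    int main(void)
    {
        free(target_name(0));
        free(target_name(1));
        return 0;
    }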
12940 +diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
12941 +index bf9252c7381e8..5cdff1631608c 100644
12942 +--- a/tools/include/uapi/linux/bpf.h
12943 ++++ b/tools/include/uapi/linux/bpf.h
12944 +@@ -3249,7 +3249,7 @@ union bpf_attr {
12945 + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
12946 + * Description
12947 + * Select a **SO_REUSEPORT** socket from a
12948 +- * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
12949 ++ * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
12950 + * It checks the selected socket is matching the incoming
12951 + * request in the socket buffer.
12952 + * Return
12953 +diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
12954 +index ec14aa725bb00..74c3b73a5fbe8 100644
12955 +--- a/tools/lib/bpf/Makefile
12956 ++++ b/tools/lib/bpf/Makefile
12957 +@@ -4,8 +4,9 @@
12958 + RM ?= rm
12959 + srctree = $(abs_srctree)
12960 +
12961 ++VERSION_SCRIPT := libbpf.map
12962 + LIBBPF_VERSION := $(shell \
12963 +- grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | \
12964 ++ grep -oE '^LIBBPF_([0-9.]+)' $(VERSION_SCRIPT) | \
12965 + sort -rV | head -n1 | cut -d'_' -f2)
12966 + LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
12967 +
12968 +@@ -110,7 +111,6 @@ SHARED_OBJDIR := $(OUTPUT)sharedobjs/
12969 + STATIC_OBJDIR := $(OUTPUT)staticobjs/
12970 + BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o
12971 + BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o
12972 +-VERSION_SCRIPT := libbpf.map
12973 + BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h
12974 +
12975 + LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
12976 +@@ -163,10 +163,10 @@ $(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
12977 +
12978 + $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
12979 +
12980 +-$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
12981 ++$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED) $(VERSION_SCRIPT)
12982 + $(QUIET_LINK)$(CC) $(LDFLAGS) \
12983 + --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
12984 +- -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -lz -o $@
12985 ++ -Wl,--version-script=$(VERSION_SCRIPT) $< -lelf -lz -o $@
12986 + @ln -sf $(@F) $(OUTPUT)libbpf.so
12987 + @ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
12988 +
12989 +@@ -181,7 +181,7 @@ $(OUTPUT)libbpf.pc:
12990 +
12991 + check: check_abi
12992 +
12993 +-check_abi: $(OUTPUT)libbpf.so
12994 ++check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
12995 + @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \
12996 + echo "Warning: Num of global symbols in $(BPF_IN_SHARED)" \
12997 + "($(GLOBAL_SYM_COUNT)) does NOT match with num of" \
12998 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
12999 +index 6f5e2757bb3cf..2234d5c33177a 100644
13000 +--- a/tools/lib/bpf/libbpf.c
13001 ++++ b/tools/lib/bpf/libbpf.c
13002 +@@ -4479,6 +4479,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
13003 + {
13004 + struct bpf_create_map_attr create_attr;
13005 + struct bpf_map_def *def = &map->def;
13006 ++ int err = 0;
13007 +
13008 + memset(&create_attr, 0, sizeof(create_attr));
13009 +
13010 +@@ -4521,8 +4522,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
13011 +
13012 + if (bpf_map_type__is_map_in_map(def->type)) {
13013 + if (map->inner_map) {
13014 +- int err;
13015 +-
13016 + err = bpf_object__create_map(obj, map->inner_map, true);
13017 + if (err) {
13018 + pr_warn("map '%s': failed to create inner map: %d\n",
13019 +@@ -4547,8 +4546,8 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
13020 + if (map->fd < 0 && (create_attr.btf_key_type_id ||
13021 + create_attr.btf_value_type_id)) {
13022 + char *cp, errmsg[STRERR_BUFSIZE];
13023 +- int err = -errno;
13024 +
13025 ++ err = -errno;
13026 + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
13027 + pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
13028 + map->name, cp, err);
13029 +@@ -4560,8 +4559,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
13030 + map->fd = bpf_create_map_xattr(&create_attr);
13031 + }
13032 +
13033 +- if (map->fd < 0)
13034 +- return -errno;
13035 ++ err = map->fd < 0 ? -errno : 0;
13036 +
13037 + if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
13038 + if (obj->gen_loader)
13039 +@@ -4570,7 +4568,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
13040 + zfree(&map->inner_map);
13041 + }
13042 +
13043 +- return 0;
13044 ++ return err;
13045 + }
13046 +
13047 + static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
13048 +@@ -7588,8 +7586,10 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
13049 + kconfig = OPTS_GET(opts, kconfig, NULL);
13050 + if (kconfig) {
13051 + obj->kconfig = strdup(kconfig);
13052 +- if (!obj->kconfig)
13053 +- return ERR_PTR(-ENOMEM);
13054 ++ if (!obj->kconfig) {
13055 ++ err = -ENOMEM;
13056 ++ goto out;
13057 ++ }
13058 + }
13059 +
13060 + err = bpf_object__elf_init(obj);
13061 +@@ -9515,7 +9515,7 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
13062 + struct bpf_prog_info_linear *info_linear;
13063 + struct bpf_prog_info *info;
13064 + struct btf *btf = NULL;
13065 +- int err = -EINVAL;
13066 ++ int err;
13067 +
13068 + info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
13069 + err = libbpf_get_error(info_linear);
13070 +@@ -9524,6 +9524,8 @@ static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
13071 + attach_prog_fd);
13072 + return err;
13073 + }
13074 ++
13075 ++ err = -EINVAL;
13076 + info = &info_linear->info;
13077 + if (!info->btf_id) {
13078 + pr_warn("The target program doesn't have BTF\n");
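The libbpf changes above are all about error propagation: bpf_object__create_map() now records err = -errno the moment map creation fails, still runs the inner-map cleanup, and returns the saved value, while __bpf_object__open() routes a failed strdup() through its error path instead of leaking the object. A sketch of the capture-then-clean-up idiom; open() is just a convenient syscall that sets errno:

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    static int open_and_tidy(const char *path, char **scratch)
    {
        int fd = open(path, O_RDONLY);
        int err = fd < 0 ? -errno : 0;  /* capture errno immediately */

        /* cleanup that must run on success and failure alike */
        free(*scratch);
        *scratch = NULL;

        if (fd >= 0)
            close(fd);
        return err;                     /* deferred, not an early return */
    }

    int main(void)
    {
        char *s = strdup("scratch");

        printf("err = %d\n", open_and_tidy("/no/such/file", &s));
        return 0;
    }

Saving -errno before any other call runs matters because later library calls on the cleanup path are free to clobber errno.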
13079 +diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
13080 +index cdecda1ddd36e..17a9844e4fbf8 100644
13081 +--- a/tools/perf/util/bpf-event.c
13082 ++++ b/tools/perf/util/bpf-event.c
13083 +@@ -296,7 +296,7 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
13084 +
13085 + out:
13086 + free(info_linear);
13087 +- free(btf);
13088 ++ btf__free(btf);
13089 + return err ? -1 : 0;
13090 + }
13091 +
13092 +@@ -486,7 +486,7 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
13093 + perf_env__fetch_btf(env, btf_id, btf);
13094 +
13095 + out:
13096 +- free(btf);
13097 ++ btf__free(btf);
13098 + close(fd);
13099 + }
13100 +
13101 +diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
13102 +index 8150e03367bba..beca55129b0b2 100644
13103 +--- a/tools/perf/util/bpf_counter.c
13104 ++++ b/tools/perf/util/bpf_counter.c
13105 +@@ -64,8 +64,8 @@ static char *bpf_target_prog_name(int tgt_fd)
13106 + struct bpf_prog_info_linear *info_linear;
13107 + struct bpf_func_info *func_info;
13108 + const struct btf_type *t;
13109 ++ struct btf *btf = NULL;
13110 + char *name = NULL;
13111 +- struct btf *btf;
13112 +
13113 + info_linear = bpf_program__get_prog_info_linear(
13114 + tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
13115 +@@ -89,6 +89,7 @@ static char *bpf_target_prog_name(int tgt_fd)
13116 + }
13117 + name = strdup(btf__name_by_offset(btf, t->name_off));
13118 + out:
13119 ++ btf__free(btf);
13120 + free(info_linear);
13121 + return name;
13122 + }
13123 +diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
13124 +index 857e3f26086fe..68e415f4d33cd 100644
13125 +--- a/tools/testing/selftests/bpf/prog_tests/btf.c
13126 ++++ b/tools/testing/selftests/bpf/prog_tests/btf.c
13127 +@@ -4386,6 +4386,7 @@ skip:
13128 + fprintf(stderr, "OK");
13129 +
13130 + done:
13131 ++ btf__free(btf);
13132 + free(func_info);
13133 + bpf_object__close(obj);
13134 + }
13135 +diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
13136 +index 2e4775c354149..92267abb462fc 100644
13137 +--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
13138 ++++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
13139 +@@ -121,7 +121,7 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
13140 + }
13141 +
13142 + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
13143 +- seq_num, src, srcp, destp, destp);
13144 ++ seq_num, src, srcp, dest, destp);
13145 + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
13146 + state,
13147 + tp->write_seq - tp->snd_una, rx_queue,
13148 +diff --git a/tools/testing/selftests/bpf/progs/test_core_autosize.c b/tools/testing/selftests/bpf/progs/test_core_autosize.c
13149 +index 44f5aa2e8956f..9a7829c5e4a72 100644
13150 +--- a/tools/testing/selftests/bpf/progs/test_core_autosize.c
13151 ++++ b/tools/testing/selftests/bpf/progs/test_core_autosize.c
13152 +@@ -125,6 +125,16 @@ int handle_downsize(void *ctx)
13153 + return 0;
13154 + }
13155 +
13156 ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
13157 ++#define bpf_core_read_int bpf_core_read
13158 ++#else
13159 ++#define bpf_core_read_int(dst, sz, src) ({ \
13160 ++ /* Prevent "subtraction from stack pointer prohibited" */ \
13161 ++ volatile long __off = sizeof(*dst) - (sz); \
13162 ++ bpf_core_read((char *)(dst) + __off, sz, src); \
13163 ++})
13164 ++#endif
13165 ++
13166 + SEC("raw_tp/sys_enter")
13167 + int handle_probed(void *ctx)
13168 + {
13169 +@@ -132,23 +142,23 @@ int handle_probed(void *ctx)
13170 + __u64 tmp;
13171 +
13172 + tmp = 0;
13173 +- bpf_core_read(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
13174 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
13175 + ptr_probed = tmp;
13176 +
13177 + tmp = 0;
13178 +- bpf_core_read(&tmp, bpf_core_field_size(in->val1), &in->val1);
13179 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->val1), &in->val1);
13180 + val1_probed = tmp;
13181 +
13182 + tmp = 0;
13183 +- bpf_core_read(&tmp, bpf_core_field_size(in->val2), &in->val2);
13184 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->val2), &in->val2);
13185 + val2_probed = tmp;
13186 +
13187 + tmp = 0;
13188 +- bpf_core_read(&tmp, bpf_core_field_size(in->val3), &in->val3);
13189 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->val3), &in->val3);
13190 + val3_probed = tmp;
13191 +
13192 + tmp = 0;
13193 +- bpf_core_read(&tmp, bpf_core_field_size(in->val4), &in->val4);
13194 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->val4), &in->val4);
13195 + val4_probed = tmp;
13196 +
13197 + return 0;
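The selftest fix above is an endianness correction, made explicit by the bpf_core_read_int() macro the hunk introduces: reading sz bytes of a narrow field into a wider zeroed integer works at offset 0 only on little-endian; on big-endian the bytes must land at the end of the destination. The same offset computation in plain C:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint64_t read_narrow(const void *src, size_t sz)
    {
        uint64_t dst = 0;

    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        memcpy(&dst, src, sz);                          /* low bytes first */
    #else
        /* big-endian: the value's low bytes sit at the END of dst */
        memcpy((char *)&dst + sizeof(dst) - sz, src, sz);
    #endif
        return dst;
    }

    int main(void)
    {
        uint16_t v = 0x1234;

        printf("0x%llx\n", (unsigned long long)read_narrow(&v, sizeof(v)));
        return 0;
    }

On either byte order this prints 0x1234; without the big-endian offset, a big-endian host would read 0x1234000000000000 instead.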
13198 +diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
13199 +index 30cbf5d98f7dc..abdfc41f7685a 100644
13200 +--- a/tools/testing/selftests/bpf/test_maps.c
13201 ++++ b/tools/testing/selftests/bpf/test_maps.c
13202 +@@ -764,8 +764,8 @@ static void test_sockmap(unsigned int tasks, void *data)
13203 + udp = socket(AF_INET, SOCK_DGRAM, 0);
13204 + i = 0;
13205 + err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
13206 +- if (!err) {
13207 +- printf("Failed socket SOCK_DGRAM allowed '%i:%i'\n",
13208 ++ if (err) {
13209 ++ printf("Failed socket update SOCK_DGRAM '%i:%i'\n",
13210 + i, udp);
13211 + goto out_sockmap;
13212 + }