Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.13 commit in: /
Date: Wed, 15 Sep 2021 11:59:22
Message-Id: 1631707141.28c88a1f45d165a39d82b198849358c4823da767.mpagano@gentoo
commit:     28c88a1f45d165a39d82b198849358c4823da767
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Sep 15 11:59:01 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Sep 15 11:59:01 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=28c88a1f45d165a39d82b198849358c4823da767

Linux patch 5.13.17

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |     4 +
 1016_linux-5.13.17.patch | 11676 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 11680 insertions(+)

diff --git a/0000_README b/0000_README
index 640800f..ab01fc2 100644
--- a/0000_README
+++ b/0000_README
@@ -107,6 +107,10 @@ Patch: 1015_linux-5.13.16.patch
 From: http://www.kernel.org
 Desc: Linux 5.13.16
 
+Patch: 1016_linux-5.13.17.patch
+From: http://www.kernel.org
+Desc: Linux 5.13.17
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
 
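Each stanza in 0000_README pairs a patch file with its origin and a one-line description; genpatches applies the listed patches to the kernel tree in numeric order, so 1016_linux-5.13.17.patch lands on a tree already at 5.13.16 (its Makefile hunk below bumps SUBLEVEL from 16 to 17). A minimal sketch of the equivalent manual step, assuming GNU patch and an illustrative tree location:

    # Apply the incremental 5.13.16 -> 5.13.17 patch; -p1 strips the a/ b/ prefixes.
    cd /usr/src/linux-5.13        # illustrative path, not from this commit
    patch -p1 < 1016_linux-5.13.17.patch
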
diff --git a/1016_linux-5.13.17.patch b/1016_linux-5.13.17.patch
new file mode 100644
index 0000000..6561764
--- /dev/null
+++ b/1016_linux-5.13.17.patch
@@ -0,0 +1,11676 @@
+diff --git a/Documentation/fault-injection/provoke-crashes.rst b/Documentation/fault-injection/provoke-crashes.rst
+index a20ba5d939320..18de17354206a 100644
+--- a/Documentation/fault-injection/provoke-crashes.rst
++++ b/Documentation/fault-injection/provoke-crashes.rst
+@@ -29,7 +29,7 @@ recur_count
+ cpoint_name
+ Where in the kernel to trigger the action. It can be
+ one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
+- FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD,
++ FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_QUEUE_RQ,
+ IDE_CORE_CP, or DIRECT
+
+ cpoint_type
+diff --git a/Makefile b/Makefile
+index cbb2f35baedbc..c79a2c70a22ba 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 13
+-SUBLEVEL = 16
++SUBLEVEL = 17
+ EXTRAVERSION =
+ NAME = Opossums on Parade
+
+diff --git a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+index 7028e21bdd980..910eacc8ad3bd 100644
+--- a/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
++++ b/arch/arm/boot/dts/aspeed-g6-pinctrl.dtsi
+@@ -208,12 +208,12 @@
+ };
+
+ pinctrl_hvi3c3_default: hvi3c3_default {
+- function = "HVI3C3";
++ function = "I3C3";
+ groups = "HVI3C3";
+ };
+
+ pinctrl_hvi3c4_default: hvi3c4_default {
+- function = "HVI3C4";
++ function = "I3C4";
+ groups = "HVI3C4";
+ };
+
+diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts
+index edca66c232c15..ebbc9b23aef1c 100644
+--- a/arch/arm/boot/dts/at91-sam9x60ek.dts
++++ b/arch/arm/boot/dts/at91-sam9x60ek.dts
+@@ -92,6 +92,8 @@
+
+ leds {
+ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_leds>;
+ status = "okay"; /* Conflict with pwm0. */
+
+ red {
+@@ -537,6 +539,10 @@
+ AT91_PIOA 19 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI) /* PA19 DAT2 periph A with pullup */
+ AT91_PIOA 20 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)>; /* PA20 DAT3 periph A with pullup */
+ };
++ pinctrl_sdmmc0_cd: sdmmc0_cd {
++ atmel,pins =
++ <AT91_PIOA 23 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
+ };
+
+ sdmmc1 {
+@@ -569,6 +575,14 @@
+ AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
+ };
++
++ leds {
++ pinctrl_gpio_leds: gpio_leds {
++ atmel,pins = <AT91_PIOB 11 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
++ AT91_PIOB 12 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
++ AT91_PIOB 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
++ };
+ }; /* pinctrl */
+
+ &pwm0 {
+@@ -580,7 +594,7 @@
+ &sdmmc0 {
+ bus-width = <4>;
+ pinctrl-names = "default";
+- pinctrl-0 = <&pinctrl_sdmmc0_default>;
++ pinctrl-0 = <&pinctrl_sdmmc0_default &pinctrl_sdmmc0_cd>;
+ status = "okay";
+ cd-gpios = <&pioA 23 GPIO_ACTIVE_LOW>;
+ disable-wp;
+diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+index 9c55a921263bd..cc55d1684322b 100644
+--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+@@ -57,6 +57,8 @@
+ };
+
+ spi0: spi@f0004000 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_spi0_cs>;
+ cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
+ status = "okay";
+ };
+@@ -169,6 +171,8 @@
+ };
+
+ spi1: spi@f8008000 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_spi1_cs>;
+ cs-gpios = <&pioC 25 0>;
+ status = "okay";
+ };
+@@ -248,6 +252,26 @@
+ <AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
+ AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+ };
++
++ pinctrl_gpio_leds: gpio_leds_default {
++ atmel,pins =
++ <AT91_PIOE 23 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
++ AT91_PIOE 24 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
++
++ pinctrl_spi0_cs: spi0_cs_default {
++ atmel,pins =
++ <AT91_PIOD 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
++ AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
++
++ pinctrl_spi1_cs: spi1_cs_default {
++ atmel,pins = <AT91_PIOC 25 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
++
++ pinctrl_vcc_mmc0_reg_gpio: vcc_mmc0_reg_gpio_default {
++ atmel,pins = <AT91_PIOE 2 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
+ };
+ };
+ };
+@@ -339,6 +363,8 @@
+
+ vcc_mmc0_reg: fixedregulator_mmc0 {
+ compatible = "regulator-fixed";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_vcc_mmc0_reg_gpio>;
+ gpio = <&pioE 2 GPIO_ACTIVE_LOW>;
+ regulator-name = "mmc0-card-supply";
+ regulator-min-microvolt = <3300000>;
+@@ -362,6 +388,9 @@
+
+ leds {
+ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_leds>;
++ status = "okay";
+
+ d2 {
+ label = "d2";
+diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+index 0b3ad1b580b83..e42dae06b5826 100644
+--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
+@@ -90,6 +90,8 @@
+ };
+
+ spi1: spi@fc018000 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_spi0_cs>;
+ cs-gpios = <&pioB 21 0>;
+ status = "okay";
+ };
+@@ -147,6 +149,19 @@
+ atmel,pins =
+ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
++ pinctrl_spi0_cs: spi0_cs_default {
++ atmel,pins =
++ <AT91_PIOB 21 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
++ pinctrl_gpio_leds: gpio_leds_default {
++ atmel,pins =
++ <AT91_PIOD 30 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
++ AT91_PIOE 15 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
++ pinctrl_vcc_mmc1_reg: vcc_mmc1_reg {
++ atmel,pins =
++ <AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
++ };
+ };
+ };
+ };
+@@ -252,6 +267,8 @@
+
+ leds {
+ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_gpio_leds>;
+ status = "okay";
+
+ d8 {
+@@ -278,6 +295,8 @@
+
+ vcc_mmc1_reg: fixedregulator_mmc1 {
+ compatible = "regulator-fixed";
++ pinctrl-names = "default";
++ pinctrl-0 = <&pinctrl_vcc_mmc1_reg>;
+ gpio = <&pioE 4 GPIO_ACTIVE_LOW>;
+ regulator-name = "VDD MCI1";
+ regulator-min-microvolt = <3300000>;
+diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
+index 157a950a55d38..686c7b7c79d55 100644
+--- a/arch/arm/boot/dts/meson8.dtsi
++++ b/arch/arm/boot/dts/meson8.dtsi
+@@ -304,8 +304,13 @@
+ "pp2", "ppmmu2", "pp4", "ppmmu4",
+ "pp5", "ppmmu5", "pp6", "ppmmu6";
+ resets = <&reset RESET_MALI>;
++
+ clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
+ clock-names = "bus", "core";
++
++ assigned-clocks = <&clkc CLKID_MALI>;
++ assigned-clock-rates = <318750000>;
++
+ operating-points-v2 = <&gpu_opp_table>;
+ #cooling-cells = <2>; /* min followed by max */
+ };
+diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts
+index 8e48ccc6b634e..7e8ddc6f1252b 100644
+--- a/arch/arm/boot/dts/meson8b-ec100.dts
++++ b/arch/arm/boot/dts/meson8b-ec100.dts
+@@ -148,7 +148,7 @@
+ regulator-min-microvolt = <860000>;
+ regulator-max-microvolt = <1140000>;
+
+- vin-supply = <&vcc_5v>;
++ pwm-supply = <&vcc_5v>;
+
+ pwms = <&pwm_cd 0 1148 0>;
+ pwm-dutycycle-range = <100 0>;
+@@ -232,7 +232,7 @@
+ regulator-min-microvolt = <860000>;
+ regulator-max-microvolt = <1140000>;
+
+- vin-supply = <&vcc_5v>;
++ pwm-supply = <&vcc_5v>;
+
+ pwms = <&pwm_cd 1 1148 0>;
+ pwm-dutycycle-range = <100 0>;
+diff --git a/arch/arm/boot/dts/meson8b-mxq.dts b/arch/arm/boot/dts/meson8b-mxq.dts
+index f3937d55472d4..7adedd3258c33 100644
+--- a/arch/arm/boot/dts/meson8b-mxq.dts
++++ b/arch/arm/boot/dts/meson8b-mxq.dts
+@@ -34,6 +34,8 @@
+ regulator-min-microvolt = <860000>;
+ regulator-max-microvolt = <1140000>;
+
++ pwm-supply = <&vcc_5v>;
++
+ pwms = <&pwm_cd 0 1148 0>;
+ pwm-dutycycle-range = <100 0>;
+
+@@ -79,7 +81,7 @@
+ regulator-min-microvolt = <860000>;
+ regulator-max-microvolt = <1140000>;
+
+- vin-supply = <&vcc_5v>;
++ pwm-supply = <&vcc_5v>;
+
+ pwms = <&pwm_cd 1 1148 0>;
+ pwm-dutycycle-range = <100 0>;
+diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
+index c440ef94e0820..04356bc639faf 100644
+--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
++++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
+@@ -131,7 +131,7 @@
+ regulator-min-microvolt = <860000>;
+ regulator-max-microvolt = <1140000>;
+
+- vin-supply = <&p5v0>;
++ pwm-supply = <&p5v0>;
+
+ pwms = <&pwm_cd 0 12218 0>;
+ pwm-dutycycle-range = <91 0>;
+@@ -163,7 +163,7 @@
+ regulator-min-microvolt = <860000>;
+ regulator-max-microvolt = <1140000>;
+
+- vin-supply = <&p5v0>;
++ pwm-supply = <&p5v0>;
+
+ pwms = <&pwm_cd 1 12218 0>;
+ pwm-dutycycle-range = <91 0>;
+diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
+index 10244e59d56dd..56a0bb7eb0e69 100644
+--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
++++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
+@@ -102,7 +102,7 @@
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0x11001000 0x1000>,
+- <0x11002000 0x1000>,
++ <0x11002000 0x2000>,
+ <0x11004000 0x2000>,
+ <0x11006000 0x2000>;
+ };
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index a05b1ab2dd12c..04da07ae44208 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -135,6 +135,23 @@
+ pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
+ status = "okay";
+ reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
++ /*
++ * U-Boot port for Turris Mox has a bug which always expects that "ranges" DT property
++ * contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and
++ * 2 size cells and also expects that the second range starts at 16 MB offset. If these
++ * conditions are not met then U-Boot crashes during loading kernel DTB file. PCIe address
++ * space is 128 MB long, so the best split between MEM and IO is to use fixed 16 MB window
++ * for IO and the rest 112 MB (64+32+16) for MEM, despite that maximal IO size is just 64 kB.
++ * This bug is not present in U-Boot ports for other Armada 3700 devices and is fixed in
++ * U-Boot version 2021.07. See relevant U-Boot commits (the last one contains fix):
++ * https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7
++ * https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf
++ * https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33
++ */
++ #address-cells = <3>;
++ #size-cells = <2>;
++ ranges = <0x81000000 0 0xe8000000 0 0xe8000000 0 0x01000000 /* Port 0 IO */
++ 0x82000000 0 0xe9000000 0 0xe9000000 0 0x07000000>; /* Port 0 MEM */
+
+ /* enabled by U-Boot if PCIe module is present */
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+index 5db81a416cd65..9acc5d2b5a002 100644
+--- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi
+@@ -489,8 +489,15 @@
+ #interrupt-cells = <1>;
+ msi-parent = <&pcie0>;
+ msi-controller;
+- ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
+- 0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
++ /*
++ * The 128 MiB address range [0xe8000000-0xf0000000] is
++ * dedicated for PCIe and can be assigned to 8 windows
++ * with size a power of two. Use one 64 KiB window for
++ * IO at the end and the remaining seven windows
++ * (totaling 127 MiB) for MEM.
++ */
++ ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */
++ 0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */
+ interrupt-map-mask = <0 0 0 7>;
+ interrupt-map = <0 0 0 1 &pcie_intc 0>,
+ <0 0 0 2 &pcie_intc 1>,
+diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
+index 3eb8550da1fc5..3fa1ad1d0f02c 100644
+--- a/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
++++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor-coachz.dtsi
+@@ -23,7 +23,7 @@ ap_h1_spi: &spi0 {};
+ adau7002: audio-codec-1 {
+ compatible = "adi,adau7002";
+ IOVDD-supply = <&pp1800_l15a>;
+- wakeup-delay-ms = <15>;
++ wakeup-delay-ms = <80>;
+ #sound-dai-cells = <0>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/qcom/sm8250.dtsi b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+index 09b5523965579..1316bea3eab52 100644
+--- a/arch/arm64/boot/dts/qcom/sm8250.dtsi
++++ b/arch/arm64/boot/dts/qcom/sm8250.dtsi
+@@ -2123,7 +2123,7 @@
+ <&gcc GCC_USB3_PHY_SEC_BCR>;
+ reset-names = "phy", "common";
+
+- usb_2_ssphy: lane@88eb200 {
++ usb_2_ssphy: lanes@88eb200 {
+ reg = <0 0x088eb200 0 0x200>,
+ <0 0x088eb400 0 0x200>,
+ <0 0x088eb800 0 0x800>;
+diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
+index 202c4fc88bd51..dde3a07bc417c 100644
+--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
++++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
+@@ -20,6 +20,7 @@
+ pinctrl-names = "default";
+ phy-handle = <&phy0>;
+ tx-internal-delay-ps = <2000>;
++ rx-internal-delay-ps = <1800>;
+ status = "okay";
+
+ phy0: ethernet-phy@0 {
+diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
+index 6783c3ad08567..57784341f39d7 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
+@@ -277,10 +277,6 @@
+ interrupt-parent = <&gpio1>;
+ interrupts = <28 IRQ_TYPE_LEVEL_LOW>;
+
+- /* Depends on LVDS */
+- max-clock = <135000000>;
+- min-vrefresh = <50>;
+-
+ adi,input-depth = <8>;
+ adi,input-colorspace = "rgb";
+ adi,input-clock = "1x";
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index facf4d41d32a2..3ed09e7f81f03 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -15,6 +15,7 @@
+ #include <linux/fs.h>
+ #include <linux/mman.h>
+ #include <linux/sched.h>
++#include <linux/kmemleak.h>
+ #include <linux/kvm.h>
+ #include <linux/kvm_irqfd.h>
+ #include <linux/irqbypass.h>
+@@ -1957,6 +1958,12 @@ static int finalize_hyp_mode(void)
+ if (ret)
+ return ret;
+
++ /*
++ * Exclude HYP BSS from kmemleak so that it doesn't get peeked
++ * at, which would end badly once the section is inaccessible.
++ * None of other sections should ever be introspected.
++ */
++ kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
+ ret = pkvm_mark_hyp_section(__hyp_bss);
+ if (ret)
+ return ret;
+diff --git a/arch/m68k/Kconfig.cpu b/arch/m68k/Kconfig.cpu
+index f4d23977d2a5a..1127d64b6c1dc 100644
+--- a/arch/m68k/Kconfig.cpu
++++ b/arch/m68k/Kconfig.cpu
+@@ -26,6 +26,7 @@ config COLDFIRE
+ bool "Coldfire CPU family support"
+ select ARCH_HAVE_CUSTOM_GPIO_H
+ select CPU_HAS_NO_BITFIELDS
++ select CPU_HAS_NO_CAS
+ select CPU_HAS_NO_MULDIV64
+ select GENERIC_CSUM
+ select GPIOLIB
+@@ -39,6 +40,7 @@ config M68000
+ bool
+ depends on !MMU
+ select CPU_HAS_NO_BITFIELDS
++ select CPU_HAS_NO_CAS
+ select CPU_HAS_NO_MULDIV64
+ select CPU_HAS_NO_UNALIGNED
+ select GENERIC_CSUM
+@@ -54,6 +56,7 @@ config M68000
+ config MCPU32
+ bool
+ select CPU_HAS_NO_BITFIELDS
++ select CPU_HAS_NO_CAS
+ select CPU_HAS_NO_UNALIGNED
+ select CPU_NO_EFFICIENT_FFS
+ help
+@@ -383,7 +386,7 @@ config ADVANCED
+
+ config RMW_INSNS
+ bool "Use read-modify-write instructions"
+- depends on ADVANCED
++ depends on ADVANCED && !CPU_HAS_NO_CAS
+ help
+ This allows to use certain instructions that work with indivisible
+ read-modify-write bus cycles. While this is faster than the
+@@ -459,6 +462,9 @@ config NODES_SHIFT
+ config CPU_HAS_NO_BITFIELDS
+ bool
+
++config CPU_HAS_NO_CAS
++ bool
++
+ config CPU_HAS_NO_MULDIV64
+ bool
+
+diff --git a/arch/m68k/coldfire/clk.c b/arch/m68k/coldfire/clk.c
+index 076a9caa9557b..c895a189c5ae3 100644
+--- a/arch/m68k/coldfire/clk.c
++++ b/arch/m68k/coldfire/clk.c
+@@ -92,7 +92,7 @@ int clk_enable(struct clk *clk)
+ unsigned long flags;
+
+ if (!clk)
+- return -EINVAL;
++ return 0;
+
+ spin_lock_irqsave(&clk_lock, flags);
+ if ((clk->enabled++ == 0) && clk->clk_ops)
+diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
+index d2875e32abfca..79e55421cfb18 100644
+--- a/arch/m68k/emu/nfeth.c
++++ b/arch/m68k/emu/nfeth.c
+@@ -254,8 +254,8 @@ static void __exit nfeth_cleanup(void)
+
+ for (i = 0; i < MAX_UNIT; i++) {
+ if (nfeth_dev[i]) {
+- unregister_netdev(nfeth_dev[0]);
+- free_netdev(nfeth_dev[0]);
++ unregister_netdev(nfeth_dev[i]);
++ free_netdev(nfeth_dev[i]);
+ }
+ }
+ free_irq(nfEtherIRQ, nfeth_interrupt);
+diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
+index 8925f3969478f..5445ae106077f 100644
+--- a/arch/s390/include/asm/kvm_host.h
++++ b/arch/s390/include/asm/kvm_host.h
+@@ -962,6 +962,7 @@ struct kvm_arch{
+ atomic64_t cmma_dirty_pages;
+ /* subset of available cpu features enabled by user space */
+ DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
++ /* indexed by vcpu_idx */
+ DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
+ struct kvm_s390_gisa_interrupt gisa_int;
+ struct kvm_s390_pv pv;
+diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
+index bb958d32bd813..a142671c449b1 100644
+--- a/arch/s390/kernel/debug.c
++++ b/arch/s390/kernel/debug.c
+@@ -24,6 +24,7 @@
+ #include <linux/export.h>
+ #include <linux/init.h>
+ #include <linux/fs.h>
++#include <linux/minmax.h>
+ #include <linux/debugfs.h>
+
+ #include <asm/debug.h>
+@@ -92,6 +93,8 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, const char *in_buf);
+ static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
+ char *out_buf, debug_sprintf_entry_t *curr_event);
++static void debug_areas_swap(debug_info_t *a, debug_info_t *b);
++static void debug_events_append(debug_info_t *dest, debug_info_t *src);
+
+ /* globals */
+
+@@ -311,24 +314,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
+ goto out;
+
+ rc->mode = mode & ~S_IFMT;
+-
+- /* create root directory */
+- rc->debugfs_root_entry = debugfs_create_dir(rc->name,
+- debug_debugfs_root_entry);
+-
+- /* append new element to linked list */
+- if (!debug_area_first) {
+- /* first element in list */
+- debug_area_first = rc;
+- rc->prev = NULL;
+- } else {
+- /* append element to end of list */
+- debug_area_last->next = rc;
+- rc->prev = debug_area_last;
+- }
+- debug_area_last = rc;
+- rc->next = NULL;
+-
+ refcount_set(&rc->ref_count, 1);
+ out:
+ return rc;
+@@ -388,27 +373,10 @@ static void debug_info_get(debug_info_t *db_info)
+ */
+ static void debug_info_put(debug_info_t *db_info)
+ {
+- int i;
+-
+ if (!db_info)
+ return;
+- if (refcount_dec_and_test(&db_info->ref_count)) {
+- for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
+- if (!db_info->views[i])
+- continue;
+- debugfs_remove(db_info->debugfs_entries[i]);
+- }
+- debugfs_remove(db_info->debugfs_root_entry);
+- if (db_info == debug_area_first)
+- debug_area_first = db_info->next;
+- if (db_info == debug_area_last)
+- debug_area_last = db_info->prev;
+- if (db_info->prev)
+- db_info->prev->next = db_info->next;
+- if (db_info->next)
+- db_info->next->prev = db_info->prev;
++ if (refcount_dec_and_test(&db_info->ref_count))
+ debug_info_free(db_info);
+- }
+ }
+
+ /*
+@@ -632,6 +600,31 @@ static int debug_close(struct inode *inode, struct file *file)
+ return 0; /* success */
+ }
+
++/* Create debugfs entries and add to internal list. */
++static void _debug_register(debug_info_t *id)
++{
++ /* create root directory */
++ id->debugfs_root_entry = debugfs_create_dir(id->name,
++ debug_debugfs_root_entry);
++
++ /* append new element to linked list */
++ if (!debug_area_first) {
++ /* first element in list */
++ debug_area_first = id;
++ id->prev = NULL;
++ } else {
++ /* append element to end of list */
++ debug_area_last->next = id;
++ id->prev = debug_area_last;
++ }
++ debug_area_last = id;
++ id->next = NULL;
++
++ debug_register_view(id, &debug_level_view);
++ debug_register_view(id, &debug_flush_view);
++ debug_register_view(id, &debug_pages_view);
++}
++
+ /**
+ * debug_register_mode() - creates and initializes debug area.
+ *
+@@ -661,19 +654,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
+ if ((uid != 0) || (gid != 0))
+ pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
+ BUG_ON(!initialized);
+- mutex_lock(&debug_mutex);
+
+ /* create new debug_info */
+ rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
+- if (!rc)
+- goto out;
+- debug_register_view(rc, &debug_level_view);
+- debug_register_view(rc, &debug_flush_view);
+- debug_register_view(rc, &debug_pages_view);
+-out:
+- if (!rc)
++ if (rc) {
++ mutex_lock(&debug_mutex);
++ _debug_register(rc);
++ mutex_unlock(&debug_mutex);
++ } else {
+ pr_err("Registering debug feature %s failed\n", name);
+- mutex_unlock(&debug_mutex);
++ }
+ return rc;
+ }
+ EXPORT_SYMBOL(debug_register_mode);
+@@ -702,6 +692,27 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
+ }
+ EXPORT_SYMBOL(debug_register);
+
++/* Remove debugfs entries and remove from internal list. */
++static void _debug_unregister(debug_info_t *id)
++{
++ int i;
++
++ for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
++ if (!id->views[i])
++ continue;
++ debugfs_remove(id->debugfs_entries[i]);
++ }
++ debugfs_remove(id->debugfs_root_entry);
++ if (id == debug_area_first)
++ debug_area_first = id->next;
++ if (id == debug_area_last)
++ debug_area_last = id->prev;
++ if (id->prev)
++ id->prev->next = id->next;
++ if (id->next)
++ id->next->prev = id->prev;
++}
++
+ /**
+ * debug_unregister() - give back debug area.
+ *
+@@ -715,8 +726,10 @@ void debug_unregister(debug_info_t *id)
+ if (!id)
+ return;
+ mutex_lock(&debug_mutex);
+- debug_info_put(id);
++ _debug_unregister(id);
+ mutex_unlock(&debug_mutex);
++
++ debug_info_put(id);
+ }
+ EXPORT_SYMBOL(debug_unregister);
+
+@@ -726,35 +739,28 @@ EXPORT_SYMBOL(debug_unregister);
+ */
+ static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
+ {
+- debug_entry_t ***new_areas;
++ debug_info_t *new_id;
+ unsigned long flags;
+- int rc = 0;
+
+ if (!id || (nr_areas <= 0) || (pages_per_area < 0))
+ return -EINVAL;
+- if (pages_per_area > 0) {
+- new_areas = debug_areas_alloc(pages_per_area, nr_areas);
+- if (!new_areas) {
+- pr_info("Allocating memory for %i pages failed\n",
+- pages_per_area);
+- rc = -ENOMEM;
+- goto out;
+- }
+- } else {
+- new_areas = NULL;
++
++ new_id = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
++ id->level, ALL_AREAS);
++ if (!new_id) {
++ pr_info("Allocating memory for %i pages failed\n",
++ pages_per_area);
++ return -ENOMEM;
+ }
++
+ spin_lock_irqsave(&id->lock, flags);
+- debug_areas_free(id);
+- id->areas = new_areas;
+- id->nr_areas = nr_areas;
+- id->pages_per_area = pages_per_area;
+- id->active_area = 0;
+- memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
+- memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
++ debug_events_append(new_id, id);
++ debug_areas_swap(new_id, id);
++ debug_info_free(new_id);
+ spin_unlock_irqrestore(&id->lock, flags);
+ pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
+-out:
+- return rc;
++
++ return 0;
+ }
+
+ /**
+@@ -821,6 +827,42 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
+ id->active_entries[id->active_area]);
+ }
+
++/* Swap debug areas of a and b. */
++static void debug_areas_swap(debug_info_t *a, debug_info_t *b)
++{
++ swap(a->nr_areas, b->nr_areas);
++ swap(a->pages_per_area, b->pages_per_area);
++ swap(a->areas, b->areas);
++ swap(a->active_area, b->active_area);
++ swap(a->active_pages, b->active_pages);
++ swap(a->active_entries, b->active_entries);
++}
++
++/* Append all debug events in active area from source to destination log. */
++static void debug_events_append(debug_info_t *dest, debug_info_t *src)
++{
++ debug_entry_t *from, *to, *last;
++
++ if (!src->areas || !dest->areas)
++ return;
++
++ /* Loop over all entries in src, starting with oldest. */
++ from = get_active_entry(src);
++ last = from;
++ do {
++ if (from->clock != 0LL) {
++ to = get_active_entry(dest);
++ memset(to, 0, dest->entry_size);
++ memcpy(to, from, min(src->entry_size,
++ dest->entry_size));
++ proceed_active_entry(dest);
++ }
++
++ proceed_active_entry(src);
++ from = get_active_entry(src);
++ } while (from != last);
++}
++
+ /*
+ * debug_finish_entry:
+ * - set timestamp, caller address, cpu number etc.
+diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
+index d548d60caed25..16256e17a544a 100644
+--- a/arch/s390/kvm/interrupt.c
++++ b/arch/s390/kvm/interrupt.c
+@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
+ static void __set_cpu_idle(struct kvm_vcpu *vcpu)
+ {
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
+- set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
++ set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+ }
+
+ static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
+ {
+ kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
+- clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
++ clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+ }
+
+ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
+@@ -3050,18 +3050,18 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
+
+ static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
+ {
+- int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
++ int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
+ struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
+ struct kvm_vcpu *vcpu;
+
+- for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
+- vcpu = kvm_get_vcpu(kvm, vcpu_id);
++ for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
++ vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+ if (psw_ioint_disabled(vcpu))
+ continue;
+ deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
+ if (deliverable_mask) {
+ /* lately kicked but not yet running */
+- if (test_and_set_bit(vcpu_id, gi->kicked_mask))
++ if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
+ return;
+ kvm_s390_vcpu_wakeup(vcpu);
+ return;
+diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
+index 876fc1f7282a0..32173fffad3f1 100644
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -4020,7 +4020,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
+ kvm_s390_patch_guest_per_regs(vcpu);
+ }
+
+- clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
++ clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
+
+ vcpu->arch.sie_block->icptcode = 0;
+ cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
+diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
+index 9fad25109b0dd..ecd741ee3276e 100644
+--- a/arch/s390/kvm/kvm-s390.h
++++ b/arch/s390/kvm/kvm-s390.h
+@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
+
+ static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
+ {
+- return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
++ return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
+ }
+
+ static inline int kvm_is_ucontrol(struct kvm *kvm)
+diff --git a/arch/s390/mm/kasan_init.c b/arch/s390/mm/kasan_init.c
+index db4d303aaaa9a..d7fcfe97d168d 100644
+--- a/arch/s390/mm/kasan_init.c
++++ b/arch/s390/mm/kasan_init.c
+@@ -108,6 +108,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
+ sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
+ }
+
++ /*
++ * The first 1MB of 1:1 mapping is mapped with 4KB pages
++ */
+ while (address < end) {
+ pg_dir = pgd_offset_k(address);
+ if (pgd_none(*pg_dir)) {
+@@ -158,30 +161,26 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
+
+ pm_dir = pmd_offset(pu_dir, address);
+ if (pmd_none(*pm_dir)) {
+- if (mode == POPULATE_ZERO_SHADOW &&
+- IS_ALIGNED(address, PMD_SIZE) &&
++ if (IS_ALIGNED(address, PMD_SIZE) &&
+ end - address >= PMD_SIZE) {
+- pmd_populate(&init_mm, pm_dir,
+- kasan_early_shadow_pte);
+- address = (address + PMD_SIZE) & PMD_MASK;
+- continue;
+- }
+- /* the first megabyte of 1:1 is mapped with 4k pages */
+- if (has_edat && address && end - address >= PMD_SIZE &&
+- mode != POPULATE_ZERO_SHADOW) {
+- void *page;
+-
+- if (mode == POPULATE_ONE2ONE) {
+- page = (void *)address;
+- } else {
+- page = kasan_early_alloc_segment();
+- memset(page, 0, _SEGMENT_SIZE);
++ if (mode == POPULATE_ZERO_SHADOW) {
++ pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
++ address = (address + PMD_SIZE) & PMD_MASK;
++ continue;
++ } else if (has_edat && address) {
++ void *page;
++
++ if (mode == POPULATE_ONE2ONE) {
++ page = (void *)address;
++ } else {
++ page = kasan_early_alloc_segment();
++ memset(page, 0, _SEGMENT_SIZE);
++ }
++ pmd_val(*pm_dir) = __pa(page) | sgt_prot;
++ address = (address + PMD_SIZE) & PMD_MASK;
++ continue;
+ }
+- pmd_val(*pm_dir) = __pa(page) | sgt_prot;
+- address = (address + PMD_SIZE) & PMD_MASK;
+- continue;
+ }
+-
+ pt_dir = kasan_early_pte_alloc();
+ pmd_populate(&init_mm, pm_dir, pt_dir);
+ } else if (pmd_large(*pm_dir)) {
+diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
+index 8fcb7ecb7225a..77cd965cffefa 100644
+--- a/arch/s390/pci/pci.c
++++ b/arch/s390/pci/pci.c
+@@ -661,9 +661,10 @@ int zpci_enable_device(struct zpci_dev *zdev)
+ {
+ int rc;
+
+- rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
+- if (rc)
++ if (clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES)) {
++ rc = -EIO;
+ goto out;
++ }
+
+ rc = zpci_dma_init_device(zdev);
+ if (rc)
+@@ -684,7 +685,7 @@ int zpci_disable_device(struct zpci_dev *zdev)
+ * The zPCI function may already be disabled by the platform, this is
+ * detected in clp_disable_fh() which becomes a no-op.
+ */
+- return clp_disable_fh(zdev);
++ return clp_disable_fh(zdev) ? -EIO : 0;
+ }
+
+ /**
+diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
+index d3331596ddbe1..0a0e8b8293bef 100644
+--- a/arch/s390/pci/pci_clp.c
++++ b/arch/s390/pci/pci_clp.c
+@@ -213,15 +213,19 @@ out:
+ }
+
+ static int clp_refresh_fh(u32 fid);
+-/*
+- * Enable/Disable a given PCI function and update its function handle if
+- * necessary
++/**
++ * clp_set_pci_fn() - Execute a command on a PCI function
++ * @zdev: Function that will be affected
++ * @nr_dma_as: DMA address space number
++ * @command: The command code to execute
++ *
++ * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
++ * > 0 for non-success platform responses
+ */
+ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
+ {
+ struct clp_req_rsp_set_pci *rrb;
+ int rc, retries = 100;
+- u32 fid = zdev->fid;
+
+ rrb = clp_alloc_block(GFP_KERNEL);
+ if (!rrb)
+@@ -245,17 +249,16 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
+ }
+ } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
+
+- if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+- zpci_err("Set PCI FN:\n");
+- zpci_err_clp(rrb->response.hdr.rsp, rc);
+- }
+-
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+ zdev->fh = rrb->response.fh;
+- } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
+- rrb->response.fh == 0) {
++ } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY) {
+ /* Function is already in desired state - update handle */
+- rc = clp_refresh_fh(fid);
++ rc = clp_refresh_fh(zdev->fid);
++ } else {
++ zpci_err("Set PCI FN:\n");
++ zpci_err_clp(rrb->response.hdr.rsp, rc);
++ if (!rc)
++ rc = rrb->response.hdr.rsp;
+ }
+ clp_free_block(rrb);
+ return rc;
+@@ -301,17 +304,13 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
+
+ rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
+ zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+- if (rc)
+- goto out;
+-
+- if (zpci_use_mio(zdev)) {
++ if (!rc && zpci_use_mio(zdev)) {
+ rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
+ zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
+ zdev->fid, zdev->fh, rc);
+ if (rc)
+ clp_disable_fh(zdev);
+ }
+-out:
+ return rc;
+ }
+
+diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
+index 95a223b3e56a2..8bb92e9f4e973 100644
+--- a/arch/x86/boot/compressed/efi_thunk_64.S
++++ b/arch/x86/boot/compressed/efi_thunk_64.S
+@@ -5,9 +5,8 @@
+ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
+ *
+ * Because this thunking occurs before ExitBootServices() we have to
+- * restore the firmware's 32-bit GDT before we make EFI service calls,
+- * since the firmware's 32-bit IDT is still currently installed and it
+- * needs to be able to service interrupts.
++ * restore the firmware's 32-bit GDT and IDT before we make EFI service
++ * calls.
+ *
+ * On the plus side, we don't have to worry about mangling 64-bit
+ * addresses into 32-bits because we're executing with an identity
+@@ -39,7 +38,7 @@ SYM_FUNC_START(__efi64_thunk)
+ /*
+ * Convert x86-64 ABI params to i386 ABI
+ */
+- subq $32, %rsp
++ subq $64, %rsp
+ movl %esi, 0x0(%rsp)
+ movl %edx, 0x4(%rsp)
+ movl %ecx, 0x8(%rsp)
+@@ -49,14 +48,19 @@ SYM_FUNC_START(__efi64_thunk)
+ leaq 0x14(%rsp), %rbx
+ sgdt (%rbx)
+
++ addq $16, %rbx
++ sidt (%rbx)
+
+ /*
+- * Switch to gdt with 32-bit segments. This is the firmware GDT
+- * that was installed when the kernel started executing. This
+- * pointer was saved at the EFI stub entry point in head_64.S.
++ * Switch to IDT and GDT with 32-bit segments. This is the firmware GDT
++ * and IDT that was installed when the kernel started executing. The
++ * pointers were saved at the EFI stub entry point in head_64.S.
+ *
+ * Pass the saved DS selector to the 32-bit code, and use far return to
+ * restore the saved CS selector.
+ */
++ leaq efi32_boot_idt(%rip), %rax
++ lidt (%rax)
+ leaq efi32_boot_gdt(%rip), %rax
+ lgdt (%rax)
+
+@@ -67,7 +71,7 @@ SYM_FUNC_START(__efi64_thunk)
+ pushq %rax
+ lretq
+
+-1: addq $32, %rsp
++1: addq $64, %rsp
+ movq %rdi, %rax
+
+ pop %rbx
+@@ -128,10 +132,13 @@ SYM_FUNC_START_LOCAL(efi_enter32)
+
+ /*
+ * Some firmware will return with interrupts enabled. Be sure to
+- * disable them before we switch GDTs.
++ * disable them before we switch GDTs and IDTs.
+ */
+ cli
+
++ lidtl (%ebx)
++ subl $16, %ebx
++
+ lgdtl (%ebx)
+
+ movl %cr4, %eax
+@@ -166,6 +173,11 @@ SYM_DATA_START(efi32_boot_gdt)
+ .quad 0
+ SYM_DATA_END(efi32_boot_gdt)
+
++SYM_DATA_START(efi32_boot_idt)
++ .word 0
++ .quad 0
++SYM_DATA_END(efi32_boot_idt)
++
+ SYM_DATA_START(efi32_boot_cs)
+ .word 0
+ SYM_DATA_END(efi32_boot_cs)
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index a2347ded77ea2..572c535cf45bc 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -319,6 +319,9 @@ SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
+ movw %cs, rva(efi32_boot_cs)(%ebp)
+ movw %ds, rva(efi32_boot_ds)(%ebp)
+
++ /* Store firmware IDT descriptor */
++ sidtl rva(efi32_boot_idt)(%ebp)
++
+ /* Disable paging */
+ movl %cr0, %eax
+ btrl $X86_CR0_PG_BIT, %eax
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 2144e54a6c892..388643ca2177e 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -849,6 +849,8 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
+ return -EINVAL;
+
+ err = skcipher_walk_virt(&walk, req, false);
++ if (err)
++ return err;
+
+ if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
+ int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+@@ -862,7 +864,10 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
+ skcipher_request_set_crypt(&subreq, req->src, req->dst,
+ blocks * AES_BLOCK_SIZE, req->iv);
+ req = &subreq;
++
+ err = skcipher_walk_virt(&walk, req, false);
++ if (err)
++ return err;
+ } else {
+ tail = 0;
+ }
+diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
+index 921f47b9bb247..ccc9ee1971e89 100644
+--- a/arch/x86/events/amd/ibs.c
++++ b/arch/x86/events/amd/ibs.c
+@@ -571,6 +571,7 @@ static struct perf_ibs perf_ibs_op = {
+ .start = perf_ibs_start,
+ .stop = perf_ibs_stop,
+ .read = perf_ibs_read,
++ .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
+ },
+ .msr = MSR_AMD64_IBSOPCTL,
+ .config_mask = IBS_OP_CONFIG_MASK,
+diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
+index ddfb3cad8dff2..2a8319fad0b75 100644
+--- a/arch/x86/include/asm/mce.h
++++ b/arch/x86/include/asm/mce.h
+@@ -265,6 +265,7 @@ enum mcp_flags {
+ MCP_TIMESTAMP = BIT(0), /* log time stamp */
+ MCP_UC = BIT(1), /* log uncorrected errors */
+ MCP_DONTLOG = BIT(2), /* only clear, don't log */
++ MCP_QUEUE_LOG = BIT(3), /* only queue to genpool */
+ };
+ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);
+
+diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c
+index bf7fe87a7e884..01ff4014b7f67 100644
+--- a/arch/x86/kernel/cpu/mce/core.c
++++ b/arch/x86/kernel/cpu/mce/core.c
+@@ -817,7 +817,10 @@ log_it:
+ if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
+ goto clear_it;
+
+- mce_log(&m);
++ if (flags & MCP_QUEUE_LOG)
++ mce_gen_pool_add(&m);
++ else
++ mce_log(&m);
+
+ clear_it:
+ /*
+@@ -1630,10 +1633,12 @@ static void __mcheck_cpu_init_generic(void)
+ m_fl = MCP_DONTLOG;
+
+ /*
+- * Log the machine checks left over from the previous reset.
++ * Log the machine checks left over from the previous reset. Log them
++ * only, do not start processing them. That will happen in mcheck_late_init()
++ * when all consumers have been registered on the notifier chain.
+ */
+ bitmap_fill(all_banks, MAX_NR_BANKS);
+- machine_check_poll(MCP_UC | m_fl, &all_banks);
++ machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
+
+ cr4_set_bits(X86_CR4_MCE);
+
+diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
+index 57e4bb695ff96..8caf871b796f2 100644
+--- a/arch/x86/kernel/cpu/resctrl/monitor.c
++++ b/arch/x86/kernel/cpu/resctrl/monitor.c
+@@ -304,6 +304,12 @@ static u64 __mon_event_count(u32 rmid, struct rmid_read *rr)
+ case QOS_L3_MBM_LOCAL_EVENT_ID:
+ m = &rr->d->mbm_local[rmid];
+ break;
++ default:
++ /*
++ * Code would never reach here because an invalid
++ * event id would fail the __rmid_read.
++ */
++ return RMID_VAL_ERROR;
+ }
+
+ if (rr->first) {
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index 9d3783800c8ce..9d76c33683649 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -257,12 +257,6 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
+ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+ struct x86_exception *exception)
+ {
+- /* Check if guest physical address doesn't exceed guest maximum */
+- if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
+- exception->error_code |= PFERR_RSVD_MASK;
+- return UNMAPPED_GVA;
+- }
+-
+ return gpa;
+ }
+
+@@ -2760,6 +2754,7 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
+ kvm_pfn_t pfn, int max_level)
+ {
+ struct kvm_lpage_info *linfo;
++ int host_level;
+
+ max_level = min(max_level, max_huge_page_level);
+ for ( ; max_level > PG_LEVEL_4K; max_level--) {
+@@ -2771,7 +2766,8 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
+ if (max_level == PG_LEVEL_4K)
+ return PG_LEVEL_4K;
+
+- return host_pfn_mapping_level(kvm, gfn, pfn, slot);
++ host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
++ return min(host_level, max_level);
+ }
+
+ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
+@@ -2795,17 +2791,12 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
+ if (!slot)
+ return PG_LEVEL_4K;
+
+- level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
+- if (level == PG_LEVEL_4K)
+- return level;
+-
+- *req_level = level = min(level, max_level);
+-
+ /*
+ * Enforce the iTLB multihit workaround after capturing the requested
+ * level, which will be used to do precise, accurate accounting.
+ */
+- if (huge_page_disallowed)
++ *req_level = level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
++ if (level == PG_LEVEL_4K || huge_page_disallowed)
+ return PG_LEVEL_4K;
+
+ /*
+diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
+index 41ef3ed5349f1..3c225bc0c0826 100644
+--- a/arch/x86/kvm/mmu/tdp_mmu.c
++++ b/arch/x86/kvm/mmu/tdp_mmu.c
+@@ -410,6 +410,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
+ bool was_leaf = was_present && is_last_spte(old_spte, level);
+ bool is_leaf = is_present && is_last_spte(new_spte, level);
+ bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
++ bool was_large, is_large;
+
+ WARN_ON(level > PT64_ROOT_MAX_LEVEL);
+ WARN_ON(level < PG_LEVEL_4K);
+@@ -443,13 +444,6 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
+
+ trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
+
+- if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
+- if (is_large_pte(old_spte))
+- atomic64_sub(1, (atomic64_t*)&kvm->stat.lpages);
+- else
+- atomic64_add(1, (atomic64_t*)&kvm->stat.lpages);
+- }
+-
+ /*
+ * The only times a SPTE should be changed from a non-present to
+ * non-present state is when an MMIO entry is installed/modified/
+@@ -475,6 +469,18 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
+ return;
+ }
+
++ /*
++ * Update large page stats if a large page is being zapped, created, or
++ * is replacing an existing shadow page.
++ */
++ was_large = was_leaf && is_large_pte(old_spte);
++ is_large = is_leaf && is_large_pte(new_spte);
++ if (was_large != is_large) {
++ if (was_large)
++ atomic64_sub(1, (atomic64_t *)&kvm->stat.lpages);
++ else
++ atomic64_add(1, (atomic64_t *)&kvm->stat.lpages);
++ }
+
+ if (was_leaf && is_dirty_spte(old_spte) &&
+ (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index df3b7e5644169..1d47f2dbe3e99 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2226,12 +2226,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+ ~PIN_BASED_VMX_PREEMPTION_TIMER);
+
+ /* Posted interrupts setting is only taken from vmcs12. */
+- if (nested_cpu_has_posted_intr(vmcs12)) {
++ vmx->nested.pi_pending = false;
++ if (nested_cpu_has_posted_intr(vmcs12))
+ vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
+- vmx->nested.pi_pending = false;
+- } else {
++ else
+ exec_control &= ~PIN_BASED_POSTED_INTR;
+- }
+ pin_controls_set(vmx, exec_control);
+
+ /*
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index dcd4f43c23de5..6af7d0b0c154b 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6452,6 +6452,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
++ if (vmx->emulation_required)
++ return;
++
+ if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
+ handle_external_interrupt_irqoff(vcpu);
+ else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 1e11198f89934..93e851041c9c0 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3223,6 +3223,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ if (!msr_info->host_initiated) {
+ s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+ adjust_tsc_offset_guest(vcpu, adj);
++ /* Before back to guest, tsc_timestamp must be adjusted
++ * as well, otherwise guest's percpu pvclock time could jump.
++ */
++ kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
+ }
+ vcpu->arch.ia32_tsc_adjust_msr = data;
+ }
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index eccbe2aed7c3f..4df33cc08eee0 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2333,6 +2333,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
+ __rq = bfq_find_rq_fmerge(bfqd, bio, q);
+ if (__rq && elv_bio_merge_ok(__rq, bio)) {
+ *req = __rq;
++
++ if (blk_discard_mergable(__rq))
++ return ELEVATOR_DISCARD_MERGE;
+ return ELEVATOR_FRONT_MERGE;
+ }
+
+diff --git a/block/bio.c b/block/bio.c
+index 1fab762e079be..d95e3456ba0c5 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -979,6 +979,14 @@ static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
+ return 0;
+ }
+
++static void bio_put_pages(struct page **pages, size_t size, size_t off)
++{
++ size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
++
++ for (i = 0; i < nr; i++)
++ put_page(pages[i]);
++}
++
+ #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
+
+ /**
+@@ -1023,8 +1031,10 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+ if (same_page)
+ put_page(page);
+ } else {
+- if (WARN_ON_ONCE(bio_full(bio, len)))
+- return -EINVAL;
++ if (WARN_ON_ONCE(bio_full(bio, len))) {
++ bio_put_pages(pages + i, left, offset);
++ return -EINVAL;
++ }
+ __bio_add_page(bio, page, len, offset);
+ }
+ offset = 0;
+@@ -1069,6 +1079,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
+ len = min_t(size_t, PAGE_SIZE - offset, left);
+ if (bio_add_hw_page(q, bio, page, len, offset,
+ max_append_sectors, &same_page) != len) {
++ bio_put_pages(pages + i, left, offset);
+ ret = -EINVAL;
+ break;
+ }
+diff --git a/block/blk-crypto.c b/block/blk-crypto.c
+index c5bdaafffa29f..103c2e2d50d67 100644
+--- a/block/blk-crypto.c
++++ b/block/blk-crypto.c
+@@ -332,7 +332,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+ if (mode->keysize == 0)
+ return -EINVAL;
+
+- if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
++ if (dun_bytes == 0 || dun_bytes > mode->ivsize)
+ return -EINVAL;
+
+ if (!is_power_of_2(data_unit_size))
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index bcdff1879c346..526953525e35e 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -348,6 +348,8 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
+ trace_block_split(split, (*bio)->bi_iter.bi_sector);
+ submit_bio_noacct(*bio);
+ *bio = split;
++
++ blk_throtl_charge_bio_split(*bio);
+ }
+ }
+
+@@ -705,22 +707,6 @@ static void blk_account_io_merge_request(struct request *req)
+ }
+ }
+
+-/*
+- * Two cases of handling DISCARD merge:
+- * If max_discard_segments > 1, the driver takes every bio
+- * as a range and send them to controller together. The ranges
+- * needn't to be contiguous.
+- * Otherwise, the bios/requests will be handled as same as
+- * others which should be contiguous.
+- */
+-static inline bool blk_discard_mergable(struct request *req)
+-{
1494 +- if (req_op(req) == REQ_OP_DISCARD &&
1495 +- queue_max_discard_segments(req->q) > 1)
1496 +- return true;
1497 +- return false;
1498 +-}
1499 +-
1500 + static enum elv_merge blk_try_req_merge(struct request *req,
1501 + struct request *next)
1502 + {
1503 +diff --git a/block/blk-throttle.c b/block/blk-throttle.c
1504 +index b1b22d863bdf8..55c49015e5333 100644
1505 +--- a/block/blk-throttle.c
1506 ++++ b/block/blk-throttle.c
1507 +@@ -178,6 +178,9 @@ struct throtl_grp {
1508 + unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
1509 + unsigned long bio_cnt_reset_time;
1510 +
1511 ++ atomic_t io_split_cnt[2];
1512 ++ atomic_t last_io_split_cnt[2];
1513 ++
1514 + struct blkg_rwstat stat_bytes;
1515 + struct blkg_rwstat stat_ios;
1516 + };
1517 +@@ -777,6 +780,8 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
1518 + tg->bytes_disp[rw] = 0;
1519 + tg->io_disp[rw] = 0;
1520 +
1521 ++ atomic_set(&tg->io_split_cnt[rw], 0);
1522 ++
1523 + /*
1524 + * Previous slice has expired. We must have trimmed it after last
1525 + * bio dispatch. That means since start of last slice, we never used
1526 +@@ -799,6 +804,9 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
1527 + tg->io_disp[rw] = 0;
1528 + tg->slice_start[rw] = jiffies;
1529 + tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
1530 ++
1531 ++ atomic_set(&tg->io_split_cnt[rw], 0);
1532 ++
1533 + throtl_log(&tg->service_queue,
1534 + "[%c] new slice start=%lu end=%lu jiffies=%lu",
1535 + rw == READ ? 'R' : 'W', tg->slice_start[rw],
1536 +@@ -1031,6 +1039,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
1537 + jiffies + tg->td->throtl_slice);
1538 + }
1539 +
1540 ++ if (iops_limit != UINT_MAX)
1541 ++ tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
1542 ++
1543 + if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
1544 + tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
1545 + if (wait)
1546 +@@ -2052,12 +2063,14 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
1547 + }
1548 +
1549 + if (tg->iops[READ][LIMIT_LOW]) {
1550 ++ tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
1551 + iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1552 + if (iops >= tg->iops[READ][LIMIT_LOW])
1553 + tg->last_low_overflow_time[READ] = now;
1554 + }
1555 +
1556 + if (tg->iops[WRITE][LIMIT_LOW]) {
1557 ++ tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
1558 + iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
1559 + if (iops >= tg->iops[WRITE][LIMIT_LOW])
1560 + tg->last_low_overflow_time[WRITE] = now;
1561 +@@ -2176,6 +2189,25 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
1562 + }
1563 + #endif
1564 +
1565 ++void blk_throtl_charge_bio_split(struct bio *bio)
1566 ++{
1567 ++ struct blkcg_gq *blkg = bio->bi_blkg;
1568 ++ struct throtl_grp *parent = blkg_to_tg(blkg);
1569 ++ struct throtl_service_queue *parent_sq;
1570 ++ bool rw = bio_data_dir(bio);
1571 ++
1572 ++ do {
1573 ++ if (!parent->has_rules[rw])
1574 ++ break;
1575 ++
1576 ++ atomic_inc(&parent->io_split_cnt[rw]);
1577 ++ atomic_inc(&parent->last_io_split_cnt[rw]);
1578 ++
1579 ++ parent_sq = parent->service_queue.parent_sq;
1580 ++ parent = sq_to_tg(parent_sq);
1581 ++ } while (parent);
1582 ++}
1583 ++
1584 + bool blk_throtl_bio(struct bio *bio)
1585 + {
1586 + struct request_queue *q = bio->bi_bdev->bd_disk->queue;
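The new blk_throtl_charge_bio_split() above uses a lock-free deferred-charging idiom: the submission path only bumps per-group atomics for each extra bio produced by a split, and the throttler later folds them into its ordinary dispatch counters with atomic_xchg() when a limit is next evaluated. A minimal standalone sketch of the same pattern (names illustrative, C11 atomics standing in for the kernel's atomic_t):

    #include <stdatomic.h>

    struct tg_stats {
            atomic_uint pending;    /* bumped on the hot submit path */
            unsigned int charged;   /* folded in under the slice logic */
    };

    static void note_split(struct tg_stats *s)
    {
            atomic_fetch_add_explicit(&s->pending, 1, memory_order_relaxed);
    }

    static void fold_pending(struct tg_stats *s)
    {
            /* drain everything accumulated so far, exactly once */
            s->charged += atomic_exchange_explicit(&s->pending, 0,
                                                   memory_order_relaxed);
    }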
1587 +diff --git a/block/blk.h b/block/blk.h
1588 +index 54d48987c21b2..40b00d18bdb25 100644
1589 +--- a/block/blk.h
1590 ++++ b/block/blk.h
1591 +@@ -290,11 +290,13 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
1592 + extern int blk_throtl_init(struct request_queue *q);
1593 + extern void blk_throtl_exit(struct request_queue *q);
1594 + extern void blk_throtl_register_queue(struct request_queue *q);
1595 ++extern void blk_throtl_charge_bio_split(struct bio *bio);
1596 + bool blk_throtl_bio(struct bio *bio);
1597 + #else /* CONFIG_BLK_DEV_THROTTLING */
1598 + static inline int blk_throtl_init(struct request_queue *q) { return 0; }
1599 + static inline void blk_throtl_exit(struct request_queue *q) { }
1600 + static inline void blk_throtl_register_queue(struct request_queue *q) { }
1601 ++static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
1602 + static inline bool blk_throtl_bio(struct bio *bio) { return false; }
1603 + #endif /* CONFIG_BLK_DEV_THROTTLING */
1604 + #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
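The blk.h hunk follows the usual convention for CONFIG-gated kernel APIs: one real declaration under the #ifdef and an empty static inline stub otherwise, so callers such as __blk_queue_split() compile unchanged whether or not throttling is built in. Schematically:

    #ifdef CONFIG_SOME_FEATURE
    extern void feature_hook(struct bio *bio);             /* real version */
    #else
    static inline void feature_hook(struct bio *bio) { }   /* no-op stub */
    #endif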
1605 +diff --git a/block/elevator.c b/block/elevator.c
1606 +index 440699c281193..73e0591acfd4b 100644
1607 +--- a/block/elevator.c
1608 ++++ b/block/elevator.c
1609 +@@ -336,6 +336,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
1610 + __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
1611 + if (__rq && elv_bio_merge_ok(__rq, bio)) {
1612 + *req = __rq;
1613 ++
1614 ++ if (blk_discard_mergable(__rq))
1615 ++ return ELEVATOR_DISCARD_MERGE;
1616 + return ELEVATOR_BACK_MERGE;
1617 + }
1618 +
1619 +diff --git a/block/mq-deadline.c b/block/mq-deadline.c
1620 +index 8eea2cbf2bf4a..8dca7255d04cf 100644
1621 +--- a/block/mq-deadline.c
1622 ++++ b/block/mq-deadline.c
1623 +@@ -454,6 +454,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
1624 +
1625 + if (elv_bio_merge_ok(__rq, bio)) {
1626 + *rq = __rq;
1627 ++ if (blk_discard_mergable(__rq))
1628 ++ return ELEVATOR_DISCARD_MERGE;
1629 + return ELEVATOR_FRONT_MERGE;
1630 + }
1631 + }
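Both elevator hunks rely on blk_discard_mergable(), which this release deletes from blk-merge.c (see above) and evidently re-homes in a shared header outside this excerpt so elevator.c and mq-deadline.c can use it too. Its body, exactly as removed:

    static inline bool blk_discard_mergable(struct request *req)
    {
            if (req_op(req) == REQ_OP_DISCARD &&
                queue_max_discard_segments(req->q) > 1)
                    return true;
            return false;
    }

Checking it before returning ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE routes multi-range discards through the dedicated ELEVATOR_DISCARD_MERGE path instead of the contiguity-based merge logic.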
1632 +diff --git a/certs/Makefile b/certs/Makefile
1633 +index 359239a0ee9e3..f9344e52ecdae 100644
1634 +--- a/certs/Makefile
1635 ++++ b/certs/Makefile
1636 +@@ -57,11 +57,19 @@ endif
1637 + redirect_openssl = 2>&1
1638 + quiet_redirect_openssl = 2>&1
1639 + silent_redirect_openssl = 2>/dev/null
1640 ++openssl_available = $(shell openssl help 2>/dev/null && echo yes)
1641 +
1642 + # We do it this way rather than having a boolean option for enabling an
1643 + # external private key, because 'make randconfig' might enable such a
1644 + # boolean option and we unfortunately can't make it depend on !RANDCONFIG.
1645 + ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
1646 ++
1647 ++ifeq ($(openssl_available),yes)
1648 ++X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
1649 ++
1650 ++$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
1651 ++endif
1652 ++
1653 + $(obj)/signing_key.pem: $(obj)/x509.genkey
1654 + @$(kecho) "###"
1655 + @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
1656 +diff --git a/crypto/Makefile b/crypto/Makefile
1657 +index 10526d4559b80..c633f15a04813 100644
1658 +--- a/crypto/Makefile
1659 ++++ b/crypto/Makefile
1660 +@@ -74,7 +74,6 @@ obj-$(CONFIG_CRYPTO_NULL2) += crypto_null.o
1661 + obj-$(CONFIG_CRYPTO_MD4) += md4.o
1662 + obj-$(CONFIG_CRYPTO_MD5) += md5.o
1663 + obj-$(CONFIG_CRYPTO_RMD160) += rmd160.o
1664 +-obj-$(CONFIG_CRYPTO_RMD320) += rmd320.o
1665 + obj-$(CONFIG_CRYPTO_SHA1) += sha1_generic.o
1666 + obj-$(CONFIG_CRYPTO_SHA256) += sha256_generic.o
1667 + obj-$(CONFIG_CRYPTO_SHA512) += sha512_generic.o
1668 +diff --git a/crypto/ecc.h b/crypto/ecc.h
1669 +index a006132646a43..1350e8eb6ac23 100644
1670 +--- a/crypto/ecc.h
1671 ++++ b/crypto/ecc.h
1672 +@@ -27,6 +27,7 @@
1673 + #define _CRYPTO_ECC_H
1674 +
1675 + #include <crypto/ecc_curve.h>
1676 ++#include <asm/unaligned.h>
1677 +
1678 + /* One digit is u64 qword. */
1679 + #define ECC_CURVE_NIST_P192_DIGITS 3
1680 +@@ -46,13 +47,13 @@
1681 + * @out: Output array
1682 + * @ndigits: Number of digits to copy
1683 + */
1684 +-static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
1685 ++static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
1686 + {
1687 + const __be64 *src = (__force __be64 *)in;
1688 + int i;
1689 +
1690 + for (i = 0; i < ndigits; i++)
1691 +- out[i] = be64_to_cpu(src[ndigits - 1 - i]);
1692 ++ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
1693 + }
1694 +
1695 + /**
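Widening the parameter to const void * and reading through get_unaligned_be64() lets ecc_swap_digits() accept raw byte buffers with no alignment guarantee; dereferencing a plain __be64 pointer would be undefined behavior, or a fault, on strict-alignment architectures. A hedged usage sketch (the odd offset is deliberately contrived):

    u64 digits[ECC_CURVE_NIST_P192_DIGITS];
    const u8 *raw = payload + 3;    /* hypothetical, not 8-byte aligned */

    /* safe now: each limb is fetched with an unaligned big-endian load */
    ecc_swap_digits(raw, digits, ECC_CURVE_NIST_P192_DIGITS);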
1696 +diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
1697 +index 6b7c158dc5087..f9c00875bc0e4 100644
1698 +--- a/crypto/tcrypt.c
1699 ++++ b/crypto/tcrypt.c
1700 +@@ -290,6 +290,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
1701 + }
1702 +
1703 + ret = crypto_aead_setauthsize(tfm, authsize);
1704 ++ if (ret) {
1705 ++ pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
1706 ++ ret);
1707 ++ goto out_free_tfm;
1708 ++ }
1709 +
1710 + for (i = 0; i < num_mb; ++i)
1711 + if (testmgr_alloc_buf(data[i].xbuf)) {
1712 +@@ -315,7 +320,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
1713 + for (i = 0; i < num_mb; ++i) {
1714 + data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
1715 + if (!data[i].req) {
1716 +- pr_err("alg: skcipher: Failed to allocate request for %s\n",
1717 ++ pr_err("alg: aead: Failed to allocate request for %s\n",
1718 + algo);
1719 + while (i--)
1720 + aead_request_free(data[i].req);
1721 +@@ -567,13 +572,19 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
1722 + sgout = &sg[9];
1723 +
1724 + tfm = crypto_alloc_aead(algo, 0, 0);
1725 +-
1726 + if (IS_ERR(tfm)) {
1727 + pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
1728 + PTR_ERR(tfm));
1729 + goto out_notfm;
1730 + }
1731 +
1732 ++ ret = crypto_aead_setauthsize(tfm, authsize);
1733 ++ if (ret) {
1734 ++ pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
1735 ++ ret);
1736 ++ goto out_noreq;
1737 ++ }
1738 ++
1739 + crypto_init_wait(&wait);
1740 + printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
1741 + get_driver_name(crypto_aead, tfm), e);
1742 +@@ -611,8 +622,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
1743 + break;
1744 + }
1745 + }
1746 ++
1747 + ret = crypto_aead_setkey(tfm, key, *keysize);
1748 +- ret = crypto_aead_setauthsize(tfm, authsize);
1749 ++ if (ret) {
1750 ++ pr_err("setkey() failed flags=%x: %d\n",
1751 ++ crypto_aead_get_flags(tfm), ret);
1752 ++ goto out;
1753 ++ }
1754 +
1755 + iv_len = crypto_aead_ivsize(tfm);
1756 + if (iv_len)
1757 +@@ -622,15 +638,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
1758 + printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
1759 + i, *keysize * 8, bs);
1760 +
1761 +-
1762 + memset(tvmem[0], 0xff, PAGE_SIZE);
1763 +
1764 +- if (ret) {
1765 +- pr_err("setkey() failed flags=%x\n",
1766 +- crypto_aead_get_flags(tfm));
1767 +- goto out;
1768 +- }
1769 +-
1770 + sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
1771 + assoc, aad_size);
1772 +
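The tcrypt changes all enforce one rule: every crypto_aead_setkey() and crypto_aead_setauthsize() return value must be checked before requests are issued, since a silently failed setup renders the subsequent speed numbers meaningless. The checked setup sequence, reduced to a sketch:

    tfm = crypto_alloc_aead(algo, 0, 0);
    if (IS_ERR(tfm))
            return PTR_ERR(tfm);

    ret = crypto_aead_setkey(tfm, key, keylen);
    if (ret)
            goto out_free;

    ret = crypto_aead_setauthsize(tfm, authsize);
    if (ret)
            goto out_free;

    /* ... run the timed requests ... */

    out_free:
            crypto_free_aead(tfm);
            return ret;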
1773 +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
1774 +index 61c762961ca8e..44f434acfce08 100644
1775 +--- a/drivers/ata/libata-core.c
1776 ++++ b/drivers/ata/libata-core.c
1777 +@@ -5573,7 +5573,7 @@ int ata_host_start(struct ata_host *host)
1778 + have_stop = 1;
1779 + }
1780 +
1781 +- if (host->ops->host_stop)
1782 ++ if (host->ops && host->ops->host_stop)
1783 + have_stop = 1;
1784 +
1785 + if (have_stop) {
1786 +diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
1787 +index 2e5e7c9939334..8b2a0eb3f32a4 100644
1788 +--- a/drivers/auxdisplay/hd44780.c
1789 ++++ b/drivers/auxdisplay/hd44780.c
1790 +@@ -323,8 +323,8 @@ static int hd44780_remove(struct platform_device *pdev)
1791 + {
1792 + struct charlcd *lcd = platform_get_drvdata(pdev);
1793 +
1794 +- kfree(lcd->drvdata);
1795 + charlcd_unregister(lcd);
1796 ++ kfree(lcd->drvdata);
1797 +
1798 + kfree(lcd);
1799 + return 0;
1800 +diff --git a/drivers/base/dd.c b/drivers/base/dd.c
1801 +index 592b3955abe22..a421da0c9c012 100644
1802 +--- a/drivers/base/dd.c
1803 ++++ b/drivers/base/dd.c
1804 +@@ -560,7 +560,8 @@ re_probe:
1805 + goto probe_failed;
1806 + }
1807 +
1808 +- if (driver_sysfs_add(dev)) {
1809 ++ ret = driver_sysfs_add(dev);
1810 ++ if (ret) {
1811 + pr_err("%s: driver_sysfs_add(%s) failed\n",
1812 + __func__, dev_name(dev));
1813 + goto probe_failed;
1814 +@@ -582,15 +583,18 @@ re_probe:
1815 + goto probe_failed;
1816 + }
1817 +
1818 +- if (device_add_groups(dev, drv->dev_groups)) {
1819 ++ ret = device_add_groups(dev, drv->dev_groups);
1820 ++ if (ret) {
1821 + dev_err(dev, "device_add_groups() failed\n");
1822 + goto dev_groups_failed;
1823 + }
1824 +
1825 +- if (dev_has_sync_state(dev) &&
1826 +- device_create_file(dev, &dev_attr_state_synced)) {
1827 +- dev_err(dev, "state_synced sysfs add failed\n");
1828 +- goto dev_sysfs_state_synced_failed;
1829 ++ if (dev_has_sync_state(dev)) {
1830 ++ ret = device_create_file(dev, &dev_attr_state_synced);
1831 ++ if (ret) {
1832 ++ dev_err(dev, "state_synced sysfs add failed\n");
1833 ++ goto dev_sysfs_state_synced_failed;
1834 ++ }
1835 + }
1836 +
1837 + if (test_remove) {
1838 +diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
1839 +index 68c549d712304..bdbedc6660a87 100644
1840 +--- a/drivers/base/firmware_loader/main.c
1841 ++++ b/drivers/base/firmware_loader/main.c
1842 +@@ -165,7 +165,7 @@ static inline int fw_state_wait(struct fw_priv *fw_priv)
1843 + return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
1844 + }
1845 +
1846 +-static int fw_cache_piggyback_on_request(const char *name);
1847 ++static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
1848 +
1849 + static struct fw_priv *__allocate_fw_priv(const char *fw_name,
1850 + struct firmware_cache *fwc,
1851 +@@ -707,10 +707,8 @@ int assign_fw(struct firmware *fw, struct device *device)
1852 + * on request firmware.
1853 + */
1854 + if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
1855 +- fw_priv->fwc->state == FW_LOADER_START_CACHE) {
1856 +- if (fw_cache_piggyback_on_request(fw_priv->fw_name))
1857 +- kref_get(&fw_priv->ref);
1858 +- }
1859 ++ fw_priv->fwc->state == FW_LOADER_START_CACHE)
1860 ++ fw_cache_piggyback_on_request(fw_priv);
1861 +
1862 + /* pass the pages buffer to driver at the last minute */
1863 + fw_set_page_data(fw_priv, fw);
1864 +@@ -1259,11 +1257,11 @@ static int __fw_entry_found(const char *name)
1865 + return 0;
1866 + }
1867 +
1868 +-static int fw_cache_piggyback_on_request(const char *name)
1869 ++static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
1870 + {
1871 +- struct firmware_cache *fwc = &fw_cache;
1872 ++ const char *name = fw_priv->fw_name;
1873 ++ struct firmware_cache *fwc = fw_priv->fwc;
1874 + struct fw_cache_entry *fce;
1875 +- int ret = 0;
1876 +
1877 + spin_lock(&fwc->name_lock);
1878 + if (__fw_entry_found(name))
1879 +@@ -1271,13 +1269,12 @@ static int fw_cache_piggyback_on_request(const char *name)
1880 +
1881 + fce = alloc_fw_cache_entry(name);
1882 + if (fce) {
1883 +- ret = 1;
1884 + list_add(&fce->list, &fwc->fw_names);
1885 ++ kref_get(&fw_priv->ref);
1886 + pr_debug("%s: fw: %s\n", __func__, name);
1887 + }
1888 + found:
1889 + spin_unlock(&fwc->name_lock);
1890 +- return ret;
1891 + }
1892 +
1893 + static void free_fw_cache_entry(struct fw_cache_entry *fce)
1894 +@@ -1508,9 +1505,8 @@ static inline void unregister_fw_pm_ops(void)
1895 + unregister_pm_notifier(&fw_cache.pm_notify);
1896 + }
1897 + #else
1898 +-static int fw_cache_piggyback_on_request(const char *name)
1899 ++static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
1900 + {
1901 +- return 0;
1902 + }
1903 + static inline int register_fw_pm_ops(void)
1904 + {
1905 +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
1906 +index 297e95be25b3b..cf1dca0cde2c1 100644
1907 +--- a/drivers/base/regmap/regmap.c
1908 ++++ b/drivers/base/regmap/regmap.c
1909 +@@ -1652,7 +1652,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1910 + if (ret) {
1911 + dev_err(map->dev,
1912 + "Error in caching of register: %x ret: %d\n",
1913 +- reg + i, ret);
1914 ++ reg + regmap_get_offset(map, i), ret);
1915 + return ret;
1916 + }
1917 + }
1918 +diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
1919 +index 6535614a7dc13..1df2b5801c3bc 100644
1920 +--- a/drivers/bcma/main.c
1921 ++++ b/drivers/bcma/main.c
1922 +@@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq);
1923 +
1924 + void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
1925 + {
1926 ++ device_initialize(&core->dev);
1927 + core->dev.release = bcma_release_core_dev;
1928 + core->dev.bus = &bcma_bus_type;
1929 + dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
1930 +@@ -277,11 +278,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
1931 + {
1932 + int err;
1933 +
1934 +- err = device_register(&core->dev);
1935 ++ err = device_add(&core->dev);
1936 + if (err) {
1937 + bcma_err(bus, "Could not register dev for core 0x%03X\n",
1938 + core->id.id);
1939 +- put_device(&core->dev);
1940 + return;
1941 + }
1942 + core->dev_registered = true;
1943 +@@ -372,7 +372,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
1944 + /* Now noone uses internally-handled cores, we can free them */
1945 + list_for_each_entry_safe(core, tmp, &bus->cores, list) {
1946 + list_del(&core->list);
1947 +- kfree(core);
1948 ++ put_device(&core->dev);
1949 + }
1950 + }
1951 +
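The bcma change is a reference-counting repair: a structure that embeds a struct device must be freed by the device's release() callback, never by a bare kfree(). Splitting registration into device_initialize() plus device_add() makes put_device() valid on every core, whether or not device_add() ever ran or succeeded. A condensed sketch of the lifecycle (not the driver's exact flow):

    device_initialize(&core->dev);          /* refcount 1; release() armed */
    core->dev.release = bcma_release_core_dev;

    err = device_add(&core->dev);           /* publish to the driver core */

    /* teardown, registered or not: */
    if (core->dev_registered)
            device_del(&core->dev);
    put_device(&core->dev);                 /* final ref drop frees core */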
1952 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1953 +index 1061894a55df2..4acf5c6cb80d2 100644
1954 +--- a/drivers/block/nbd.c
1955 ++++ b/drivers/block/nbd.c
1956 +@@ -1369,6 +1369,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1957 + unsigned int cmd, unsigned long arg)
1958 + {
1959 + struct nbd_config *config = nbd->config;
1960 ++ loff_t bytesize;
1961 +
1962 + switch (cmd) {
1963 + case NBD_DISCONNECT:
1964 +@@ -1383,8 +1384,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1965 + case NBD_SET_SIZE:
1966 + return nbd_set_size(nbd, arg, config->blksize);
1967 + case NBD_SET_SIZE_BLOCKS:
1968 +- return nbd_set_size(nbd, arg * config->blksize,
1969 +- config->blksize);
1970 ++ if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
1971 ++ return -EINVAL;
1972 ++ return nbd_set_size(nbd, bytesize, config->blksize);
1973 + case NBD_SET_TIMEOUT:
1974 + nbd_set_cmd_timeout(nbd, arg);
1975 + return 0;
1976 +@@ -1715,7 +1717,17 @@ static int nbd_dev_add(int index)
1977 + refcount_set(&nbd->refs, 1);
1978 + INIT_LIST_HEAD(&nbd->list);
1979 + disk->major = NBD_MAJOR;
1980 ++
1981 ++ /* Too big first_minor can cause duplicate creation of
1982 ++ * sysfs files/links, since first_minor will be truncated to
1983 ++ * byte in __device_add_disk().
1984 ++ */
1985 + disk->first_minor = index << part_shift;
1986 ++ if (disk->first_minor > 0xff) {
1987 ++ err = -EINVAL;
1988 ++ goto out_free_idr;
1989 ++ }
1990 ++
1991 + disk->fops = &nbd_fops;
1992 + disk->private_data = nbd;
1993 + sprintf(disk->disk_name, "nbd%d", index);
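check_mul_overflow(), from <linux/overflow.h>, computes the product into its third argument and returns true if the result wrapped, which is exactly what an ioctl multiplying two user-controlled quantities needs. Generic usage sketch:

    #include <linux/overflow.h>

    size_t total;

    /* count and elem_size come from userspace: reject wrapped products */
    if (check_mul_overflow(count, elem_size, &total))
            return -EOVERFLOW;

    buf = kmalloc(total, GFP_KERNEL);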
1994 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1995 +index 9122f9cc97cbe..ae0cf5e715842 100644
1996 +--- a/drivers/bluetooth/btusb.c
1997 ++++ b/drivers/bluetooth/btusb.c
1998 +@@ -2912,10 +2912,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
1999 + /* Read the Intel supported features and if new exception formats
2000 + * supported, need to load the additional DDC config to enable.
2001 + */
2002 +- btintel_read_debug_features(hdev, &features);
2003 +-
2004 +- /* Set DDC mask for available debug features */
2005 +- btintel_set_debug_features(hdev, &features);
2006 ++ err = btintel_read_debug_features(hdev, &features);
2007 ++ if (!err) {
2008 ++ /* Set DDC mask for available debug features */
2009 ++ btintel_set_debug_features(hdev, &features);
2010 ++ }
2011 +
2012 + /* Read the Intel version information after loading the FW */
2013 + err = btintel_read_version(hdev, &ver);
2014 +@@ -3008,10 +3009,11 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev)
2015 + /* Read the Intel supported features and if new exception formats
2016 + * supported, need to load the additional DDC config to enable.
2017 + */
2018 +- btintel_read_debug_features(hdev, &features);
2019 +-
2020 +- /* Set DDC mask for available debug features */
2021 +- btintel_set_debug_features(hdev, &features);
2022 ++ err = btintel_read_debug_features(hdev, &features);
2023 ++ if (!err) {
2024 ++ /* Set DDC mask for available debug features */
2025 ++ btintel_set_debug_features(hdev, &features);
2026 ++ }
2027 +
2028 + /* Read the Intel version information after loading the FW */
2029 + err = btintel_read_version_tlv(hdev, &version);
2030 +diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
2031 +index 4308f9ca7a43d..d6ba644f6b00a 100644
2032 +--- a/drivers/char/tpm/Kconfig
2033 ++++ b/drivers/char/tpm/Kconfig
2034 +@@ -89,7 +89,6 @@ config TCG_TIS_SYNQUACER
2035 + config TCG_TIS_I2C_CR50
2036 + tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
2037 + depends on I2C
2038 +- select TCG_CR50
2039 + help
2040 + This is a driver for the Google cr50 I2C TPM interface which is a
2041 + custom microcontroller and requires a custom i2c protocol interface
2042 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
2043 +index 903604769de99..3af4c07a9342f 100644
2044 +--- a/drivers/char/tpm/tpm_ibmvtpm.c
2045 ++++ b/drivers/char/tpm/tpm_ibmvtpm.c
2046 +@@ -106,17 +106,12 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
2047 + {
2048 + struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
2049 + u16 len;
2050 +- int sig;
2051 +
2052 + if (!ibmvtpm->rtce_buf) {
2053 + dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
2054 + return 0;
2055 + }
2056 +
2057 +- sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
2058 +- if (sig)
2059 +- return -EINTR;
2060 +-
2061 + len = ibmvtpm->res_len;
2062 +
2063 + if (count < len) {
2064 +@@ -237,7 +232,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
2065 + * set the processing flag before the Hcall, since we may get the
2066 + * result (interrupt) before even being able to check rc.
2067 + */
2068 +- ibmvtpm->tpm_processing_cmd = true;
2069 ++ ibmvtpm->tpm_processing_cmd = 1;
2070 +
2071 + again:
2072 + rc = ibmvtpm_send_crq(ibmvtpm->vdev,
2073 +@@ -255,7 +250,7 @@ again:
2074 + goto again;
2075 + }
2076 + dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
2077 +- ibmvtpm->tpm_processing_cmd = false;
2078 ++ ibmvtpm->tpm_processing_cmd = 0;
2079 + }
2080 +
2081 + spin_unlock(&ibmvtpm->rtce_lock);
2082 +@@ -269,7 +264,9 @@ static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
2083 +
2084 + static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
2085 + {
2086 +- return 0;
2087 ++ struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
2088 ++
2089 ++ return ibmvtpm->tpm_processing_cmd;
2090 + }
2091 +
2092 + /**
2093 +@@ -457,7 +454,7 @@ static const struct tpm_class_ops tpm_ibmvtpm = {
2094 + .send = tpm_ibmvtpm_send,
2095 + .cancel = tpm_ibmvtpm_cancel,
2096 + .status = tpm_ibmvtpm_status,
2097 +- .req_complete_mask = 0,
2098 ++ .req_complete_mask = 1,
2099 + .req_complete_val = 0,
2100 + .req_canceled = tpm_ibmvtpm_req_canceled,
2101 + };
2102 +@@ -550,7 +547,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
2103 + case VTPM_TPM_COMMAND_RES:
2104 + /* len of the data in rtce buffer */
2105 + ibmvtpm->res_len = be16_to_cpu(crq->len);
2106 +- ibmvtpm->tpm_processing_cmd = false;
2107 ++ ibmvtpm->tpm_processing_cmd = 0;
2108 + wake_up_interruptible(&ibmvtpm->wq);
2109 + return;
2110 + default:
2111 +@@ -688,8 +685,15 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
2112 + goto init_irq_cleanup;
2113 + }
2114 +
2115 +- if (!strcmp(id->compat, "IBM,vtpm20")) {
2116 ++
2117 ++ if (!strcmp(id->compat, "IBM,vtpm20"))
2118 + chip->flags |= TPM_CHIP_FLAG_TPM2;
2119 ++
2120 ++ rc = tpm_get_timeouts(chip);
2121 ++ if (rc)
2122 ++ goto init_irq_cleanup;
2123 ++
2124 ++ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
2125 + rc = tpm2_get_cc_attrs_tbl(chip);
2126 + if (rc)
2127 + goto init_irq_cleanup;
2128 +diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
2129 +index b92aa7d3e93e7..51198b137461e 100644
2130 +--- a/drivers/char/tpm/tpm_ibmvtpm.h
2131 ++++ b/drivers/char/tpm/tpm_ibmvtpm.h
2132 +@@ -41,7 +41,7 @@ struct ibmvtpm_dev {
2133 + wait_queue_head_t wq;
2134 + u16 res_len;
2135 + u32 vtpm_version;
2136 +- bool tpm_processing_cmd;
2137 ++ u8 tpm_processing_cmd;
2138 + };
2139 +
2140 + #define CRQ_RES_BUF_SIZE PAGE_SIZE
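With tpm_processing_cmd widened to a u8 and surfaced through .status, the TPM core's generic polling takes over from the driver-private wait the patch deletes: after a send, the core repeatedly reads status and treats the command as complete once the masked bits match. Roughly (simplified from the core's transmit loop):

    u8 status = chip->ops->status(chip);
    bool done;

    done = (status & chip->ops->req_complete_mask) ==
           chip->ops->req_complete_val;

so req_complete_mask = 1 with req_complete_val = 0 means "busy while the low bit is set", i.e. while tpm_processing_cmd is still 1.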
2141 +diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
2142 +index 47680237d0beb..8bc893df47364 100644
2143 +--- a/drivers/clk/mvebu/kirkwood.c
2144 ++++ b/drivers/clk/mvebu/kirkwood.c
2145 +@@ -265,6 +265,7 @@ static const char *powersave_parents[] = {
2146 + static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = {
2147 + { "powersave", powersave_parents, ARRAY_SIZE(powersave_parents),
2148 + 11, 1, 0 },
2149 ++ { }
2150 + };
2151 +
2152 + static struct clk *clk_muxing_get_src(
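The kirkwood fix is one missing sentinel: the shared mvebu clock code walks the descriptor array until it reaches a zeroed entry, so omitting the trailing { } sends the walk past the end of the array. The convention in miniature (field names illustrative):

    struct desc { const char *name; int shift; };

    static const struct desc descs[] = {
            { "powersave", 11 },
            { }                     /* zeroed sentinel terminates the walk */
    };

    static void walk(const struct desc *d)
    {
            for (; d->name; d++)    /* stop on the all-zero entry */
                    handle(d);
    }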
2153 +diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
2154 +index d7ed99f0001f8..dd0956ad969c1 100644
2155 +--- a/drivers/clocksource/sh_cmt.c
2156 ++++ b/drivers/clocksource/sh_cmt.c
2157 +@@ -579,7 +579,8 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
2158 + ch->flags |= flag;
2159 +
2160 + /* setup timeout if no clockevent */
2161 +- if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
2162 ++ if (ch->cmt->num_channels == 1 &&
2163 ++ flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
2164 + __sh_cmt_set_next(ch, ch->max_match_value);
2165 + out:
2166 + raw_spin_unlock_irqrestore(&ch->lock, flags);
2167 +@@ -621,20 +622,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
2168 + static u64 sh_cmt_clocksource_read(struct clocksource *cs)
2169 + {
2170 + struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
2171 +- unsigned long flags;
2172 + u32 has_wrapped;
2173 +- u64 value;
2174 +- u32 raw;
2175 +
2176 +- raw_spin_lock_irqsave(&ch->lock, flags);
2177 +- value = ch->total_cycles;
2178 +- raw = sh_cmt_get_counter(ch, &has_wrapped);
2179 ++ if (ch->cmt->num_channels == 1) {
2180 ++ unsigned long flags;
2181 ++ u64 value;
2182 ++ u32 raw;
2183 +
2184 +- if (unlikely(has_wrapped))
2185 +- raw += ch->match_value + 1;
2186 +- raw_spin_unlock_irqrestore(&ch->lock, flags);
2187 ++ raw_spin_lock_irqsave(&ch->lock, flags);
2188 ++ value = ch->total_cycles;
2189 ++ raw = sh_cmt_get_counter(ch, &has_wrapped);
2190 ++
2191 ++ if (unlikely(has_wrapped))
2192 ++ raw += ch->match_value + 1;
2193 ++ raw_spin_unlock_irqrestore(&ch->lock, flags);
2194 ++
2195 ++ return value + raw;
2196 ++ }
2197 +
2198 +- return value + raw;
2199 ++ return sh_cmt_get_counter(ch, &has_wrapped);
2200 + }
2201 +
2202 + static int sh_cmt_clocksource_enable(struct clocksource *cs)
2203 +@@ -697,7 +703,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
2204 + cs->disable = sh_cmt_clocksource_disable;
2205 + cs->suspend = sh_cmt_clocksource_suspend;
2206 + cs->resume = sh_cmt_clocksource_resume;
2207 +- cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
2208 ++ cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
2209 + cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
2210 +
2211 + dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
2212 +diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
2213 +index 9691f8612be87..8de776f3b142a 100644
2214 +--- a/drivers/counter/104-quad-8.c
2215 ++++ b/drivers/counter/104-quad-8.c
2216 +@@ -715,12 +715,13 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
2217 + case 1:
2218 + case 3:
2219 + quad8_preset_register_set(priv, count->id, ceiling);
2220 +- break;
2221 ++ mutex_unlock(&priv->lock);
2222 ++ return len;
2223 + }
2224 +
2225 + mutex_unlock(&priv->lock);
2226 +
2227 +- return len;
2228 ++ return -EINVAL;
2229 + }
2230 +
2231 + static ssize_t quad8_count_preset_enable_read(struct counter_device *counter,
2232 +diff --git a/drivers/crypto/hisilicon/sec2/sec.h b/drivers/crypto/hisilicon/sec2/sec.h
2233 +index dfdce2f21e658..1aeb53f28eddb 100644
2234 +--- a/drivers/crypto/hisilicon/sec2/sec.h
2235 ++++ b/drivers/crypto/hisilicon/sec2/sec.h
2236 +@@ -140,11 +140,6 @@ struct sec_ctx {
2237 + struct device *dev;
2238 + };
2239 +
2240 +-enum sec_endian {
2241 +- SEC_LE = 0,
2242 +- SEC_32BE,
2243 +- SEC_64BE
2244 +-};
2245 +
2246 + enum sec_debug_file_index {
2247 + SEC_CLEAR_ENABLE,
2248 +diff --git a/drivers/crypto/hisilicon/sec2/sec_main.c b/drivers/crypto/hisilicon/sec2/sec_main.c
2249 +index 6f0062d4408c3..0305e656b4778 100644
2250 +--- a/drivers/crypto/hisilicon/sec2/sec_main.c
2251 ++++ b/drivers/crypto/hisilicon/sec2/sec_main.c
2252 +@@ -304,31 +304,20 @@ static const struct pci_device_id sec_dev_ids[] = {
2253 + };
2254 + MODULE_DEVICE_TABLE(pci, sec_dev_ids);
2255 +
2256 +-static u8 sec_get_endian(struct hisi_qm *qm)
2257 ++static void sec_set_endian(struct hisi_qm *qm)
2258 + {
2259 + u32 reg;
2260 +
2261 +- /*
2262 +- * As for VF, it is a wrong way to get endian setting by
2263 +- * reading a register of the engine
2264 +- */
2265 +- if (qm->pdev->is_virtfn) {
2266 +- dev_err_ratelimited(&qm->pdev->dev,
2267 +- "cannot access a register in VF!\n");
2268 +- return SEC_LE;
2269 +- }
2270 + reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
2271 +- /* BD little endian mode */
2272 +- if (!(reg & BIT(0)))
2273 +- return SEC_LE;
2274 ++ reg &= ~(BIT(1) | BIT(0));
2275 ++ if (!IS_ENABLED(CONFIG_64BIT))
2276 ++ reg |= BIT(1);
2277 +
2278 +- /* BD 32-bits big endian mode */
2279 +- else if (!(reg & BIT(1)))
2280 +- return SEC_32BE;
2281 +
2282 +- /* BD 64-bits big endian mode */
2283 +- else
2284 +- return SEC_64BE;
2285 ++ if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
2286 ++ reg |= BIT(0);
2287 ++
2288 ++ writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
2289 + }
2290 +
2291 + static int sec_engine_init(struct hisi_qm *qm)
2292 +@@ -382,9 +371,7 @@ static int sec_engine_init(struct hisi_qm *qm)
2293 + qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
2294 +
2295 + /* config endian */
2296 +- reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
2297 +- reg |= sec_get_endian(qm);
2298 +- writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
2299 ++ sec_set_endian(qm);
2300 +
2301 + return 0;
2302 + }
2303 +@@ -921,7 +908,8 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2304 + return 0;
2305 +
2306 + err_alg_unregister:
2307 +- hisi_qm_alg_unregister(qm, &sec_devices);
2308 ++ if (qm->qp_num >= ctx_q_num)
2309 ++ hisi_qm_alg_unregister(qm, &sec_devices);
2310 + err_qm_stop:
2311 + sec_debugfs_exit(qm);
2312 + hisi_qm_stop(qm, QM_NORMAL);
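sec_set_endian() swaps a fragile register read-back (which VFs cannot even perform, per the deleted comment) for a compile-time decision: IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is y or m and 0 otherwise, so both branches type-check but the dead one is discarded. The resulting shape, condensed:

    reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
    reg &= ~(BIT(1) | BIT(0));
    if (!IS_ENABLED(CONFIG_64BIT))                  /* 32-bit kernel */
            reg |= BIT(1);
    if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))      /* big-endian kernel */
            reg |= BIT(0);
    writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);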
2313 +diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
2314 +index d6a7784d29888..f397cc5bf1021 100644
2315 +--- a/drivers/crypto/mxs-dcp.c
2316 ++++ b/drivers/crypto/mxs-dcp.c
2317 +@@ -170,15 +170,19 @@ static struct dcp *global_sdcp;
2318 +
2319 + static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
2320 + {
2321 ++ int dma_err;
2322 + struct dcp *sdcp = global_sdcp;
2323 + const int chan = actx->chan;
2324 + uint32_t stat;
2325 + unsigned long ret;
2326 + struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
2327 +-
2328 + dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
2329 + DMA_TO_DEVICE);
2330 +
2331 ++ dma_err = dma_mapping_error(sdcp->dev, desc_phys);
2332 ++ if (dma_err)
2333 ++ return dma_err;
2334 ++
2335 + reinit_completion(&sdcp->completion[chan]);
2336 +
2337 + /* Clear status register. */
2338 +@@ -216,18 +220,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
2339 + static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
2340 + struct skcipher_request *req, int init)
2341 + {
2342 ++ dma_addr_t key_phys, src_phys, dst_phys;
2343 + struct dcp *sdcp = global_sdcp;
2344 + struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
2345 + struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
2346 + int ret;
2347 +
2348 +- dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2349 +- 2 * AES_KEYSIZE_128,
2350 +- DMA_TO_DEVICE);
2351 +- dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
2352 +- DCP_BUF_SZ, DMA_TO_DEVICE);
2353 +- dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
2354 +- DCP_BUF_SZ, DMA_FROM_DEVICE);
2355 ++ key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2356 ++ 2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
2357 ++ ret = dma_mapping_error(sdcp->dev, key_phys);
2358 ++ if (ret)
2359 ++ return ret;
2360 ++
2361 ++ src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
2362 ++ DCP_BUF_SZ, DMA_TO_DEVICE);
2363 ++ ret = dma_mapping_error(sdcp->dev, src_phys);
2364 ++ if (ret)
2365 ++ goto err_src;
2366 ++
2367 ++ dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
2368 ++ DCP_BUF_SZ, DMA_FROM_DEVICE);
2369 ++ ret = dma_mapping_error(sdcp->dev, dst_phys);
2370 ++ if (ret)
2371 ++ goto err_dst;
2372 +
2373 + if (actx->fill % AES_BLOCK_SIZE) {
2374 + dev_err(sdcp->dev, "Invalid block size!\n");
2375 +@@ -265,10 +280,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
2376 + ret = mxs_dcp_start_dma(actx);
2377 +
2378 + aes_done_run:
2379 ++ dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
2380 ++err_dst:
2381 ++ dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
2382 ++err_src:
2383 + dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
2384 + DMA_TO_DEVICE);
2385 +- dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
2386 +- dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
2387 +
2388 + return ret;
2389 + }
2390 +@@ -557,6 +574,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
2391 + dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
2392 + DCP_BUF_SZ, DMA_TO_DEVICE);
2393 +
2394 ++ ret = dma_mapping_error(sdcp->dev, buf_phys);
2395 ++ if (ret)
2396 ++ return ret;
2397 ++
2398 + /* Fill in the DMA descriptor. */
2399 + desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
2400 + MXS_DCP_CONTROL0_INTERRUPT |
2401 +@@ -589,6 +610,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
2402 + if (rctx->fini) {
2403 + digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
2404 + DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
2405 ++ ret = dma_mapping_error(sdcp->dev, digest_phys);
2406 ++ if (ret)
2407 ++ goto done_run;
2408 ++
2409 + desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
2410 + desc->payload = digest_phys;
2411 + }
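Every dma_map_single() in mxs-dcp now gets the mandatory dma_mapping_error() check, and the new labels unwind in reverse order so a failure partway through only unmaps what was actually mapped. The skeleton (run_dma() is a hypothetical stand-in):

    a = dma_map_single(dev, buf_a, len_a, DMA_TO_DEVICE);
    ret = dma_mapping_error(dev, a);
    if (ret)
            return ret;

    b = dma_map_single(dev, buf_b, len_b, DMA_FROM_DEVICE);
    ret = dma_mapping_error(dev, b);
    if (ret)
            goto err_unmap_a;

    ret = run_dma(a, b);

    dma_unmap_single(dev, b, len_b, DMA_FROM_DEVICE);
    err_unmap_a:
            dma_unmap_single(dev, a, len_a, DMA_TO_DEVICE);
            return ret;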
2412 +diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
2413 +index 0dd4c6b157de9..9b968ac4ee7b6 100644
2414 +--- a/drivers/crypto/omap-aes.c
2415 ++++ b/drivers/crypto/omap-aes.c
2416 +@@ -1175,9 +1175,9 @@ static int omap_aes_probe(struct platform_device *pdev)
2417 + spin_lock_init(&dd->lock);
2418 +
2419 + INIT_LIST_HEAD(&dd->list);
2420 +- spin_lock(&list_lock);
2421 ++ spin_lock_bh(&list_lock);
2422 + list_add_tail(&dd->list, &dev_list);
2423 +- spin_unlock(&list_lock);
2424 ++ spin_unlock_bh(&list_lock);
2425 +
2426 + /* Initialize crypto engine */
2427 + dd->engine = crypto_engine_alloc_init(dev, 1);
2428 +@@ -1264,9 +1264,9 @@ static int omap_aes_remove(struct platform_device *pdev)
2429 + if (!dd)
2430 + return -ENODEV;
2431 +
2432 +- spin_lock(&list_lock);
2433 ++ spin_lock_bh(&list_lock);
2434 + list_del(&dd->list);
2435 +- spin_unlock(&list_lock);
2436 ++ spin_unlock_bh(&list_lock);
2437 +
2438 + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2439 + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
2440 +diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
2441 +index c9d38bcfd1c77..7fdf38e07adf8 100644
2442 +--- a/drivers/crypto/omap-des.c
2443 ++++ b/drivers/crypto/omap-des.c
2444 +@@ -1035,9 +1035,9 @@ static int omap_des_probe(struct platform_device *pdev)
2445 +
2446 +
2447 + INIT_LIST_HEAD(&dd->list);
2448 +- spin_lock(&list_lock);
2449 ++ spin_lock_bh(&list_lock);
2450 + list_add_tail(&dd->list, &dev_list);
2451 +- spin_unlock(&list_lock);
2452 ++ spin_unlock_bh(&list_lock);
2453 +
2454 + /* Initialize des crypto engine */
2455 + dd->engine = crypto_engine_alloc_init(dev, 1);
2456 +@@ -1096,9 +1096,9 @@ static int omap_des_remove(struct platform_device *pdev)
2457 + if (!dd)
2458 + return -ENODEV;
2459 +
2460 +- spin_lock(&list_lock);
2461 ++ spin_lock_bh(&list_lock);
2462 + list_del(&dd->list);
2463 +- spin_unlock(&list_lock);
2464 ++ spin_unlock_bh(&list_lock);
2465 +
2466 + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2467 + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
2468 +diff --git a/drivers/crypto/omap-sham.c b/drivers/crypto/omap-sham.c
2469 +index dd53ad9987b0d..63beea7cdba5e 100644
2470 +--- a/drivers/crypto/omap-sham.c
2471 ++++ b/drivers/crypto/omap-sham.c
2472 +@@ -1736,7 +1736,7 @@ static void omap_sham_done_task(unsigned long data)
2473 + if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
2474 + goto finish;
2475 + } else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
2476 +- if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
2477 ++ if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
2478 + omap_sham_update_dma_stop(dd);
2479 + if (dd->err) {
2480 + err = dd->err;
2481 +@@ -2144,9 +2144,9 @@ static int omap_sham_probe(struct platform_device *pdev)
2482 + (rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
2483 + (rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
2484 +
2485 +- spin_lock(&sham.lock);
2486 ++ spin_lock_bh(&sham.lock);
2487 + list_add_tail(&dd->list, &sham.dev_list);
2488 +- spin_unlock(&sham.lock);
2489 ++ spin_unlock_bh(&sham.lock);
2490 +
2491 + dd->engine = crypto_engine_alloc_init(dev, 1);
2492 + if (!dd->engine) {
2493 +@@ -2194,9 +2194,9 @@ err_algs:
2494 + err_engine_start:
2495 + crypto_engine_exit(dd->engine);
2496 + err_engine:
2497 +- spin_lock(&sham.lock);
2498 ++ spin_lock_bh(&sham.lock);
2499 + list_del(&dd->list);
2500 +- spin_unlock(&sham.lock);
2501 ++ spin_unlock_bh(&sham.lock);
2502 + err_pm:
2503 + pm_runtime_disable(dev);
2504 + if (!dd->polling_mode)
2505 +@@ -2215,9 +2215,9 @@ static int omap_sham_remove(struct platform_device *pdev)
2506 + dd = platform_get_drvdata(pdev);
2507 + if (!dd)
2508 + return -ENODEV;
2509 +- spin_lock(&sham.lock);
2510 ++ spin_lock_bh(&sham.lock);
2511 + list_del(&dd->list);
2512 +- spin_unlock(&sham.lock);
2513 ++ spin_unlock_bh(&sham.lock);
2514 + for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
2515 + for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
2516 + crypto_unregister_ahash(
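The omap drivers' device lists are reached both from process context (probe/remove, shown here) and from softirq context, which is the usual reason a plain spin_lock() becomes spin_lock_bh(): if a tasklet fires on a CPU that already holds the plain lock, it deadlocks against itself. The _bh variant additionally masks local bottom halves for the critical section:

    spin_lock_bh(&list_lock);       /* also masks local softirqs */
    list_add_tail(&dd->list, &dev_list);
    spin_unlock_bh(&list_lock);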
2517 +diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
2518 +index 15f6b9bdfb221..ddf42fb326251 100644
2519 +--- a/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
2520 ++++ b/drivers/crypto/qat/qat_c3xxxvf/adf_c3xxxvf_hw_data.c
2521 +@@ -81,10 +81,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
2522 + hw_data->enable_error_correction = adf_vf_void_noop;
2523 + hw_data->init_admin_comms = adf_vf_int_noop;
2524 + hw_data->exit_admin_comms = adf_vf_void_noop;
2525 +- hw_data->send_admin_init = adf_vf2pf_init;
2526 ++ hw_data->send_admin_init = adf_vf2pf_notify_init;
2527 + hw_data->init_arb = adf_vf_int_noop;
2528 + hw_data->exit_arb = adf_vf_void_noop;
2529 +- hw_data->disable_iov = adf_vf2pf_shutdown;
2530 ++ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
2531 + hw_data->get_accel_mask = get_accel_mask;
2532 + hw_data->get_ae_mask = get_ae_mask;
2533 + hw_data->get_num_accels = get_num_accels;
2534 +diff --git a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
2535 +index d231583428c91..7e202ef925231 100644
2536 +--- a/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
2537 ++++ b/drivers/crypto/qat/qat_c62xvf/adf_c62xvf_hw_data.c
2538 +@@ -81,10 +81,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
2539 + hw_data->enable_error_correction = adf_vf_void_noop;
2540 + hw_data->init_admin_comms = adf_vf_int_noop;
2541 + hw_data->exit_admin_comms = adf_vf_void_noop;
2542 +- hw_data->send_admin_init = adf_vf2pf_init;
2543 ++ hw_data->send_admin_init = adf_vf2pf_notify_init;
2544 + hw_data->init_arb = adf_vf_int_noop;
2545 + hw_data->exit_arb = adf_vf_void_noop;
2546 +- hw_data->disable_iov = adf_vf2pf_shutdown;
2547 ++ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
2548 + hw_data->get_accel_mask = get_accel_mask;
2549 + hw_data->get_ae_mask = get_ae_mask;
2550 + hw_data->get_num_accels = get_num_accels;
2551 +diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
2552 +index c61476553728d..dd4a811b7e89f 100644
2553 +--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
2554 ++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
2555 +@@ -198,8 +198,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
2556 + void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
2557 + void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
2558 +
2559 +-int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
2560 +-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
2561 ++int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
2562 ++void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
2563 + int adf_init_pf_wq(void);
2564 + void adf_exit_pf_wq(void);
2565 + int adf_init_vf_wq(void);
2566 +@@ -222,12 +222,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
2567 + {
2568 + }
2569 +
2570 +-static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
2571 ++static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
2572 + {
2573 + return 0;
2574 + }
2575 +
2576 +-static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
2577 ++static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
2578 + {
2579 + }
2580 +
2581 +diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
2582 +index 744c40351428d..02864985dbb04 100644
2583 +--- a/drivers/crypto/qat/qat_common/adf_init.c
2584 ++++ b/drivers/crypto/qat/qat_common/adf_init.c
2585 +@@ -61,6 +61,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
2586 + struct service_hndl *service;
2587 + struct list_head *list_itr;
2588 + struct adf_hw_device_data *hw_data = accel_dev->hw_device;
2589 ++ int ret;
2590 +
2591 + if (!hw_data) {
2592 + dev_err(&GET_DEV(accel_dev),
2593 +@@ -127,9 +128,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
2594 + }
2595 +
2596 + hw_data->enable_error_correction(accel_dev);
2597 +- hw_data->enable_vf2pf_comms(accel_dev);
2598 ++ ret = hw_data->enable_vf2pf_comms(accel_dev);
2599 +
2600 +- return 0;
2601 ++ return ret;
2602 + }
2603 + EXPORT_SYMBOL_GPL(adf_dev_init);
2604 +
2605 +diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
2606 +index e3ad5587be49e..daab02011717d 100644
2607 +--- a/drivers/crypto/qat/qat_common/adf_isr.c
2608 ++++ b/drivers/crypto/qat/qat_common/adf_isr.c
2609 +@@ -15,6 +15,8 @@
2610 + #include "adf_transport_access_macros.h"
2611 + #include "adf_transport_internal.h"
2612 +
2613 ++#define ADF_MAX_NUM_VFS 32
2614 ++
2615 + static int adf_enable_msix(struct adf_accel_dev *accel_dev)
2616 + {
2617 + struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
2618 +@@ -72,7 +74,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
2619 + struct adf_bar *pmisc =
2620 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
2621 + void __iomem *pmisc_bar_addr = pmisc->virt_addr;
2622 +- u32 vf_mask;
2623 ++ unsigned long vf_mask;
2624 +
2625 + /* Get the interrupt sources triggered by VFs */
2626 + vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
2627 +@@ -93,8 +95,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
2628 + * unless the VF is malicious and is attempting to
2629 + * flood the host OS with VF2PF interrupts.
2630 + */
2631 +- for_each_set_bit(i, (const unsigned long *)&vf_mask,
2632 +- (sizeof(vf_mask) * BITS_PER_BYTE)) {
2633 ++ for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
2634 + vf_info = accel_dev->pf.vf_info + i;
2635 +
2636 + if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
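Two details matter in this ISR fix: vf_mask becomes an unsigned long, the type the bitmap helpers actually operate on (the old cast of a u32 on the stack was fragile), and the scan is bounded by ADF_MAX_NUM_VFS rather than the full width of the type. for_each_set_bit() then yields only the set bit positions below that bound:

    unsigned long vf_mask = pending_mask;   /* from ERRSOU3/ERRSOU5 */
    int i;

    for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
            /* i is the number of a VF with a pending VF2PF interrupt */
            handle_vf2pf_irq(accel_dev, i);         /* hypothetical helper */
    }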
2637 +diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
2638 +index a1b77bd7a8944..efa4bffb4f601 100644
2639 +--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
2640 ++++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
2641 +@@ -186,7 +186,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
2642 +
2643 + return ret;
2644 + }
2645 +-EXPORT_SYMBOL_GPL(adf_iov_putmsg);
2646 +
2647 + void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
2648 + {
2649 +@@ -316,6 +315,8 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
2650 + msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
2651 + BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
2652 +
2653 ++ reinit_completion(&accel_dev->vf.iov_msg_completion);
2654 ++
2655 + /* Send request from VF to PF */
2656 + ret = adf_iov_putmsg(accel_dev, msg, 0);
2657 + if (ret) {
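The reinit_completion() guards against a stale wakeup: a struct completion keeps a done count, so a leftover complete() from an earlier exchange (for example one that timed out) would make the next wait return instantly with old data. Resetting it just before the request restores strict request/response pairing. Sketch of the exchange (timeout value hypothetical):

    reinit_completion(&accel_dev->vf.iov_msg_completion);

    ret = adf_iov_putmsg(accel_dev, msg, 0);        /* VF -> PF request */
    if (ret)
            return ret;

    if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
                                     msecs_to_jiffies(1000)))
            return -EIO;                            /* PF never answered */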
2658 +diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
2659 +index e85bd62d134a4..3e25fac051b25 100644
2660 +--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
2661 ++++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
2662 +@@ -5,14 +5,14 @@
2663 + #include "adf_pf2vf_msg.h"
2664 +
2665 + /**
2666 +- * adf_vf2pf_init() - send init msg to PF
2667 ++ * adf_vf2pf_notify_init() - send init msg to PF
2668 + * @accel_dev: Pointer to acceleration VF device.
2669 + *
2670 + * Function sends an init message from the VF to a PF
2671 + *
2672 + * Return: 0 on success, error code otherwise.
2673 + */
2674 +-int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
2675 ++int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
2676 + {
2677 + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
2678 + (ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
2679 +@@ -25,17 +25,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
2680 + set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
2681 + return 0;
2682 + }
2683 +-EXPORT_SYMBOL_GPL(adf_vf2pf_init);
2684 ++EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
2685 +
2686 + /**
2687 +- * adf_vf2pf_shutdown() - send shutdown msg to PF
2688 ++ * adf_vf2pf_notify_shutdown() - send shutdown msg to PF
2689 + * @accel_dev: Pointer to acceleration VF device.
2690 + *
2691 + * Function sends a shutdown message from the VF to a PF
2692 + *
2693 + * Return: void
2694 + */
2695 +-void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
2696 ++void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
2697 + {
2698 + u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
2699 + (ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
2700 +@@ -45,4 +45,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
2701 + dev_err(&GET_DEV(accel_dev),
2702 + "Failed to send Shutdown event to PF\n");
2703 + }
2704 +-EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
2705 ++EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);
2706 +diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
2707 +index 888388acb6bd3..3e4f64d248f9b 100644
2708 +--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
2709 ++++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
2710 +@@ -160,6 +160,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
2711 + struct adf_bar *pmisc =
2712 + &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
2713 + void __iomem *pmisc_bar_addr = pmisc->virt_addr;
2714 ++ bool handled = false;
2715 + u32 v_int;
2716 +
2717 + /* Read VF INT source CSR to determine the source of VF interrupt */
2718 +@@ -172,7 +173,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
2719 +
2720 + /* Schedule tasklet to handle interrupt BH */
2721 + tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
2722 +- return IRQ_HANDLED;
2723 ++ handled = true;
2724 + }
2725 +
2726 + /* Check bundle interrupt */
2727 +@@ -184,10 +185,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
2728 + csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
2729 + bank->bank_number, 0);
2730 + tasklet_hi_schedule(&bank->resp_handler);
2731 +- return IRQ_HANDLED;
2732 ++ handled = true;
2733 + }
2734 +
2735 +- return IRQ_NONE;
2736 ++ return handled ? IRQ_HANDLED : IRQ_NONE;
2737 + }
2738 +
2739 + static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
2740 +diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
2741 +index f14fb82ed6dfc..744734caaf7b7 100644
2742 +--- a/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
2743 ++++ b/drivers/crypto/qat/qat_dh895xccvf/adf_dh895xccvf_hw_data.c
2744 +@@ -81,10 +81,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
2745 + hw_data->enable_error_correction = adf_vf_void_noop;
2746 + hw_data->init_admin_comms = adf_vf_int_noop;
2747 + hw_data->exit_admin_comms = adf_vf_void_noop;
2748 +- hw_data->send_admin_init = adf_vf2pf_init;
2749 ++ hw_data->send_admin_init = adf_vf2pf_notify_init;
2750 + hw_data->init_arb = adf_vf_int_noop;
2751 + hw_data->exit_arb = adf_vf_void_noop;
2752 +- hw_data->disable_iov = adf_vf2pf_shutdown;
2753 ++ hw_data->disable_iov = adf_vf2pf_notify_shutdown;
2754 + hw_data->get_accel_mask = get_accel_mask;
2755 + hw_data->get_ae_mask = get_ae_mask;
2756 + hw_data->get_num_accels = get_num_accels;
2757 +diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
2758 +index 37b4e875420e4..1cea5d8fa4349 100644
2759 +--- a/drivers/edac/i10nm_base.c
2760 ++++ b/drivers/edac/i10nm_base.c
2761 +@@ -26,8 +26,8 @@
2762 + pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
2763 + #define I10NM_GET_DIMMMTR(m, i, j) \
2764 + readl((m)->mbase + 0x2080c + (i) * (m)->chan_mmio_sz + (j) * 4)
2765 +-#define I10NM_GET_MCDDRTCFG(m, i, j) \
2766 +- readl((m)->mbase + 0x20970 + (i) * (m)->chan_mmio_sz + (j) * 4)
2767 ++#define I10NM_GET_MCDDRTCFG(m, i) \
2768 ++ readl((m)->mbase + 0x20970 + (i) * (m)->chan_mmio_sz)
2769 + #define I10NM_GET_MCMTR(m, i) \
2770 + readl((m)->mbase + 0x20ef8 + (i) * (m)->chan_mmio_sz)
2771 + #define I10NM_GET_AMAP(m, i) \
2772 +@@ -185,10 +185,10 @@ static int i10nm_get_dimm_config(struct mem_ctl_info *mci,
2773 +
2774 + ndimms = 0;
2775 + amap = I10NM_GET_AMAP(imc, i);
2776 ++ mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
2777 + for (j = 0; j < I10NM_NUM_DIMMS; j++) {
2778 + dimm = edac_get_dimm(mci, i, j, 0);
2779 + mtr = I10NM_GET_DIMMMTR(imc, i, j);
2780 +- mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i, j);
2781 + edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
2782 + mtr, mcddrtcfg, imc->mc, i, j);
2783 +
2784 +diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
2785 +index 5dd905a3f30ca..1a1629166aa30 100644
2786 +--- a/drivers/edac/mce_amd.c
2787 ++++ b/drivers/edac/mce_amd.c
2788 +@@ -1176,6 +1176,9 @@ static int __init mce_amd_init(void)
2789 + c->x86_vendor != X86_VENDOR_HYGON)
2790 + return -ENODEV;
2791 +
2792 ++ if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
2793 ++ return -ENODEV;
2794 ++
2795 + if (boot_cpu_has(X86_FEATURE_SMCA)) {
2796 + xec_mask = 0x3f;
2797 + goto out;
2798 +diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
2799 +index 250e016807422..4b8978b254f9a 100644
2800 +--- a/drivers/firmware/raspberrypi.c
2801 ++++ b/drivers/firmware/raspberrypi.c
2802 +@@ -329,12 +329,18 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node)
2803 +
2804 + fw = platform_get_drvdata(pdev);
2805 + if (!fw)
2806 +- return NULL;
2807 ++ goto err_put_device;
2808 +
2809 + if (!kref_get_unless_zero(&fw->consumers))
2810 +- return NULL;
2811 ++ goto err_put_device;
2812 ++
2813 ++ put_device(&pdev->dev);
2814 +
2815 + return fw;
2816 ++
2817 ++err_put_device:
2818 ++ put_device(&pdev->dev);
2819 ++ return NULL;
2820 + }
2821 + EXPORT_SYMBOL_GPL(rpi_firmware_get);
2822 +
2823 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
2824 +index b8655ff73a658..cc9c9f8b23b2c 100644
2825 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
2826 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
2827 +@@ -160,17 +160,28 @@ static int acp_poweron(struct generic_pm_domain *genpd)
2828 + return 0;
2829 + }
2830 +
2831 +-static struct device *get_mfd_cell_dev(const char *device_name, int r)
2832 ++static int acp_genpd_add_device(struct device *dev, void *data)
2833 + {
2834 +- char auto_dev_name[25];
2835 +- struct device *dev;
2836 ++ struct generic_pm_domain *gpd = data;
2837 ++ int ret;
2838 +
2839 +- snprintf(auto_dev_name, sizeof(auto_dev_name),
2840 +- "%s.%d.auto", device_name, r);
2841 +- dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
2842 +- dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
2843 ++ ret = pm_genpd_add_device(gpd, dev);
2844 ++ if (ret)
2845 ++ dev_err(dev, "Failed to add dev to genpd %d\n", ret);
2846 +
2847 +- return dev;
2848 ++ return ret;
2849 ++}
2850 ++
2851 ++static int acp_genpd_remove_device(struct device *dev, void *data)
2852 ++{
2853 ++ int ret;
2854 ++
2855 ++ ret = pm_genpd_remove_device(dev);
2856 ++ if (ret)
2857 ++ dev_err(dev, "Failed to remove dev from genpd %d\n", ret);
2858 ++
2859 ++ /* Continue to remove */
2860 ++ return 0;
2861 + }
2862 +
2863 + /**
2864 +@@ -181,11 +192,10 @@ static struct device *get_mfd_cell_dev(const char *device_name, int r)
2865 + */
2866 + static int acp_hw_init(void *handle)
2867 + {
2868 +- int r, i;
2869 ++ int r;
2870 + uint64_t acp_base;
2871 + u32 val = 0;
2872 + u32 count = 0;
2873 +- struct device *dev;
2874 + struct i2s_platform_data *i2s_pdata = NULL;
2875 +
2876 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2877 +@@ -341,15 +351,10 @@ static int acp_hw_init(void *handle)
2878 + if (r)
2879 + goto failure;
2880 +
2881 +- for (i = 0; i < ACP_DEVS ; i++) {
2882 +- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
2883 +- r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
2884 +- if (r) {
2885 +- dev_err(dev, "Failed to add dev to genpd\n");
2886 +- goto failure;
2887 +- }
2888 +- }
2889 +-
2890 ++ r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
2891 ++ acp_genpd_add_device);
2892 ++ if (r)
2893 ++ goto failure;
2894 +
2895 + /* Assert Soft reset of ACP */
2896 + val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
2897 +@@ -410,10 +415,8 @@ failure:
2898 + */
2899 + static int acp_hw_fini(void *handle)
2900 + {
2901 +- int i, ret;
2902 + u32 val = 0;
2903 + u32 count = 0;
2904 +- struct device *dev;
2905 + struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2906 +
2907 + /* return early if no ACP */
2908 +@@ -458,13 +461,8 @@ static int acp_hw_fini(void *handle)
2909 + udelay(100);
2910 + }
2911 +
2912 +- for (i = 0; i < ACP_DEVS ; i++) {
2913 +- dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
2914 +- ret = pm_genpd_remove_device(dev);
2915 +- /* If removal fails, dont giveup and try rest */
2916 +- if (ret)
2917 +- dev_err(dev, "remove dev from genpd failed\n");
2918 +- }
2919 ++ device_for_each_child(adev->acp.parent, NULL,
2920 ++ acp_genpd_remove_device);
2921 +
2922 + mfd_remove_devices(adev->acp.parent);
2923 + kfree(adev->acp.acp_res);
2924 +diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
2925 +index dc7d2e71aa6fd..5d1743f3321ef 100644
2926 +--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
2927 ++++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
2928 +@@ -104,8 +104,8 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
2929 +
2930 + ret = smu_cmn_wait_for_response(smu);
2931 + if (ret != 0x1) {
2932 +- dev_err(adev->dev, "Msg issuing pre-check failed and "
2933 +- "SMU may be not in the right state!\n");
2934 ++ dev_err(adev->dev, "Msg issuing pre-check failed(0x%x) and "
2935 ++ "SMU may be not in the right state!\n", ret);
2936 + if (ret != -ETIME)
2937 + ret = -EIO;
2938 + return ret;
2939 +diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
2940 +index ca04c34e82518..997b8827fed27 100644
2941 +--- a/drivers/gpu/drm/drm_of.c
2942 ++++ b/drivers/gpu/drm/drm_of.c
2943 +@@ -315,7 +315,7 @@ static int drm_of_lvds_get_remote_pixels_type(
2944 +
2945 + remote_port = of_graph_get_remote_port(endpoint);
2946 + if (!remote_port) {
2947 +- of_node_put(remote_port);
2948 ++ of_node_put(endpoint);
2949 + return -EPIPE;
2950 + }
2951 +
2952 +@@ -331,8 +331,10 @@ static int drm_of_lvds_get_remote_pixels_type(
2953 + * configurations by passing the endpoints explicitly to
2954 + * drm_of_lvds_get_dual_link_pixel_order().
2955 + */
2956 +- if (!current_pt || pixels_type != current_pt)
2957 ++ if (!current_pt || pixels_type != current_pt) {
2958 ++ of_node_put(endpoint);
2959 + return -EINVAL;
2960 ++ }
2961 + }
2962 +
2963 + return pixels_type;
2964 +diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
2965 +index 432bdcc57ac9e..a1332878857b2 100644
2966 +--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
2967 ++++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
2968 +@@ -117,7 +117,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
2969 + continue;
2970 + }
2971 +
2972 +- if (!connector) {
2973 ++ if (list_entry_is_head(connector, &mode_config->connector_list, head)) {
2974 + DRM_ERROR("Couldn't find connector when setting mode");
2975 + gma_power_end(dev);
2976 + return;
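The gma500 fix corrects a classic list_for_each_entry() pitfall: the cursor is never NULL after the loop; when nothing matched it points at the container of the list head itself, so the old !connector test could never fire. list_entry_is_head() is the proper post-loop check:

    struct item *it;

    list_for_each_entry(it, &head, node)
            if (match(it))
                    break;

    if (list_entry_is_head(it, &head, node))
            return -ENOENT;         /* loop finished without a match */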
2977 +diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
2978 +index 2d4645e01ebf6..e01135b7a404f 100644
2979 +--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
2980 ++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
2981 +@@ -345,10 +345,12 @@ static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
2982 + int i;
2983 +
2984 + for (i = 0; i < ctx->mixer_count; i++) {
2985 +- DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
2986 +- DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
2987 +- DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
2988 +- DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
2989 ++ enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;
2990 ++
2991 ++ DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
2992 ++ DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
2993 ++ DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
2994 ++ DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
2995 + }
2996 +
2997 + DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
2998 +diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
2999 +index 4a5b518288b06..0712752742f4f 100644
3000 +--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
3001 ++++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
3002 +@@ -19,30 +19,12 @@ static int mdp4_hw_init(struct msm_kms *kms)
3003 + {
3004 + struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
3005 + struct drm_device *dev = mdp4_kms->dev;
3006 +- uint32_t version, major, minor, dmap_cfg, vg_cfg;
3007 ++ u32 dmap_cfg, vg_cfg;
3008 + unsigned long clk;
3009 + int ret = 0;
3010 +
3011 + pm_runtime_get_sync(dev->dev);
3012 +
3013 +- mdp4_enable(mdp4_kms);
3014 +- version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
3015 +- mdp4_disable(mdp4_kms);
3016 +-
3017 +- major = FIELD(version, MDP4_VERSION_MAJOR);
3018 +- minor = FIELD(version, MDP4_VERSION_MINOR);
3019 +-
3020 +- DBG("found MDP4 version v%d.%d", major, minor);
3021 +-
3022 +- if (major != 4) {
3023 +- DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
3024 +- major, minor);
3025 +- ret = -ENXIO;
3026 +- goto out;
3027 +- }
3028 +-
3029 +- mdp4_kms->rev = minor;
3030 +-
3031 + if (mdp4_kms->rev > 1) {
3032 + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
3033 + mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
3034 +@@ -88,7 +70,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
3035 + if (mdp4_kms->rev > 1)
3036 + mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
3037 +
3038 +-out:
3039 + pm_runtime_put_sync(dev->dev);
3040 +
3041 + return ret;
3042 +@@ -411,6 +392,22 @@ fail:
3043 + return ret;
3044 + }
3045 +
3046 ++static void read_mdp_hw_revision(struct mdp4_kms *mdp4_kms,
3047 ++ u32 *major, u32 *minor)
3048 ++{
3049 ++ struct drm_device *dev = mdp4_kms->dev;
3050 ++ u32 version;
3051 ++
3052 ++ mdp4_enable(mdp4_kms);
3053 ++ version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
3054 ++ mdp4_disable(mdp4_kms);
3055 ++
3056 ++ *major = FIELD(version, MDP4_VERSION_MAJOR);
3057 ++ *minor = FIELD(version, MDP4_VERSION_MINOR);
3058 ++
3059 ++ DRM_DEV_INFO(dev->dev, "MDP4 version v%d.%d", *major, *minor);
3060 ++}
3061 ++
3062 + struct msm_kms *mdp4_kms_init(struct drm_device *dev)
3063 + {
3064 + struct platform_device *pdev = to_platform_device(dev->dev);
3065 +@@ -419,6 +416,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
3066 + struct msm_kms *kms = NULL;
3067 + struct msm_gem_address_space *aspace;
3068 + int irq, ret;
3069 ++ u32 major, minor;
3070 +
3071 + mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
3072 + if (!mdp4_kms) {
3073 +@@ -479,15 +477,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
3074 + if (IS_ERR(mdp4_kms->pclk))
3075 + mdp4_kms->pclk = NULL;
3076 +
3077 +- if (mdp4_kms->rev >= 2) {
3078 +- mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
3079 +- if (IS_ERR(mdp4_kms->lut_clk)) {
3080 +- DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
3081 +- ret = PTR_ERR(mdp4_kms->lut_clk);
3082 +- goto fail;
3083 +- }
3084 +- }
3085 +-
3086 + mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
3087 + if (IS_ERR(mdp4_kms->axi_clk)) {
3088 + DRM_DEV_ERROR(dev->dev, "failed to get axi_clk\n");
3089 +@@ -496,8 +485,27 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
3090 + }
3091 +
3092 + clk_set_rate(mdp4_kms->clk, config->max_clk);
3093 +- if (mdp4_kms->lut_clk)
3094 ++
3095 ++ read_mdp_hw_revision(mdp4_kms, &major, &minor);
3096 ++
3097 ++ if (major != 4) {
3098 ++ DRM_DEV_ERROR(dev->dev, "unexpected MDP version: v%d.%d\n",
3099 ++ major, minor);
3100 ++ ret = -ENXIO;
3101 ++ goto fail;
3102 ++ }
3103 ++
3104 ++ mdp4_kms->rev = minor;
3105 ++
3106 ++ if (mdp4_kms->rev >= 2) {
3107 ++ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
3108 ++ if (IS_ERR(mdp4_kms->lut_clk)) {
3109 ++ DRM_DEV_ERROR(dev->dev, "failed to get lut_clk\n");
3110 ++ ret = PTR_ERR(mdp4_kms->lut_clk);
3111 ++ goto fail;
3112 ++ }
3113 + clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
3114 ++ }
3115 +
3116 + pm_runtime_enable(dev->dev);
3117 + mdp4_kms->rpm_enabled = true;
3118 +diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
3119 +index cdec0a367a2cb..2b1e127390e4e 100644
3120 +--- a/drivers/gpu/drm/msm/dp/dp_display.c
3121 ++++ b/drivers/gpu/drm/msm/dp/dp_display.c
3122 +@@ -55,7 +55,6 @@ enum {
3123 + EV_HPD_INIT_SETUP,
3124 + EV_HPD_PLUG_INT,
3125 + EV_IRQ_HPD_INT,
3126 +- EV_HPD_REPLUG_INT,
3127 + EV_HPD_UNPLUG_INT,
3128 + EV_USER_NOTIFICATION,
3129 + EV_CONNECT_PENDING_TIMEOUT,
3130 +@@ -1119,9 +1118,6 @@ static int hpd_event_thread(void *data)
3131 + case EV_IRQ_HPD_INT:
3132 + dp_irq_hpd_handle(dp_priv, todo->data);
3133 + break;
3134 +- case EV_HPD_REPLUG_INT:
3135 +- /* do nothing */
3136 +- break;
3137 + case EV_USER_NOTIFICATION:
3138 + dp_display_send_hpd_notification(dp_priv,
3139 + todo->data);
3140 +@@ -1165,10 +1161,8 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
3141 +
3142 + if (hpd_isr_status & 0x0F) {
3143 + /* hpd related interrupts */
3144 +- if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK ||
3145 +- hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
3146 ++ if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
3147 + dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
3148 +- }
3149 +
3150 + if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
3151 + /* stop sentinel connect pending checking */
3152 +@@ -1176,8 +1170,10 @@ static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
3153 + dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
3154 + }
3155 +
3156 +- if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK)
3157 +- dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0);
3158 ++ if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
3159 ++ dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
3160 ++ dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
3161 ++ }
3162 +
3163 + if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
3164 + dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
3165 +@@ -1286,7 +1282,7 @@ static int dp_pm_resume(struct device *dev)
3166 + struct platform_device *pdev = to_platform_device(dev);
3167 + struct msm_dp *dp_display = platform_get_drvdata(pdev);
3168 + struct dp_display_private *dp;
3169 +- u32 status;
3170 ++ int sink_count = 0;
3171 +
3172 + dp = container_of(dp_display, struct dp_display_private, dp_display);
3173 +
3174 +@@ -1300,14 +1296,25 @@ static int dp_pm_resume(struct device *dev)
3175 +
3176 + dp_catalog_ctrl_hpd_config(dp->catalog);
3177 +
3178 +- status = dp_catalog_link_is_connected(dp->catalog);
3179 ++ /*
3180 ++ * set sink to normal operation mode -- D0
3181 ++ * before dpcd read
3182 ++ */
3183 ++ dp_link_psm_config(dp->link, &dp->panel->link_info, false);
3184 ++
3185 ++ if (dp_catalog_link_is_connected(dp->catalog)) {
3186 ++ sink_count = drm_dp_read_sink_count(dp->aux);
3187 ++ if (sink_count < 0)
3188 ++ sink_count = 0;
3189 ++ }
3190 +
3191 ++ dp->link->sink_count = sink_count;
3192 + /*
3193 + * can not declared display is connected unless
3194 + * HDMI cable is plugged in and sink_count of
3195 + * dongle become 1
3196 + */
3197 +- if (status && dp->link->sink_count)
3198 ++ if (dp->link->sink_count)
3199 + dp->dp_display.is_connected = true;
3200 + else
3201 + dp->dp_display.is_connected = false;
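[Note, not part of the patch: two behavioral changes are folded into the dp_display.c hunks. A REPLUG interrupt loses its dedicated no-op event and is decomposed into an unplug followed by a slightly delayed plug, and resume re-reads the sink count over AUX (after waking the sink to D0) instead of trusting the cached value. A sketch of the resume logic, where example_link_is_up() and example_read_sink_count() stand in for the catalog/AUX helpers used by the driver:

#include <linux/types.h>

bool example_link_is_up(void);		/* stand-in: dp_catalog_link_is_connected() */
int example_read_sink_count(void);	/* stand-in: drm_dp_read_sink_count() */

static bool example_connected_after_resume(void)
{
	int sink_count = 0;

	if (example_link_is_up()) {
		sink_count = example_read_sink_count();
		if (sink_count < 0)	/* AUX read failed: treat as no sink */
			sink_count = 0;
	}

	return sink_count != 0;
}
]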
3202 +diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
3203 +index 627048851d99c..7e364b9c9f9e1 100644
3204 +--- a/drivers/gpu/drm/msm/dsi/dsi.c
3205 ++++ b/drivers/gpu/drm/msm/dsi/dsi.c
3206 +@@ -26,8 +26,10 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
3207 + }
3208 +
3209 + phy_pdev = of_find_device_by_node(phy_node);
3210 +- if (phy_pdev)
3211 ++ if (phy_pdev) {
3212 + msm_dsi->phy = platform_get_drvdata(phy_pdev);
3213 ++ msm_dsi->phy_dev = &phy_pdev->dev;
3214 ++ }
3215 +
3216 + of_node_put(phy_node);
3217 +
3218 +@@ -36,8 +38,6 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
3219 + return -EPROBE_DEFER;
3220 + }
3221 +
3222 +- msm_dsi->phy_dev = get_device(&phy_pdev->dev);
3223 +-
3224 + return 0;
3225 + }
3226 +
3227 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
3228 +index 6da93551e2e5f..c277d3f61a5ef 100644
3229 +--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
3230 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
3231 +@@ -51,6 +51,7 @@ static const struct mxsfb_devdata mxsfb_devdata[] = {
3232 + .hs_wdth_mask = 0xff,
3233 + .hs_wdth_shift = 24,
3234 + .has_overlay = false,
3235 ++ .has_ctrl2 = false,
3236 + },
3237 + [MXSFB_V4] = {
3238 + .transfer_count = LCDC_V4_TRANSFER_COUNT,
3239 +@@ -59,6 +60,7 @@ static const struct mxsfb_devdata mxsfb_devdata[] = {
3240 + .hs_wdth_mask = 0x3fff,
3241 + .hs_wdth_shift = 18,
3242 + .has_overlay = false,
3243 ++ .has_ctrl2 = true,
3244 + },
3245 + [MXSFB_V6] = {
3246 + .transfer_count = LCDC_V4_TRANSFER_COUNT,
3247 +@@ -67,6 +69,7 @@ static const struct mxsfb_devdata mxsfb_devdata[] = {
3248 + .hs_wdth_mask = 0x3fff,
3249 + .hs_wdth_shift = 18,
3250 + .has_overlay = true,
3251 ++ .has_ctrl2 = true,
3252 + },
3253 + };
3254 +
3255 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
3256 +index 399d23e91ed10..7c720e226fdfd 100644
3257 +--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h
3258 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h
3259 +@@ -22,6 +22,7 @@ struct mxsfb_devdata {
3260 + unsigned int hs_wdth_mask;
3261 + unsigned int hs_wdth_shift;
3262 + bool has_overlay;
3263 ++ bool has_ctrl2;
3264 + };
3265 +
3266 + struct mxsfb_drm_private {
3267 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
3268 +index 300e7bab0f431..54f905ac75c07 100644
3269 +--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
3270 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
3271 +@@ -107,6 +107,14 @@ static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
3272 + clk_prepare_enable(mxsfb->clk_disp_axi);
3273 + clk_prepare_enable(mxsfb->clk);
3274 +
3275 ++ /* Increase number of outstanding requests on all supported IPs */
3276 ++ if (mxsfb->devdata->has_ctrl2) {
3277 ++ reg = readl(mxsfb->base + LCDC_V4_CTRL2);
3278 ++ reg &= ~CTRL2_SET_OUTSTANDING_REQS_MASK;
3279 ++ reg |= CTRL2_SET_OUTSTANDING_REQS_16;
3280 ++ writel(reg, mxsfb->base + LCDC_V4_CTRL2);
3281 ++ }
3282 ++
3283 + /* If it was disabled, re-enable the mode again */
3284 + writel(CTRL_DOTCLK_MODE, mxsfb->base + LCDC_CTRL + REG_SET);
3285 +
3286 +@@ -115,6 +123,35 @@ static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
3287 + reg |= VDCTRL4_SYNC_SIGNALS_ON;
3288 + writel(reg, mxsfb->base + LCDC_VDCTRL4);
3289 +
3290 ++ /*
3291 ++ * Enable recovery on underflow.
3292 ++ *
3293 ++ * There is some sort of corner case behavior of the controller,
3294 ++ * which could rarely be triggered at least on i.MX6SX connected
3295 ++ * to 800x480 DPI panel and i.MX8MM connected to DPI->DSI->LVDS
3296 ++ * bridged 1920x1080 panel (and likely on other setups too), where
3297 ++ * the image on the panel shifts to the right and wraps around.
3298 ++ * This happens either when the controller is enabled on boot or
3299 ++ * even later during run time. The condition does not correct
3300 ++ * itself automatically, i.e. the display image remains shifted.
3301 ++ *
3302 ++ * It seems this problem is known and is due to sporadic underflows
3303 ++ * of the LCDIF FIFO. While the LCDIF IP does have underflow/overflow
3304 ++ * IRQs, neither of the IRQs trigger and neither IRQ status bit is
3305 ++ * asserted when this condition occurs.
3306 ++ *
3307 ++ * All known revisions of the LCDIF IP have CTRL1 RECOVER_ON_UNDERFLOW
3308 ++ * bit, which is described in the reference manual since i.MX23 as
3309 ++ * "
3310 ++ * Set this bit to enable the LCDIF block to recover in the next
3311 ++ * field/frame if there was an underflow in the current field/frame.
3312 ++ * "
3313 ++ * Enable this bit to mitigate the sporadic underflows.
3314 ++ */
3315 ++ reg = readl(mxsfb->base + LCDC_CTRL1);
3316 ++ reg |= CTRL1_RECOVER_ON_UNDERFLOW;
3317 ++ writel(reg, mxsfb->base + LCDC_CTRL1);
3318 ++
3319 + writel(CTRL_RUN, mxsfb->base + LCDC_CTRL + REG_SET);
3320 + }
3321 +
3322 +@@ -206,6 +243,9 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
3323 +
3324 + /* Clear the FIFOs */
3325 + writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
3326 ++ readl(mxsfb->base + LCDC_CTRL1);
3327 ++ writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_CLR);
3328 ++ readl(mxsfb->base + LCDC_CTRL1);
3329 +
3330 + if (mxsfb->devdata->has_overlay)
3331 + writel(0, mxsfb->base + LCDC_AS_CTRL);
3332 +diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
3333 +index 55d28a27f9124..694fea13e893e 100644
3334 +--- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h
3335 ++++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
3336 +@@ -15,6 +15,7 @@
3337 + #define LCDC_CTRL 0x00
3338 + #define LCDC_CTRL1 0x10
3339 + #define LCDC_V3_TRANSFER_COUNT 0x20
3340 ++#define LCDC_V4_CTRL2 0x20
3341 + #define LCDC_V4_TRANSFER_COUNT 0x30
3342 + #define LCDC_V4_CUR_BUF 0x40
3343 + #define LCDC_V4_NEXT_BUF 0x50
3344 +@@ -54,12 +55,20 @@
3345 + #define CTRL_DF24 BIT(1)
3346 + #define CTRL_RUN BIT(0)
3347 +
3348 ++#define CTRL1_RECOVER_ON_UNDERFLOW BIT(24)
3349 + #define CTRL1_FIFO_CLEAR BIT(21)
3350 + #define CTRL1_SET_BYTE_PACKAGING(x) (((x) & 0xf) << 16)
3351 + #define CTRL1_GET_BYTE_PACKAGING(x) (((x) >> 16) & 0xf)
3352 + #define CTRL1_CUR_FRAME_DONE_IRQ_EN BIT(13)
3353 + #define CTRL1_CUR_FRAME_DONE_IRQ BIT(9)
3354 +
3355 ++#define CTRL2_SET_OUTSTANDING_REQS_1 0
3356 ++#define CTRL2_SET_OUTSTANDING_REQS_2 (0x1 << 21)
3357 ++#define CTRL2_SET_OUTSTANDING_REQS_4 (0x2 << 21)
3358 ++#define CTRL2_SET_OUTSTANDING_REQS_8 (0x3 << 21)
3359 ++#define CTRL2_SET_OUTSTANDING_REQS_16 (0x4 << 21)
3360 ++#define CTRL2_SET_OUTSTANDING_REQS_MASK (0x7 << 21)
3361 ++
3362 + #define TRANSFER_COUNT_SET_VCOUNT(x) (((x) & 0xffff) << 16)
3363 + #define TRANSFER_COUNT_GET_VCOUNT(x) (((x) >> 16) & 0xffff)
3364 + #define TRANSFER_COUNT_SET_HCOUNT(x) ((x) & 0xffff)
3365 +diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
3366 +index fbcf5edbe3675..9275cd0b2793e 100644
3367 +--- a/drivers/gpu/drm/panfrost/panfrost_device.c
3368 ++++ b/drivers/gpu/drm/panfrost/panfrost_device.c
3369 +@@ -54,7 +54,8 @@ static int panfrost_clk_init(struct panfrost_device *pfdev)
3370 + if (IS_ERR(pfdev->bus_clock)) {
3371 + dev_err(pfdev->dev, "get bus_clock failed %ld\n",
3372 + PTR_ERR(pfdev->bus_clock));
3373 +- return PTR_ERR(pfdev->bus_clock);
3374 ++ err = PTR_ERR(pfdev->bus_clock);
3375 ++ goto disable_clock;
3376 + }
3377 +
3378 + if (pfdev->bus_clock) {
3379 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
3380 +index bfbff90588cbf..c22551c2facb1 100644
3381 +--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
3382 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
3383 +@@ -556,8 +556,6 @@ static int rcar_du_remove(struct platform_device *pdev)
3384 +
3385 + drm_kms_helper_poll_fini(ddev);
3386 +
3387 +- drm_dev_put(ddev);
3388 +-
3389 + return 0;
3390 + }
3391 +
3392 +diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
3393 +index 59e78bc212cf3..ae66c8ce4eef5 100644
3394 +--- a/drivers/hwmon/Makefile
3395 ++++ b/drivers/hwmon/Makefile
3396 +@@ -45,7 +45,6 @@ obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
3397 + obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
3398 + obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
3399 + obj-$(CONFIG_SENSORS_AHT10) += aht10.o
3400 +-obj-$(CONFIG_SENSORS_AMD_ENERGY) += amd_energy.o
3401 + obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
3402 + obj-$(CONFIG_SENSORS_ARM_SCMI) += scmi-hwmon.o
3403 + obj-$(CONFIG_SENSORS_ARM_SCPI) += scpi-hwmon.o
3404 +diff --git a/drivers/hwmon/pmbus/bpa-rs600.c b/drivers/hwmon/pmbus/bpa-rs600.c
3405 +index 2be69fedfa361..be76efe67d83f 100644
3406 +--- a/drivers/hwmon/pmbus/bpa-rs600.c
3407 ++++ b/drivers/hwmon/pmbus/bpa-rs600.c
3408 +@@ -12,15 +12,6 @@
3409 + #include <linux/pmbus.h>
3410 + #include "pmbus.h"
3411 +
3412 +-#define BPARS600_MFR_VIN_MIN 0xa0
3413 +-#define BPARS600_MFR_VIN_MAX 0xa1
3414 +-#define BPARS600_MFR_IIN_MAX 0xa2
3415 +-#define BPARS600_MFR_PIN_MAX 0xa3
3416 +-#define BPARS600_MFR_VOUT_MIN 0xa4
3417 +-#define BPARS600_MFR_VOUT_MAX 0xa5
3418 +-#define BPARS600_MFR_IOUT_MAX 0xa6
3419 +-#define BPARS600_MFR_POUT_MAX 0xa7
3420 +-
3421 + static int bpa_rs600_read_byte_data(struct i2c_client *client, int page, int reg)
3422 + {
3423 + int ret;
3424 +@@ -81,29 +72,13 @@ static int bpa_rs600_read_word_data(struct i2c_client *client, int page, int pha
3425 +
3426 + switch (reg) {
3427 + case PMBUS_VIN_UV_WARN_LIMIT:
3428 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_VIN_MIN);
3429 +- break;
3430 + case PMBUS_VIN_OV_WARN_LIMIT:
3431 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_VIN_MAX);
3432 +- break;
3433 + case PMBUS_VOUT_UV_WARN_LIMIT:
3434 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_VOUT_MIN);
3435 +- break;
3436 + case PMBUS_VOUT_OV_WARN_LIMIT:
3437 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_VOUT_MAX);
3438 +- break;
3439 + case PMBUS_IIN_OC_WARN_LIMIT:
3440 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_IIN_MAX);
3441 +- break;
3442 + case PMBUS_IOUT_OC_WARN_LIMIT:
3443 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_IOUT_MAX);
3444 +- break;
3445 + case PMBUS_PIN_OP_WARN_LIMIT:
3446 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_PIN_MAX);
3447 +- break;
3448 + case PMBUS_POUT_OP_WARN_LIMIT:
3449 +- ret = pmbus_read_word_data(client, 0, 0xff, BPARS600_MFR_POUT_MAX);
3450 +- break;
3451 + case PMBUS_VIN_UV_FAULT_LIMIT:
3452 + case PMBUS_VIN_OV_FAULT_LIMIT:
3453 + case PMBUS_VOUT_UV_FAULT_LIMIT:
3454 +diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
3455 +index 803dad70e2a71..a2add128d0843 100644
3456 +--- a/drivers/i2c/busses/i2c-highlander.c
3457 ++++ b/drivers/i2c/busses/i2c-highlander.c
3458 +@@ -379,7 +379,7 @@ static int highlander_i2c_probe(struct platform_device *pdev)
3459 + platform_set_drvdata(pdev, dev);
3460 +
3461 + dev->irq = platform_get_irq(pdev, 0);
3462 +- if (iic_force_poll)
3463 ++ if (dev->irq < 0 || iic_force_poll)
3464 + dev->irq = 0;
3465 +
3466 + if (dev->irq) {
3467 +diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
3468 +index aa00ba8bcb70f..61ae58f570475 100644
3469 +--- a/drivers/i2c/busses/i2c-hix5hd2.c
3470 ++++ b/drivers/i2c/busses/i2c-hix5hd2.c
3471 +@@ -413,7 +413,7 @@ static int hix5hd2_i2c_probe(struct platform_device *pdev)
3472 + return PTR_ERR(priv->regs);
3473 +
3474 + irq = platform_get_irq(pdev, 0);
3475 +- if (irq <= 0)
3476 ++ if (irq < 0)
3477 + return irq;
3478 +
3479 + priv->clk = devm_clk_get(&pdev->dev, NULL);
3480 +diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
3481 +index cfecaf18ccbb7..4a6ff54d87fe8 100644
3482 +--- a/drivers/i2c/busses/i2c-iop3xx.c
3483 ++++ b/drivers/i2c/busses/i2c-iop3xx.c
3484 +@@ -469,16 +469,14 @@ iop3xx_i2c_probe(struct platform_device *pdev)
3485 +
3486 + irq = platform_get_irq(pdev, 0);
3487 + if (irq < 0) {
3488 +- ret = -ENXIO;
3489 ++ ret = irq;
3490 + goto unmap;
3491 + }
3492 + ret = request_irq(irq, iop3xx_i2c_irq_handler, 0,
3493 + pdev->name, adapter_data);
3494 +
3495 +- if (ret) {
3496 +- ret = -EIO;
3497 ++ if (ret)
3498 + goto unmap;
3499 +- }
3500 +
3501 + memcpy(new_adapter->name, pdev->name, strlen(pdev->name));
3502 + new_adapter->owner = THIS_MODULE;
3503 +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
3504 +index 4e9fb6b44436a..d90d80d046bd7 100644
3505 +--- a/drivers/i2c/busses/i2c-mt65xx.c
3506 ++++ b/drivers/i2c/busses/i2c-mt65xx.c
3507 +@@ -1211,7 +1211,7 @@ static int mtk_i2c_probe(struct platform_device *pdev)
3508 + return PTR_ERR(i2c->pdmabase);
3509 +
3510 + irq = platform_get_irq(pdev, 0);
3511 +- if (irq <= 0)
3512 ++ if (irq < 0)
3513 + return irq;
3514 +
3515 + init_completion(&i2c->msg_complete);
3516 +diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
3517 +index 4d82761e1585e..b49a1b170bb2f 100644
3518 +--- a/drivers/i2c/busses/i2c-s3c2410.c
3519 ++++ b/drivers/i2c/busses/i2c-s3c2410.c
3520 +@@ -1137,7 +1137,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
3521 + */
3522 + if (!(i2c->quirks & QUIRK_POLL)) {
3523 + i2c->irq = ret = platform_get_irq(pdev, 0);
3524 +- if (ret <= 0) {
3525 ++ if (ret < 0) {
3526 + dev_err(&pdev->dev, "cannot find IRQ\n");
3527 + clk_unprepare(i2c->clk);
3528 + return ret;
3529 +diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c
3530 +index 31be1811d5e66..e4026c5416b15 100644
3531 +--- a/drivers/i2c/busses/i2c-synquacer.c
3532 ++++ b/drivers/i2c/busses/i2c-synquacer.c
3533 +@@ -578,7 +578,7 @@ static int synquacer_i2c_probe(struct platform_device *pdev)
3534 +
3535 + i2c->irq = platform_get_irq(pdev, 0);
3536 + if (i2c->irq < 0)
3537 +- return -ENODEV;
3538 ++ return i2c->irq;
3539 +
3540 + ret = devm_request_irq(&pdev->dev, i2c->irq, synquacer_i2c_isr,
3541 + 0, dev_name(&pdev->dev), i2c);
3542 +diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
3543 +index f2241cedf5d3f..6d24dc3855229 100644
3544 +--- a/drivers/i2c/busses/i2c-xlp9xx.c
3545 ++++ b/drivers/i2c/busses/i2c-xlp9xx.c
3546 +@@ -517,7 +517,7 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
3547 + return PTR_ERR(priv->base);
3548 +
3549 + priv->irq = platform_get_irq(pdev, 0);
3550 +- if (priv->irq <= 0)
3551 ++ if (priv->irq < 0)
3552 + return priv->irq;
3553 + /* SMBAlert irq */
3554 + priv->alert_data.irq = platform_get_irq(pdev, 1);
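[Note, not part of the patch: the run of i2c patches above all converge on the same contract -- platform_get_irq() returns the IRQ number or a negative errno, and on current kernels it does not return 0, so "<= 0" checks and hand-rolled -ENXIO/-ENODEV translations become "irq < 0; return irq;" (highlander being the exception, since it can fall back to polling). A minimal probe fragment showing the idiom:

#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagate the errno, don't invent -ENXIO */

	/* ... devm_request_irq(&pdev->dev, irq, ...) ... */
	return 0;
}
]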
3555 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
3556 +index fd113ddf6e862..8193fa5c3fedf 100644
3557 +--- a/drivers/infiniband/hw/mlx5/mr.c
3558 ++++ b/drivers/infiniband/hw/mlx5/mr.c
3559 +@@ -1022,7 +1022,7 @@ static void *mlx5_ib_alloc_xlt(size_t *nents, size_t ent_size, gfp_t gfp_mask)
3560 +
3561 + if (size > MLX5_SPARE_UMR_CHUNK) {
3562 + size = MLX5_SPARE_UMR_CHUNK;
3563 +- *nents = get_order(size) / ent_size;
3564 ++ *nents = size / ent_size;
3565 + res = (void *)__get_free_pages(gfp_mask | __GFP_NOWARN,
3566 + get_order(size));
3567 + if (res)
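[Note, not part of the patch: the mlx5 one-liner fixes a unit error. get_order(size) is the allocation order, i.e. log2 of the page count, not a byte count, so dividing it by the entry size produced a nonsense element count. For illustration, if MLX5_SPARE_UMR_CHUNK were 64 KiB and ent_size 16 bytes, the old expression was get_order(65536) / 16 = 4 / 16 = 0 entries, while the corrected size / ent_size gives 65536 / 16 = 4096. The __get_free_pages() call itself was always sized correctly; only the reported *nents was wrong.]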
3568 +diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
3569 +index c179e27062fd5..151aab408fa65 100644
3570 +--- a/drivers/irqchip/irq-apple-aic.c
3571 ++++ b/drivers/irqchip/irq-apple-aic.c
3572 +@@ -225,7 +225,7 @@ static void aic_irq_eoi(struct irq_data *d)
3573 + * Reading the interrupt reason automatically acknowledges and masks
3574 + * the IRQ, so we just unmask it here if needed.
3575 + */
3576 +- if (!irqd_irq_disabled(d) && !irqd_irq_masked(d))
3577 ++ if (!irqd_irq_masked(d))
3578 + aic_irq_unmask(d);
3579 + }
3580 +
3581 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
3582 +index 66d623f91678a..20a2d606b4c98 100644
3583 +--- a/drivers/irqchip/irq-gic-v3.c
3584 ++++ b/drivers/irqchip/irq-gic-v3.c
3585 +@@ -100,6 +100,27 @@ EXPORT_SYMBOL(gic_pmr_sync);
3586 + DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
3587 + EXPORT_SYMBOL(gic_nonsecure_priorities);
3588 +
3589 ++/*
3590 ++ * When the Non-secure world has access to group 0 interrupts (as a
3591 ++ * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
3592 ++ * return the Distributor's view of the interrupt priority.
3593 ++ *
3594 ++ * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
3595 ++ * written by software is moved to the Non-secure range by the Distributor.
3596 ++ *
3597 ++ * If both are true (which is when gic_nonsecure_priorities gets enabled),
3598 ++ * we need to shift down the priority programmed by software to match it
3599 ++ * against the value returned by ICC_RPR_EL1.
3600 ++ */
3601 ++#define GICD_INT_RPR_PRI(priority) \
3602 ++ ({ \
3603 ++ u32 __priority = (priority); \
3604 ++ if (static_branch_unlikely(&gic_nonsecure_priorities)) \
3605 ++ __priority = 0x80 | (__priority >> 1); \
3606 ++ \
3607 ++ __priority; \
3608 ++ })
3609 ++
3610 + /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
3611 + static refcount_t *ppi_nmi_refs;
3612 +
3613 +@@ -687,7 +708,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
3614 + return;
3615 +
3616 + if (gic_supports_nmi() &&
3617 +- unlikely(gic_read_rpr() == GICD_INT_NMI_PRI)) {
3618 ++ unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
3619 + gic_handle_nmi(irqnr, regs);
3620 + return;
3621 + }
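[Note, not part of the patch: to make the new GICD_INT_RPR_PRI() macro concrete -- when gic_nonsecure_priorities is enabled, a priority the kernel wrote as P is reported by ICC_RPR_EL1 as 0x80 | (P >> 1). Assuming for illustration the usual GICD_INT_NMI_PRI of 0x20, the NMI check above now compares the running priority against 0x80 | (0x20 >> 1) = 0x90 instead of the raw 0x20, which is why NMIs were previously never recognized in this configuration.]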
3622 +diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
3623 +index f790ca6d78aa4..a4eb8a2181c7f 100644
3624 +--- a/drivers/irqchip/irq-loongson-pch-pic.c
3625 ++++ b/drivers/irqchip/irq-loongson-pch-pic.c
3626 +@@ -92,18 +92,22 @@ static int pch_pic_set_type(struct irq_data *d, unsigned int type)
3627 + case IRQ_TYPE_EDGE_RISING:
3628 + pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
3629 + pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
3630 ++ irq_set_handler_locked(d, handle_edge_irq);
3631 + break;
3632 + case IRQ_TYPE_EDGE_FALLING:
3633 + pch_pic_bitset(priv, PCH_PIC_EDGE, d->hwirq);
3634 + pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
3635 ++ irq_set_handler_locked(d, handle_edge_irq);
3636 + break;
3637 + case IRQ_TYPE_LEVEL_HIGH:
3638 + pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
3639 + pch_pic_bitclr(priv, PCH_PIC_POL, d->hwirq);
3640 ++ irq_set_handler_locked(d, handle_level_irq);
3641 + break;
3642 + case IRQ_TYPE_LEVEL_LOW:
3643 + pch_pic_bitclr(priv, PCH_PIC_EDGE, d->hwirq);
3644 + pch_pic_bitset(priv, PCH_PIC_POL, d->hwirq);
3645 ++ irq_set_handler_locked(d, handle_level_irq);
3646 + break;
3647 + default:
3648 + ret = -EINVAL;
3649 +@@ -113,11 +117,24 @@ static int pch_pic_set_type(struct irq_data *d, unsigned int type)
3650 + return ret;
3651 + }
3652 +
3653 ++static void pch_pic_ack_irq(struct irq_data *d)
3654 ++{
3655 ++ unsigned int reg;
3656 ++ struct pch_pic *priv = irq_data_get_irq_chip_data(d);
3657 ++
3658 ++ reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4);
3659 ++ if (reg & BIT(PIC_REG_BIT(d->hwirq))) {
3660 ++ writel(BIT(PIC_REG_BIT(d->hwirq)),
3661 ++ priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4);
3662 ++ }
3663 ++ irq_chip_ack_parent(d);
3664 ++}
3665 ++
3666 + static struct irq_chip pch_pic_irq_chip = {
3667 + .name = "PCH PIC",
3668 + .irq_mask = pch_pic_mask_irq,
3669 + .irq_unmask = pch_pic_unmask_irq,
3670 +- .irq_ack = irq_chip_ack_parent,
3671 ++ .irq_ack = pch_pic_ack_irq,
3672 + .irq_set_affinity = irq_chip_set_affinity_parent,
3673 + .irq_set_type = pch_pic_set_type,
3674 + };
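[Note, not part of the patch: the new pch_pic_ack_irq() pairs with the handle_edge_irq change -- edge interrupts latch in the controller and must be cleared via a write-1-to-clear register before the parent chip is acked, otherwise an edge arriving mid-service is lost. A stripped-down sketch; base and the register offsets are illustrative stand-ins for the driver's mapped block:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irq.h>

extern void __iomem *base;		/* stand-in for priv->base */
#define EXAMPLE_PIC_EDGE	0x60	/* illustrative offsets */
#define EXAMPLE_PIC_CLR		0x80
#define REG_IDX(hwirq)		((hwirq) / 32)
#define REG_BIT(hwirq)		((hwirq) % 32)

static void example_ack(struct irq_data *d)
{
	u32 mask = BIT(REG_BIT(d->hwirq));

	/* Only edge IRQs latch; clear the latch before acking upstream. */
	if (readl(base + EXAMPLE_PIC_EDGE + REG_IDX(d->hwirq) * 4) & mask)
		writel(mask, base + EXAMPLE_PIC_CLR + REG_IDX(d->hwirq) * 4);

	irq_chip_ack_parent(d);
}
]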
3675 +diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
3676 +index 7d5f0bf2817ad..24736f29d3633 100644
3677 +--- a/drivers/leds/blink/leds-lgm-sso.c
3678 ++++ b/drivers/leds/blink/leds-lgm-sso.c
3679 +@@ -630,8 +630,10 @@ __sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled)
3680 +
3681 + fwnode_for_each_child_node(fw_ssoled, fwnode_child) {
3682 + led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL);
3683 +- if (!led)
3684 +- return -ENOMEM;
3685 ++ if (!led) {
3686 ++ ret = -ENOMEM;
3687 ++ goto __dt_err;
3688 ++ }
3689 +
3690 + INIT_LIST_HEAD(&led->list);
3691 + led->priv = priv;
3692 +@@ -641,7 +643,7 @@ __sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled)
3693 + fwnode_child,
3694 + GPIOD_ASIS, NULL);
3695 + if (IS_ERR(led->gpiod)) {
3696 +- dev_err(dev, "led: get gpio fail!\n");
3697 ++ ret = dev_err_probe(dev, PTR_ERR(led->gpiod), "led: get gpio fail!\n");
3698 + goto __dt_err;
3699 + }
3700 +
3701 +@@ -661,8 +663,11 @@ __sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled)
3702 + desc->panic_indicator = 1;
3703 +
3704 + ret = fwnode_property_read_u32(fwnode_child, "reg", &prop);
3705 +- if (ret != 0 || prop >= SSO_LED_MAX_NUM) {
3706 ++ if (ret)
3707 ++ goto __dt_err;
3708 ++ if (prop >= SSO_LED_MAX_NUM) {
3709 + dev_err(dev, "invalid LED pin:%u\n", prop);
3710 ++ ret = -EINVAL;
3711 + goto __dt_err;
3712 + }
3713 + desc->pin = prop;
3714 +@@ -698,21 +703,22 @@ __sso_led_dt_parse(struct sso_led_priv *priv, struct fwnode_handle *fw_ssoled)
3715 + desc->brightness = LED_FULL;
3716 + }
3717 +
3718 +- if (sso_create_led(priv, led, fwnode_child))
3719 ++ ret = sso_create_led(priv, led, fwnode_child);
3720 ++ if (ret)
3721 + goto __dt_err;
3722 + }
3723 +- fwnode_handle_put(fw_ssoled);
3724 +
3725 + return 0;
3726 ++
3727 + __dt_err:
3728 +- fwnode_handle_put(fw_ssoled);
3729 ++ fwnode_handle_put(fwnode_child);
3730 + /* unregister leds */
3731 + list_for_each(p, &priv->led_list) {
3732 + led = list_entry(p, struct sso_led, list);
3733 + sso_led_shutdown(led);
3734 + }
3735 +
3736 +- return -EINVAL;
3737 ++ return ret;
3738 + }
3739 +
3740 + static int sso_led_dt_parse(struct sso_led_priv *priv)
3741 +@@ -730,6 +736,7 @@ static int sso_led_dt_parse(struct sso_led_priv *priv)
3742 + fw_ssoled = fwnode_get_named_child_node(fwnode, "ssoled");
3743 + if (fw_ssoled) {
3744 + ret = __sso_led_dt_parse(priv, fw_ssoled);
3745 ++ fwnode_handle_put(fw_ssoled);
3746 + if (ret)
3747 + return ret;
3748 + }
3749 +diff --git a/drivers/leds/flash/leds-rt8515.c b/drivers/leds/flash/leds-rt8515.c
3750 +index 590bfa180d104..44904fdee3cc0 100644
3751 +--- a/drivers/leds/flash/leds-rt8515.c
3752 ++++ b/drivers/leds/flash/leds-rt8515.c
3753 +@@ -343,8 +343,9 @@ static int rt8515_probe(struct platform_device *pdev)
3754 +
3755 + ret = devm_led_classdev_flash_register_ext(dev, fled, &init_data);
3756 + if (ret) {
3757 +- dev_err(dev, "can't register LED %s\n", led->name);
3758 ++ fwnode_handle_put(child);
3759 + mutex_destroy(&rt->lock);
3760 ++ dev_err(dev, "can't register LED %s\n", led->name);
3761 + return ret;
3762 + }
3763 +
3764 +@@ -362,6 +363,7 @@ static int rt8515_probe(struct platform_device *pdev)
3765 + */
3766 + }
3767 +
3768 ++ fwnode_handle_put(child);
3769 + return 0;
3770 + }
3771 +
3772 +diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
3773 +index 2180255ad3393..899ed94b66876 100644
3774 +--- a/drivers/leds/leds-is31fl32xx.c
3775 ++++ b/drivers/leds/leds-is31fl32xx.c
3776 +@@ -385,6 +385,7 @@ static int is31fl32xx_parse_dt(struct device *dev,
3777 + dev_err(dev,
3778 + "Node %pOF 'reg' conflicts with another LED\n",
3779 + child);
3780 ++ ret = -EINVAL;
3781 + goto err;
3782 + }
3783 +
3784 +diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
3785 +index 68e06434ac087..7dab08773a347 100644
3786 +--- a/drivers/leds/leds-lt3593.c
3787 ++++ b/drivers/leds/leds-lt3593.c
3788 +@@ -99,10 +99,9 @@ static int lt3593_led_probe(struct platform_device *pdev)
3789 + init_data.default_label = ":";
3790 +
3791 + ret = devm_led_classdev_register_ext(dev, &led_data->cdev, &init_data);
3792 +- if (ret < 0) {
3793 +- fwnode_handle_put(child);
3794 ++ fwnode_handle_put(child);
3795 ++ if (ret < 0)
3796 + return ret;
3797 +- }
3798 +
3799 + platform_set_drvdata(pdev, led_data);
3800 +
3801 +diff --git a/drivers/leds/trigger/ledtrig-audio.c b/drivers/leds/trigger/ledtrig-audio.c
3802 +index f76621e88482d..c6b437e6369b8 100644
3803 +--- a/drivers/leds/trigger/ledtrig-audio.c
3804 ++++ b/drivers/leds/trigger/ledtrig-audio.c
3805 +@@ -6,10 +6,33 @@
3806 + #include <linux/kernel.h>
3807 + #include <linux/leds.h>
3808 + #include <linux/module.h>
3809 ++#include "../leds.h"
3810 +
3811 +-static struct led_trigger *ledtrig_audio[NUM_AUDIO_LEDS];
3812 + static enum led_brightness audio_state[NUM_AUDIO_LEDS];
3813 +
3814 ++static int ledtrig_audio_mute_activate(struct led_classdev *led_cdev)
3815 ++{
3816 ++ led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MUTE]);
3817 ++ return 0;
3818 ++}
3819 ++
3820 ++static int ledtrig_audio_micmute_activate(struct led_classdev *led_cdev)
3821 ++{
3822 ++ led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MICMUTE]);
3823 ++ return 0;
3824 ++}
3825 ++
3826 ++static struct led_trigger ledtrig_audio[NUM_AUDIO_LEDS] = {
3827 ++ [LED_AUDIO_MUTE] = {
3828 ++ .name = "audio-mute",
3829 ++ .activate = ledtrig_audio_mute_activate,
3830 ++ },
3831 ++ [LED_AUDIO_MICMUTE] = {
3832 ++ .name = "audio-micmute",
3833 ++ .activate = ledtrig_audio_micmute_activate,
3834 ++ },
3835 ++};
3836 ++
3837 + enum led_brightness ledtrig_audio_get(enum led_audio type)
3838 + {
3839 + return audio_state[type];
3840 +@@ -19,24 +42,22 @@ EXPORT_SYMBOL_GPL(ledtrig_audio_get);
3841 + void ledtrig_audio_set(enum led_audio type, enum led_brightness state)
3842 + {
3843 + audio_state[type] = state;
3844 +- led_trigger_event(ledtrig_audio[type], state);
3845 ++ led_trigger_event(&ledtrig_audio[type], state);
3846 + }
3847 + EXPORT_SYMBOL_GPL(ledtrig_audio_set);
3848 +
3849 + static int __init ledtrig_audio_init(void)
3850 + {
3851 +- led_trigger_register_simple("audio-mute",
3852 +- &ledtrig_audio[LED_AUDIO_MUTE]);
3853 +- led_trigger_register_simple("audio-micmute",
3854 +- &ledtrig_audio[LED_AUDIO_MICMUTE]);
3855 ++ led_trigger_register(&ledtrig_audio[LED_AUDIO_MUTE]);
3856 ++ led_trigger_register(&ledtrig_audio[LED_AUDIO_MICMUTE]);
3857 + return 0;
3858 + }
3859 + module_init(ledtrig_audio_init);
3860 +
3861 + static void __exit ledtrig_audio_exit(void)
3862 + {
3863 +- led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MUTE]);
3864 +- led_trigger_unregister_simple(ledtrig_audio[LED_AUDIO_MICMUTE]);
3865 ++ led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MUTE]);
3866 ++ led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MICMUTE]);
3867 + }
3868 + module_exit(ledtrig_audio_exit);
3869 +
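[Note, not part of the patch: the ledtrig-audio rewrite replaces the *_simple trigger helpers with statically defined struct led_trigger instances so an ->activate() hook can replay the cached brightness to a LED that binds after the trigger already fired (e.g. a keyboard-LED driver probing after the sound driver set the mute state; with the simple helpers such a LED stayed dark until the next event). led_set_brightness_nosleep() lives in the private drivers/leds/leds.h header, hence the new include. The shape of the pattern, as a sketch:

#include <linux/leds.h>
#include "../leds.h"	/* led_set_brightness_nosleep(); in-tree only */

static enum led_brightness example_state;

/* Called whenever a LED binds to the trigger: replay current state. */
static int example_trig_activate(struct led_classdev *cdev)
{
	led_set_brightness_nosleep(cdev, example_state);
	return 0;
}

static struct led_trigger example_trigger = {
	.name     = "example-state",
	.activate = example_trig_activate,
};

/* led_trigger_register(&example_trigger) at module init,
 * led_trigger_unregister(&example_trigger) at exit. */
]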
3870 +diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3871 +index bea8c4429ae8f..a407e3be0f170 100644
3872 +--- a/drivers/md/bcache/super.c
3873 ++++ b/drivers/md/bcache/super.c
3874 +@@ -935,20 +935,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
3875 + n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
3876 + d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
3877 + if (!d->full_dirty_stripes)
3878 +- return -ENOMEM;
3879 ++ goto out_free_stripe_sectors_dirty;
3880 +
3881 + idx = ida_simple_get(&bcache_device_idx, 0,
3882 + BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
3883 + if (idx < 0)
3884 +- return idx;
3885 ++ goto out_free_full_dirty_stripes;
3886 +
3887 + if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
3888 + BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
3889 +- goto err;
3890 ++ goto out_ida_remove;
3891 +
3892 + d->disk = alloc_disk(BCACHE_MINORS);
3893 + if (!d->disk)
3894 +- goto err;
3895 ++ goto out_bioset_exit;
3896 +
3897 + set_capacity(d->disk, sectors);
3898 + snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
3899 +@@ -994,8 +994,14 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
3900 +
3901 + return 0;
3902 +
3903 +-err:
3904 ++out_bioset_exit:
3905 ++ bioset_exit(&d->bio_split);
3906 ++out_ida_remove:
3907 + ida_simple_remove(&bcache_device_idx, idx);
3908 ++out_free_full_dirty_stripes:
3909 ++ kvfree(d->full_dirty_stripes);
3910 ++out_free_stripe_sectors_dirty:
3911 ++ kvfree(d->stripe_sectors_dirty);
3912 + return -ENOMEM;
3913 +
3914 + }
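[Note, not part of the patch: the bcache hunk converts two bare returns into a goto unwind ladder, since by the time full_dirty_stripes or the ida slot fails, stripe_sectors_dirty (and possibly full_dirty_stripes) had already been allocated and leaked. The invariant, as a sketch: labels are entered in reverse order of acquisition, and each failure jumps over the releases for resources not yet taken.

#include <linux/slab.h>

static int example_init(void **pa, void **pb)
{
	void *a, *b;

	a = kvzalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;		/* nothing to unwind yet */

	b = kvzalloc(64, GFP_KERNEL);
	if (!b)
		goto out_free_a;	/* release exactly what we hold */

	*pa = a;
	*pb = b;
	return 0;

out_free_a:
	kvfree(a);
	return -ENOMEM;
}
]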
3915 +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
3916 +index 753822ca96131..6b8e58ae3f9ee 100644
3917 +--- a/drivers/md/raid1.c
3918 ++++ b/drivers/md/raid1.c
3919 +@@ -1324,6 +1324,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
3920 + struct raid1_plug_cb *plug = NULL;
3921 + int first_clone;
3922 + int max_sectors;
3923 ++ bool write_behind = false;
3924 +
3925 + if (mddev_is_clustered(mddev) &&
3926 + md_cluster_ops->area_resyncing(mddev, WRITE,
3927 +@@ -1376,6 +1377,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
3928 + max_sectors = r1_bio->sectors;
3929 + for (i = 0; i < disks; i++) {
3930 + struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
3931 ++
3932 ++ /*
3933 ++ * The write-behind io is only attempted on drives marked as
3934 ++ * write-mostly, which means we could allocate write behind
3935 ++ * bio later.
3936 ++ */
3937 ++ if (rdev && test_bit(WriteMostly, &rdev->flags))
3938 ++ write_behind = true;
3939 ++
3940 + if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
3941 + atomic_inc(&rdev->nr_pending);
3942 + blocked_rdev = rdev;
3943 +@@ -1449,6 +1459,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
3944 + goto retry_write;
3945 + }
3946 +
3947 ++ /*
3948 ++ * When using a bitmap, we may call alloc_behind_master_bio below.
3949 ++ * alloc_behind_master_bio allocates a copy of the data payload a page
3950 ++ * at a time and thus needs a new bio that can fit the whole payload
3951 ++ * this bio in page sized chunks.
3952 ++ */
3953 ++ if (write_behind && bitmap)
3954 ++ max_sectors = min_t(int, max_sectors,
3955 ++ BIO_MAX_VECS * (PAGE_SIZE >> 9));
3956 + if (max_sectors < bio_sectors(bio)) {
3957 + struct bio *split = bio_split(bio, max_sectors,
3958 + GFP_NOIO, &conf->bio_split);
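[Note, not part of the patch: the clamp added above bounds a write-behind request to what one bio can carry, because alloc_behind_master_bio() copies the payload one page per bio_vec. With 4 KiB pages, PAGE_SIZE >> 9 is 8 sectors per page, and with BIO_MAX_VECS at its usual value of 256 the cap works out to 256 * 8 = 2048 sectors, i.e. 1 MiB per write-behind bio; anything larger is split first.]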
3959 +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
3960 +index 40e845fb97170..92b490aac93ee 100644
3961 +--- a/drivers/md/raid10.c
3962 ++++ b/drivers/md/raid10.c
3963 +@@ -1706,6 +1706,11 @@ retry_discard:
3964 + } else
3965 + r10_bio->master_bio = (struct bio *)first_r10bio;
3966 +
3967 ++ /*
3968 ++ * first select target devices under rcu_lock and
3969 ++ * inc refcount on their rdev. Record them by setting
3970 ++ * bios[x] to bio
3971 ++ */
3972 + rcu_read_lock();
3973 + for (disk = 0; disk < geo->raid_disks; disk++) {
3974 + struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
3975 +@@ -1737,9 +1742,6 @@ retry_discard:
3976 + for (disk = 0; disk < geo->raid_disks; disk++) {
3977 + sector_t dev_start, dev_end;
3978 + struct bio *mbio, *rbio = NULL;
3979 +- struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
3980 +- struct md_rdev *rrdev = rcu_dereference(
3981 +- conf->mirrors[disk].replacement);
3982 +
3983 + /*
3984 + * Now start to calculate the start and end address for each disk.
3985 +@@ -1769,9 +1771,12 @@ retry_discard:
3986 +
3987 + /*
3988 + * It only handles discard bio which size is >= stripe size, so
3989 +- * dev_end > dev_start all the time
3990 ++ * dev_end > dev_start all the time.
3991 ++ * It doesn't need to use rcu lock to get rdev here. We already
3992 ++ * add rdev->nr_pending in the first loop.
3993 + */
3994 + if (r10_bio->devs[disk].bio) {
3995 ++ struct md_rdev *rdev = conf->mirrors[disk].rdev;
3996 + mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
3997 + mbio->bi_end_io = raid10_end_discard_request;
3998 + mbio->bi_private = r10_bio;
3999 +@@ -1784,6 +1789,7 @@ retry_discard:
4000 + bio_endio(mbio);
4001 + }
4002 + if (r10_bio->devs[disk].repl_bio) {
4003 ++ struct md_rdev *rrdev = conf->mirrors[disk].replacement;
4004 + rbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
4005 + rbio->bi_end_io = raid10_end_discard_request;
4006 + rbio->bi_private = r10_bio;
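[Note, not part of the patch: the raid10 discard rework formalizes a two-pass pattern -- pass one runs under rcu_read_lock() and pins each target rdev by bumping nr_pending; pass two may then use plain pointer loads, since a pinned rdev cannot be torn down. A self-contained sketch of pass one, with simplified types:

#include <linux/atomic.h>
#include <linux/rcupdate.h>

struct example_rdev {
	atomic_t nr_pending;
};

struct example_mirror {
	struct example_rdev __rcu *rdev;
};

static void example_pin_all(struct example_mirror *mirrors, int ndisks)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < ndisks; i++) {
		struct example_rdev *rdev = rcu_dereference(mirrors[i].rdev);

		if (rdev)
			atomic_inc(&rdev->nr_pending);	/* pinned past the loop */
	}
	rcu_read_unlock();
	/* From here until the matching decrements, plain (non-RCU) loads
	 * of mirrors[i].rdev are safe. */
}
]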
4007 +diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
4008 +index 89bb7e6dc7a42..9554c8348c020 100644
4009 +--- a/drivers/media/i2c/tda1997x.c
4010 ++++ b/drivers/media/i2c/tda1997x.c
4011 +@@ -2233,6 +2233,7 @@ static int tda1997x_core_init(struct v4l2_subdev *sd)
4012 + /* get initial HDMI status */
4013 + state->hdmi_status = io_read(sd, REG_HDMI_FLAGS);
4014 +
4015 ++ io_write(sd, REG_EDID_ENABLE, EDID_ENABLE_A_EN | EDID_ENABLE_B_EN);
4016 + return 0;
4017 + }
4018 +
4019 +diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
4020 +index 2f42808c43a4b..c484c008ab027 100644
4021 +--- a/drivers/media/platform/coda/coda-bit.c
4022 ++++ b/drivers/media/platform/coda/coda-bit.c
4023 +@@ -2053,17 +2053,25 @@ static int __coda_start_decoding(struct coda_ctx *ctx)
4024 + u32 src_fourcc, dst_fourcc;
4025 + int ret;
4026 +
4027 ++ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
4028 ++ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
4029 ++ src_fourcc = q_data_src->fourcc;
4030 ++ dst_fourcc = q_data_dst->fourcc;
4031 ++
4032 + if (!ctx->initialized) {
4033 + ret = __coda_decoder_seq_init(ctx);
4034 + if (ret < 0)
4035 + return ret;
4036 ++ } else {
4037 ++ ctx->frame_mem_ctrl &= ~(CODA_FRAME_CHROMA_INTERLEAVE | (0x3 << 9) |
4038 ++ CODA9_FRAME_TILED2LINEAR);
4039 ++ if (dst_fourcc == V4L2_PIX_FMT_NV12 || dst_fourcc == V4L2_PIX_FMT_YUYV)
4040 ++ ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE;
4041 ++ if (ctx->tiled_map_type == GDI_TILED_FRAME_MB_RASTER_MAP)
4042 ++ ctx->frame_mem_ctrl |= (0x3 << 9) |
4043 ++ ((ctx->use_vdoa) ? 0 : CODA9_FRAME_TILED2LINEAR);
4044 + }
4045 +
4046 +- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
4047 +- q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
4048 +- src_fourcc = q_data_src->fourcc;
4049 +- dst_fourcc = q_data_dst->fourcc;
4050 +-
4051 + coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
4052 +
4053 + ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
4054 +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
4055 +index 53025c8c75312..20f59c59ff8a2 100644
4056 +--- a/drivers/media/platform/omap3isp/isp.c
4057 ++++ b/drivers/media/platform/omap3isp/isp.c
4058 +@@ -2037,8 +2037,10 @@ static int isp_subdev_notifier_complete(struct v4l2_async_notifier *async)
4059 + mutex_lock(&isp->media_dev.graph_mutex);
4060 +
4061 + ret = media_entity_enum_init(&isp->crashed, &isp->media_dev);
4062 +- if (ret)
4063 ++ if (ret) {
4064 ++ mutex_unlock(&isp->media_dev.graph_mutex);
4065 + return ret;
4066 ++ }
4067 +
4068 + list_for_each_entry(sd, &v4l2_dev->subdevs, list) {
4069 + if (sd->notifier != &isp->notifier)
4070 +diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
4071 +index b813d6dba4817..3a0871f0bea67 100644
4072 +--- a/drivers/media/platform/qcom/venus/helpers.c
4073 ++++ b/drivers/media/platform/qcom/venus/helpers.c
4074 +@@ -1138,6 +1138,9 @@ int venus_helper_set_format_constraints(struct venus_inst *inst)
4075 + if (!IS_V6(inst->core))
4076 + return 0;
4077 +
4078 ++ if (inst->opb_fmt == HFI_COLOR_FORMAT_NV12_UBWC)
4079 ++ return 0;
4080 ++
4081 + pconstraint.buffer_type = HFI_BUFFER_OUTPUT2;
4082 + pconstraint.num_planes = 2;
4083 + pconstraint.plane_format[0].stride_multiples = 128;
4084 +diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
4085 +index a2d436d407b22..e8776ac45b020 100644
4086 +--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
4087 ++++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
4088 +@@ -261,7 +261,7 @@ sys_get_prop_image_version(struct device *dev,
4089 +
4090 + smem_tbl_ptr = qcom_smem_get(QCOM_SMEM_HOST_ANY,
4091 + SMEM_IMG_VER_TBL, &smem_blk_sz);
4092 +- if (smem_tbl_ptr && smem_blk_sz >= SMEM_IMG_OFFSET_VENUS + VER_STR_SZ)
4093 ++ if (!IS_ERR(smem_tbl_ptr) && smem_blk_sz >= SMEM_IMG_OFFSET_VENUS + VER_STR_SZ)
4094 + memcpy(smem_tbl_ptr + SMEM_IMG_OFFSET_VENUS,
4095 + img_ver, VER_STR_SZ);
4096 + }
4097 +diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
4098 +index 4a7291f934b6b..2c443c1afd3a5 100644
4099 +--- a/drivers/media/platform/qcom/venus/venc.c
4100 ++++ b/drivers/media/platform/qcom/venus/venc.c
4101 +@@ -183,6 +183,8 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f)
4102 + else
4103 + return NULL;
4104 + fmt = find_format(inst, pixmp->pixelformat, f->type);
4105 ++ if (!fmt)
4106 ++ return NULL;
4107 + }
4108 +
4109 + pixmp->width = clamp(pixmp->width, frame_width_min(inst),
4110 +diff --git a/drivers/media/platform/rockchip/rga/rga-buf.c b/drivers/media/platform/rockchip/rga/rga-buf.c
4111 +index bf9a75b75083b..81508ed5abf34 100644
4112 +--- a/drivers/media/platform/rockchip/rga/rga-buf.c
4113 ++++ b/drivers/media/platform/rockchip/rga/rga-buf.c
4114 +@@ -79,9 +79,8 @@ static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count)
4115 + struct rockchip_rga *rga = ctx->rga;
4116 + int ret;
4117 +
4118 +- ret = pm_runtime_get_sync(rga->dev);
4119 ++ ret = pm_runtime_resume_and_get(rga->dev);
4120 + if (ret < 0) {
4121 +- pm_runtime_put_noidle(rga->dev);
4122 + rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED);
4123 + return ret;
4124 + }
4125 +diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
4126 +index 9d122429706e9..6759091b15e09 100644
4127 +--- a/drivers/media/platform/rockchip/rga/rga.c
4128 ++++ b/drivers/media/platform/rockchip/rga/rga.c
4129 +@@ -863,10 +863,12 @@ static int rga_probe(struct platform_device *pdev)
4130 + if (IS_ERR(rga->m2m_dev)) {
4131 + v4l2_err(&rga->v4l2_dev, "Failed to init mem2mem device\n");
4132 + ret = PTR_ERR(rga->m2m_dev);
4133 +- goto unreg_video_dev;
4134 ++ goto rel_vdev;
4135 + }
4136 +
4137 +- pm_runtime_get_sync(rga->dev);
4138 ++ ret = pm_runtime_resume_and_get(rga->dev);
4139 ++ if (ret < 0)
4140 ++ goto rel_vdev;
4141 +
4142 + rga->version.major = (rga_read(rga, RGA_VERSION_INFO) >> 24) & 0xFF;
4143 + rga->version.minor = (rga_read(rga, RGA_VERSION_INFO) >> 20) & 0x0F;
4144 +@@ -880,11 +882,23 @@ static int rga_probe(struct platform_device *pdev)
4145 + rga->cmdbuf_virt = dma_alloc_attrs(rga->dev, RGA_CMDBUF_SIZE,
4146 + &rga->cmdbuf_phy, GFP_KERNEL,
4147 + DMA_ATTR_WRITE_COMBINE);
4148 ++ if (!rga->cmdbuf_virt) {
4149 ++ ret = -ENOMEM;
4150 ++ goto rel_vdev;
4151 ++ }
4152 +
4153 + rga->src_mmu_pages =
4154 + (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
4155 ++ if (!rga->src_mmu_pages) {
4156 ++ ret = -ENOMEM;
4157 ++ goto free_dma;
4158 ++ }
4159 + rga->dst_mmu_pages =
4160 + (unsigned int *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 3);
4161 ++ if (!rga->dst_mmu_pages) {
4162 ++ ret = -ENOMEM;
4163 ++ goto free_src_pages;
4164 ++ }
4165 +
4166 + def_frame.stride = (def_frame.width * def_frame.fmt->depth) >> 3;
4167 + def_frame.size = def_frame.stride * def_frame.height;
4168 +@@ -892,7 +906,7 @@ static int rga_probe(struct platform_device *pdev)
4169 + ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
4170 + if (ret) {
4171 + v4l2_err(&rga->v4l2_dev, "Failed to register video device\n");
4172 +- goto rel_vdev;
4173 ++ goto free_dst_pages;
4174 + }
4175 +
4176 + v4l2_info(&rga->v4l2_dev, "Registered %s as /dev/%s\n",
4177 +@@ -900,10 +914,15 @@ static int rga_probe(struct platform_device *pdev)
4178 +
4179 + return 0;
4180 +
4181 ++free_dst_pages:
4182 ++ free_pages((unsigned long)rga->dst_mmu_pages, 3);
4183 ++free_src_pages:
4184 ++ free_pages((unsigned long)rga->src_mmu_pages, 3);
4185 ++free_dma:
4186 ++ dma_free_attrs(rga->dev, RGA_CMDBUF_SIZE, rga->cmdbuf_virt,
4187 ++ rga->cmdbuf_phy, DMA_ATTR_WRITE_COMBINE);
4188 + rel_vdev:
4189 + video_device_release(vfd);
4190 +-unreg_video_dev:
4191 +- video_unregister_device(rga->vfd);
4192 + unreg_v4l2_dev:
4193 + v4l2_device_unregister(&rga->v4l2_dev);
4194 + err_put_clk:
4195 +diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
4196 +index 931ec0727cd38..a280e4bd80c2f 100644
4197 +--- a/drivers/media/spi/cxd2880-spi.c
4198 ++++ b/drivers/media/spi/cxd2880-spi.c
4199 +@@ -524,13 +524,13 @@ cxd2880_spi_probe(struct spi_device *spi)
4200 + if (IS_ERR(dvb_spi->vcc_supply)) {
4201 + if (PTR_ERR(dvb_spi->vcc_supply) == -EPROBE_DEFER) {
4202 + ret = -EPROBE_DEFER;
4203 +- goto fail_adapter;
4204 ++ goto fail_regulator;
4205 + }
4206 + dvb_spi->vcc_supply = NULL;
4207 + } else {
4208 + ret = regulator_enable(dvb_spi->vcc_supply);
4209 + if (ret)
4210 +- goto fail_adapter;
4211 ++ goto fail_regulator;
4212 + }
4213 +
4214 + dvb_spi->spi = spi;
4215 +@@ -618,6 +618,9 @@ fail_frontend:
4216 + fail_attach:
4217 + dvb_unregister_adapter(&dvb_spi->adapter);
4218 + fail_adapter:
4219 ++ if (dvb_spi->vcc_supply)
4220 ++ regulator_disable(dvb_spi->vcc_supply);
4221 ++fail_regulator:
4222 + kfree(dvb_spi);
4223 + return ret;
4224 + }
4225 +diff --git a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
4226 +index 2e07106f46803..bc4b2abdde1a4 100644
4227 +--- a/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
4228 ++++ b/drivers/media/usb/dvb-usb/dvb-usb-i2c.c
4229 +@@ -17,7 +17,8 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
4230 +
4231 + if (d->props.i2c_algo == NULL) {
4232 + err("no i2c algorithm specified");
4233 +- return -EINVAL;
4234 ++ ret = -EINVAL;
4235 ++ goto err;
4236 + }
4237 +
4238 + strscpy(d->i2c_adap.name, d->desc->name, sizeof(d->i2c_adap.name));
4239 +@@ -27,11 +28,15 @@ int dvb_usb_i2c_init(struct dvb_usb_device *d)
4240 +
4241 + i2c_set_adapdata(&d->i2c_adap, d);
4242 +
4243 +- if ((ret = i2c_add_adapter(&d->i2c_adap)) < 0)
4244 ++ ret = i2c_add_adapter(&d->i2c_adap);
4245 ++ if (ret < 0) {
4246 + err("could not add i2c adapter");
4247 ++ goto err;
4248 ++ }
4249 +
4250 + d->state |= DVB_USB_STATE_I2C;
4251 +
4252 ++err:
4253 + return ret;
4254 + }
4255 +
4256 +diff --git a/drivers/media/usb/dvb-usb/dvb-usb-init.c b/drivers/media/usb/dvb-usb/dvb-usb-init.c
4257 +index 28e1fd64dd3c2..61439c8f33cab 100644
4258 +--- a/drivers/media/usb/dvb-usb/dvb-usb-init.c
4259 ++++ b/drivers/media/usb/dvb-usb/dvb-usb-init.c
4260 +@@ -194,8 +194,8 @@ static int dvb_usb_init(struct dvb_usb_device *d, short *adapter_nums)
4261 +
4262 + err_adapter_init:
4263 + dvb_usb_adapter_exit(d);
4264 +-err_i2c_init:
4265 + dvb_usb_i2c_exit(d);
4266 ++err_i2c_init:
4267 + if (d->priv && d->props.priv_destroy)
4268 + d->props.priv_destroy(d);
4269 + err_priv_init:
4270 +diff --git a/drivers/media/usb/dvb-usb/nova-t-usb2.c b/drivers/media/usb/dvb-usb/nova-t-usb2.c
4271 +index e7b290552b663..9c0eb0d40822e 100644
4272 +--- a/drivers/media/usb/dvb-usb/nova-t-usb2.c
4273 ++++ b/drivers/media/usb/dvb-usb/nova-t-usb2.c
4274 +@@ -130,7 +130,7 @@ ret:
4275 +
4276 + static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
4277 + {
4278 +- int i;
4279 ++ int i, ret;
4280 + u8 b;
4281 +
4282 + mac[0] = 0x00;
4283 +@@ -139,7 +139,9 @@ static int nova_t_read_mac_address (struct dvb_usb_device *d, u8 mac[6])
4284 +
4285 + /* this is a complete guess, but works for my box */
4286 + for (i = 136; i < 139; i++) {
4287 +- dibusb_read_eeprom_byte(d,i, &b);
4288 ++ ret = dibusb_read_eeprom_byte(d, i, &b);
4289 ++ if (ret)
4290 ++ return ret;
4291 +
4292 + mac[5 - (i - 136)] = b;
4293 + }
4294 +diff --git a/drivers/media/usb/dvb-usb/vp702x.c b/drivers/media/usb/dvb-usb/vp702x.c
4295 +index bf54747e2e01a..a1d9e4801a2ba 100644
4296 +--- a/drivers/media/usb/dvb-usb/vp702x.c
4297 ++++ b/drivers/media/usb/dvb-usb/vp702x.c
4298 +@@ -291,16 +291,22 @@ static int vp702x_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
4299 + static int vp702x_read_mac_addr(struct dvb_usb_device *d,u8 mac[6])
4300 + {
4301 + u8 i, *buf;
4302 ++ int ret;
4303 + struct vp702x_device_state *st = d->priv;
4304 +
4305 + mutex_lock(&st->buf_mutex);
4306 + buf = st->buf;
4307 +- for (i = 6; i < 12; i++)
4308 +- vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1, &buf[i - 6], 1);
4309 ++ for (i = 6; i < 12; i++) {
4310 ++ ret = vp702x_usb_in_op(d, READ_EEPROM_REQ, i, 1,
4311 ++ &buf[i - 6], 1);
4312 ++ if (ret < 0)
4313 ++ goto err;
4314 ++ }
4315 +
4316 + memcpy(mac, buf, 6);
4317 ++err:
4318 + mutex_unlock(&st->buf_mutex);
4319 +- return 0;
4320 ++ return ret;
4321 + }
4322 +
4323 + static int vp702x_frontend_attach(struct dvb_usb_adapter *adap)
4324 +diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
4325 +index 59529cbf9cd0b..0b6d77c3bec86 100644
4326 +--- a/drivers/media/usb/em28xx/em28xx-input.c
4327 ++++ b/drivers/media/usb/em28xx/em28xx-input.c
4328 +@@ -842,7 +842,6 @@ error:
4329 + kfree(ir);
4330 + ref_put:
4331 + em28xx_shutdown_buttons(dev);
4332 +- kref_put(&dev->ref, em28xx_free_device);
4333 + return err;
4334 + }
4335 +
4336 +diff --git a/drivers/media/usb/go7007/go7007-driver.c b/drivers/media/usb/go7007/go7007-driver.c
4337 +index f1767be9d8685..6650eab913d81 100644
4338 +--- a/drivers/media/usb/go7007/go7007-driver.c
4339 ++++ b/drivers/media/usb/go7007/go7007-driver.c
4340 +@@ -691,49 +691,23 @@ struct go7007 *go7007_alloc(const struct go7007_board_info *board,
4341 + struct device *dev)
4342 + {
4343 + struct go7007 *go;
4344 +- int i;
4345 +
4346 + go = kzalloc(sizeof(struct go7007), GFP_KERNEL);
4347 + if (go == NULL)
4348 + return NULL;
4349 + go->dev = dev;
4350 + go->board_info = board;
4351 +- go->board_id = 0;
4352 + go->tuner_type = -1;
4353 +- go->channel_number = 0;
4354 +- go->name[0] = 0;
4355 + mutex_init(&go->hw_lock);
4356 + init_waitqueue_head(&go->frame_waitq);
4357 + spin_lock_init(&go->spinlock);
4358 + go->status = STATUS_INIT;
4359 +- memset(&go->i2c_adapter, 0, sizeof(go->i2c_adapter));
4360 +- go->i2c_adapter_online = 0;
4361 +- go->interrupt_available = 0;
4362 + init_waitqueue_head(&go->interrupt_waitq);
4363 +- go->input = 0;
4364 + go7007_update_board(go);
4365 +- go->encoder_h_halve = 0;
4366 +- go->encoder_v_halve = 0;
4367 +- go->encoder_subsample = 0;
4368 + go->format = V4L2_PIX_FMT_MJPEG;
4369 + go->bitrate = 1500000;
4370 + go->fps_scale = 1;
4371 +- go->pali = 0;
4372 + go->aspect_ratio = GO7007_RATIO_1_1;
4373 +- go->gop_size = 0;
4374 +- go->ipb = 0;
4375 +- go->closed_gop = 0;
4376 +- go->repeat_seqhead = 0;
4377 +- go->seq_header_enable = 0;
4378 +- go->gop_header_enable = 0;
4379 +- go->dvd_mode = 0;
4380 +- go->interlace_coding = 0;
4381 +- for (i = 0; i < 4; ++i)
4382 +- go->modet[i].enable = 0;
4383 +- for (i = 0; i < 1624; ++i)
4384 +- go->modet_map[i] = 0;
4385 +- go->audio_deliver = NULL;
4386 +- go->audio_enabled = 0;
4387 +
4388 + return go;
4389 + }
4390 +diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
4391 +index dbf0455d5d50d..eeb85981e02b6 100644
4392 +--- a/drivers/media/usb/go7007/go7007-usb.c
4393 ++++ b/drivers/media/usb/go7007/go7007-usb.c
4394 +@@ -1134,7 +1134,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
4395 +
4396 + ep = usb->usbdev->ep_in[4];
4397 + if (!ep)
4398 +- return -ENODEV;
4399 ++ goto allocfail;
4400 +
4401 + /* Allocate the URB and buffer for receiving incoming interrupts */
4402 + usb->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
4403 +diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
4404 +index cd833011f2850..4757e29b42c0b 100644
4405 +--- a/drivers/misc/lkdtm/core.c
4406 ++++ b/drivers/misc/lkdtm/core.c
4407 +@@ -81,7 +81,7 @@ static struct crashpoint crashpoints[] = {
4408 + CRASHPOINT("FS_DEVRW", "ll_rw_block"),
4409 + CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"),
4410 + CRASHPOINT("TIMERADD", "hrtimer_start"),
4411 +- CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"),
4412 ++ CRASHPOINT("SCSI_QUEUE_RQ", "scsi_queue_rq"),
4413 + CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"),
4414 + #endif
4415 + };
4416 +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
4417 +index c3229d8c7041c..33cb70aa02aa8 100644
4418 +--- a/drivers/mmc/host/dw_mmc.c
4419 ++++ b/drivers/mmc/host/dw_mmc.c
4420 +@@ -782,6 +782,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host,
4421 + int ret = 0;
4422 +
4423 + /* Set external dma config: burst size, burst width */
4424 ++ memset(&cfg, 0, sizeof(cfg));
4425 + cfg.dst_addr = host->phy_regs + fifo_offset;
4426 + cfg.src_addr = cfg.dst_addr;
4427 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
4428 +diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
4429 +index bde2988875797..6c9d38132f74c 100644
4430 +--- a/drivers/mmc/host/moxart-mmc.c
4431 ++++ b/drivers/mmc/host/moxart-mmc.c
4432 +@@ -628,6 +628,7 @@ static int moxart_probe(struct platform_device *pdev)
4433 + host->dma_chan_tx, host->dma_chan_rx);
4434 + host->have_dma = true;
4435 +
4436 ++ memset(&cfg, 0, sizeof(cfg));
4437 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
4438 + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
4439 +
4440 +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
4441 +index 6b39126fbf06c..a1df6d4e9e86e 100644
4442 +--- a/drivers/mmc/host/sdhci.c
4443 ++++ b/drivers/mmc/host/sdhci.c
4444 +@@ -1222,6 +1222,7 @@ static int sdhci_external_dma_setup(struct sdhci_host *host,
4445 + if (!host->mapbase)
4446 + return -EINVAL;
4447 +
4448 ++ memset(&cfg, 0, sizeof(cfg));
4449 + cfg.src_addr = host->mapbase + SDHCI_BUFFER;
4450 + cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
4451 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
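[Note, not part of the patch: dw_mmc, moxart and sdhci all get the same one-line fix. struct dma_slave_config has grown fields over time and dmaengine providers interpret whatever is in them, so a stack instance must be zeroed before selectively filling it. The idiom:

#include <linux/dmaengine.h>
#include <linux/string.h>

static int example_config_chan(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));	/* don't hand stack garbage to the provider */
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = fifo;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_maxburst = 4;

	return dmaengine_slave_config(chan, &cfg);
}
]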
4452 +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
4453 +index 3ca6b394dd5f5..54b273d858612 100644
4454 +--- a/drivers/net/dsa/b53/b53_common.c
4455 ++++ b/drivers/net/dsa/b53/b53_common.c
4456 +@@ -1993,15 +1993,6 @@ int b53_br_flags(struct dsa_switch *ds, int port,
4457 + }
4458 + EXPORT_SYMBOL(b53_br_flags);
4459 +
4460 +-int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
4461 +- struct netlink_ext_ack *extack)
4462 +-{
4463 +- b53_port_set_mcast_flood(ds->priv, port, mrouter);
4464 +-
4465 +- return 0;
4466 +-}
4467 +-EXPORT_SYMBOL(b53_set_mrouter);
4468 +-
4469 + static bool b53_possible_cpu_port(struct dsa_switch *ds, int port)
4470 + {
4471 + /* Broadcom switches will accept enabling Broadcom tags on the
4472 +@@ -2245,7 +2236,6 @@ static const struct dsa_switch_ops b53_switch_ops = {
4473 + .port_bridge_leave = b53_br_leave,
4474 + .port_pre_bridge_flags = b53_br_flags_pre,
4475 + .port_bridge_flags = b53_br_flags,
4476 +- .port_set_mrouter = b53_set_mrouter,
4477 + .port_stp_state_set = b53_br_set_stp_state,
4478 + .port_fast_age = b53_br_fast_age,
4479 + .port_vlan_filtering = b53_vlan_filtering,
4480 +diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
4481 +index 82700a5714c10..9bf8319342b0b 100644
4482 +--- a/drivers/net/dsa/b53/b53_priv.h
4483 ++++ b/drivers/net/dsa/b53/b53_priv.h
4484 +@@ -328,8 +328,6 @@ int b53_br_flags_pre(struct dsa_switch *ds, int port,
4485 + int b53_br_flags(struct dsa_switch *ds, int port,
4486 + struct switchdev_brport_flags flags,
4487 + struct netlink_ext_ack *extack);
4488 +-int b53_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
4489 +- struct netlink_ext_ack *extack);
4490 + int b53_setup_devlink_resources(struct dsa_switch *ds);
4491 + void b53_port_event(struct dsa_switch *ds, int port);
4492 + void b53_phylink_validate(struct dsa_switch *ds, int port,
4493 +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
4494 +index 3b018fcf44124..6ce9ec1283e05 100644
4495 +--- a/drivers/net/dsa/bcm_sf2.c
4496 ++++ b/drivers/net/dsa/bcm_sf2.c
4497 +@@ -1199,7 +1199,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
4498 + .port_pre_bridge_flags = b53_br_flags_pre,
4499 + .port_bridge_flags = b53_br_flags,
4500 + .port_stp_state_set = b53_br_set_stp_state,
4501 +- .port_set_mrouter = b53_set_mrouter,
4502 + .port_fast_age = b53_br_fast_age,
4503 + .port_vlan_filtering = b53_vlan_filtering,
4504 + .port_vlan_add = b53_vlan_add,
4505 +diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
4506 +index 2b01efad1a51c..647f8e5c16da5 100644
4507 +--- a/drivers/net/dsa/mt7530.c
4508 ++++ b/drivers/net/dsa/mt7530.c
4509 +@@ -1172,18 +1172,6 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
4510 + return 0;
4511 + }
4512 +
4513 +-static int
4514 +-mt7530_port_set_mrouter(struct dsa_switch *ds, int port, bool mrouter,
4515 +- struct netlink_ext_ack *extack)
4516 +-{
4517 +- struct mt7530_priv *priv = ds->priv;
4518 +-
4519 +- mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
4520 +- mrouter ? UNM_FFP(BIT(port)) : 0);
4521 +-
4522 +- return 0;
4523 +-}
4524 +-
4525 + static int
4526 + mt7530_port_bridge_join(struct dsa_switch *ds, int port,
4527 + struct net_device *bridge)
4528 +@@ -2847,7 +2835,6 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
4529 + .port_stp_state_set = mt7530_stp_state_set,
4530 + .port_pre_bridge_flags = mt7530_port_pre_bridge_flags,
4531 + .port_bridge_flags = mt7530_port_bridge_flags,
4532 +- .port_set_mrouter = mt7530_port_set_mrouter,
4533 + .port_bridge_join = mt7530_port_bridge_join,
4534 + .port_bridge_leave = mt7530_port_bridge_leave,
4535 + .port_fdb_add = mt7530_port_fdb_add,
4536 +diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
4537 +index 272b0535d9461..111a6d5985da6 100644
4538 +--- a/drivers/net/dsa/mv88e6xxx/chip.c
4539 ++++ b/drivers/net/dsa/mv88e6xxx/chip.c
4540 +@@ -5781,23 +5781,6 @@ out:
4541 + return err;
4542 + }
4543 +
4544 +-static int mv88e6xxx_port_set_mrouter(struct dsa_switch *ds, int port,
4545 +- bool mrouter,
4546 +- struct netlink_ext_ack *extack)
4547 +-{
4548 +- struct mv88e6xxx_chip *chip = ds->priv;
4549 +- int err;
4550 +-
4551 +- if (!chip->info->ops->port_set_mcast_flood)
4552 +- return -EOPNOTSUPP;
4553 +-
4554 +- mv88e6xxx_reg_lock(chip);
4555 +- err = chip->info->ops->port_set_mcast_flood(chip, port, mrouter);
4556 +- mv88e6xxx_reg_unlock(chip);
4557 +-
4558 +- return err;
4559 +-}
4560 +-
4561 + static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
4562 + struct net_device *lag,
4563 + struct netdev_lag_upper_info *info)
4564 +@@ -6099,7 +6082,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
4565 + .port_bridge_leave = mv88e6xxx_port_bridge_leave,
4566 + .port_pre_bridge_flags = mv88e6xxx_port_pre_bridge_flags,
4567 + .port_bridge_flags = mv88e6xxx_port_bridge_flags,
4568 +- .port_set_mrouter = mv88e6xxx_port_set_mrouter,
4569 + .port_stp_state_set = mv88e6xxx_port_stp_state_set,
4570 + .port_fast_age = mv88e6xxx_port_fast_age,
4571 + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
4572 +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
4573 +index 59253846e8858..f26d037356191 100644
4574 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
4575 ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
4576 +@@ -417,6 +417,9 @@ static int atl_resume_common(struct device *dev, bool deep)
4577 + pci_restore_state(pdev);
4578 +
4579 + if (deep) {
4580 ++ /* Reinitialize Nic/Vecs objects */
4581 ++ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
4582 ++
4583 + ret = aq_nic_init(nic);
4584 + if (ret)
4585 + goto err_exit;
4586 +diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
4587 +index 53864f2005994..b175f2b2f5bcf 100644
4588 +--- a/drivers/net/ethernet/google/gve/gve_adminq.c
4589 ++++ b/drivers/net/ethernet/google/gve/gve_adminq.c
4590 +@@ -233,7 +233,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
4591 + tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
4592 +
4593 + // Check if next command will overflow the buffer.
4594 +- if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
4595 ++ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
4596 ++ (tail & priv->adminq_mask)) {
4597 + int err;
4598 +
4599 + // Flush existing commands to make room.
4600 +@@ -243,7 +244,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
4601 +
4602 + // Retry.
4603 + tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
4604 +- if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
4605 ++ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
4606 ++ (tail & priv->adminq_mask)) {
4607 + // This should never happen. We just flushed the
4608 + // command queue so there should be enough space.
4609 + return -ENOMEM;
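
The two gve hunks above repeat one fix: the adminq producer counter and the hardware event counter both free-run past the ring size, so a fullness check must reduce both modulo the ring before comparing slots, not just the producer. A self-contained sketch (ring size and counters are made up):

#include <assert.h>

#define RING_SIZE 16u
#define RING_MASK (RING_SIZE - 1)

static int ring_full(unsigned int prod, unsigned int tail)
{
        /* Comparing against an unmasked tail never matches once tail
         * grows past RING_MASK; mask both sides instead. */
        return ((prod + 1) & RING_MASK) == (tail & RING_MASK);
}

int main(void)
{
        assert(!ring_full(30, 16));     /* 14 of 15 usable slots in use */
        assert(ring_full(31, 16));      /* full: one slot kept free to
                                           disambiguate full from empty */
        return 0;
}
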
4610 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
4611 +index eff0a30790dd7..472f56b360b8c 100644
4612 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
4613 ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
4614 +@@ -1160,12 +1160,12 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf)
4615 + }
4616 +
4617 + /**
4618 +- * i40e_getnum_vf_vsi_vlan_filters
4619 ++ * __i40e_getnum_vf_vsi_vlan_filters
4620 + * @vsi: pointer to the vsi
4621 + *
4622 + * called to get the number of VLANs offloaded on this VF
4623 + **/
4624 +-static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
4625 ++static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
4626 + {
4627 + struct i40e_mac_filter *f;
4628 + u16 num_vlans = 0, bkt;
4629 +@@ -1178,6 +1178,23 @@ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
4630 + return num_vlans;
4631 + }
4632 +
4633 ++/**
4634 ++ * i40e_getnum_vf_vsi_vlan_filters
4635 ++ * @vsi: pointer to the vsi
4636 ++ *
4637 ++ * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held
4638 ++ **/
4639 ++static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
4640 ++{
4641 ++ int num_vlans;
4642 ++
4643 ++ spin_lock_bh(&vsi->mac_filter_hash_lock);
4644 ++ num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
4645 ++ spin_unlock_bh(&vsi->mac_filter_hash_lock);
4646 ++
4647 ++ return num_vlans;
4648 ++}
4649 ++
4650 + /**
4651 + * i40e_get_vlan_list_sync
4652 + * @vsi: pointer to the VSI
4653 +@@ -1195,7 +1212,7 @@ static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans,
4654 + int bkt;
4655 +
4656 + spin_lock_bh(&vsi->mac_filter_hash_lock);
4657 +- *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi);
4658 ++ *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi);
4659 + *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC);
4660 + if (!(*vlan_list))
4661 + goto err;
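
The i40e fix above is the classic split into a lock-free __variant plus a locking wrapper: i40e_get_vlan_list_sync(), which already holds mac_filter_hash_lock, calls the __variant directly, while all other callers get the missing locking through the wrapper. A hedged sketch of the shape (hypothetical names):

#include <pthread.h>

static pthread_mutex_t filter_lock = PTHREAD_MUTEX_INITIALIZER;
static int filter_count;        /* protected by filter_lock */

/* Caller must hold filter_lock. */
static int __get_filter_count(void)
{
        return filter_count;
}

/* Wrapper for callers that do not already hold the lock. */
static int get_filter_count(void)
{
        int n;

        pthread_mutex_lock(&filter_lock);
        n = __get_filter_count();
        pthread_mutex_unlock(&filter_lock);
        return n;
}

int main(void)
{
        return get_filter_count();
}
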
4662 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
4663 +index a7f2f5c490e30..e16d20b77a3ff 100644
4664 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
4665 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
4666 +@@ -4911,6 +4911,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
4667 + struct ice_hw *hw = &pf->hw;
4668 + struct sockaddr *addr = pi;
4669 + enum ice_status status;
4670 ++ u8 old_mac[ETH_ALEN];
4671 + u8 flags = 0;
4672 + int err = 0;
4673 + u8 *mac;
4674 +@@ -4933,8 +4934,13 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
4675 + }
4676 +
4677 + netif_addr_lock_bh(netdev);
4678 ++ ether_addr_copy(old_mac, netdev->dev_addr);
4679 ++ /* change the netdev's MAC address */
4680 ++ memcpy(netdev->dev_addr, mac, netdev->addr_len);
4681 ++ netif_addr_unlock_bh(netdev);
4682 ++
4683 + /* Clean up old MAC filter. Not an error if old filter doesn't exist */
4684 +- status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI);
4685 ++ status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI);
4686 + if (status && status != ICE_ERR_DOES_NOT_EXIST) {
4687 + err = -EADDRNOTAVAIL;
4688 + goto err_update_filters;
4689 +@@ -4957,13 +4963,12 @@ err_update_filters:
4690 + if (err) {
4691 + netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
4692 + mac);
4693 ++ netif_addr_lock_bh(netdev);
4694 ++ ether_addr_copy(netdev->dev_addr, old_mac);
4695 + netif_addr_unlock_bh(netdev);
4696 + return err;
4697 + }
4698 +
4699 +- /* change the netdev's MAC address */
4700 +- memcpy(netdev->dev_addr, mac, netdev->addr_len);
4701 +- netif_addr_unlock_bh(netdev);
4702 + netdev_dbg(vsi->netdev, "updated MAC address to %pM\n",
4703 + netdev->dev_addr);
4704 +
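
The ice change above inverts the update order: commit the new MAC to the netdev first (under the addr lock), then update the hardware filters, and restore the saved old_mac only if that fails. A reduced sketch of the commit-and-roll-back shape; every name here is hypothetical:

#include <stdio.h>
#include <string.h>

#define ADDR_LEN 6

static unsigned char dev_addr[ADDR_LEN];

static int update_filters(const unsigned char *mac)
{
        (void)mac;
        return -1;        /* pretend the hardware update failed */
}

static int set_mac(const unsigned char *mac)
{
        unsigned char old[ADDR_LEN];
        int err;

        memcpy(old, dev_addr, ADDR_LEN);
        memcpy(dev_addr, mac, ADDR_LEN);        /* optimistic commit */

        err = update_filters(mac);
        if (err)
                memcpy(dev_addr, old, ADDR_LEN);        /* roll back */
        return err;
}

int main(void)
{
        const unsigned char mac[ADDR_LEN] = { 2, 0, 0, 0, 0, 1 };

        printf("set_mac: %d\n", set_mac(mac));
        return 0;
}
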
4705 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
4706 +index e66109367487a..c1e11cb68d265 100644
4707 +--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
4708 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
4709 +@@ -195,8 +195,6 @@ enum nix_scheduler {
4710 + #define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b))
4711 + #define NIX_CHAN_SDP_CH_START (0x700ull)
4712 +
4713 +-#define SDP_CHANNELS 256
4714 +-
4715 + /* NIX LSO format indices.
4716 + * As of now TSO is the only one using, so statically assigning indices.
4717 + */
4718 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
4719 +index 7d9e71c6965fb..7a2157709dde0 100644
4720 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
4721 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
4722 +@@ -12,9 +12,10 @@
4723 +
4724 + int rvu_set_channels_base(struct rvu *rvu)
4725 + {
4726 ++ u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans;
4727 ++ u16 sdp_chan_base, cgx_chan_base, cpt_chan_base;
4728 + struct rvu_hwinfo *hw = rvu->hw;
4729 +- u16 cpt_chan_base;
4730 +- u64 nix_const;
4731 ++ u64 nix_const, nix_const1;
4732 + int blkaddr;
4733 +
4734 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
4735 +@@ -22,6 +23,7 @@ int rvu_set_channels_base(struct rvu *rvu)
4736 + return blkaddr;
4737 +
4738 + nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4739 ++ nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4740 +
4741 + hw->cgx = (nix_const >> 12) & 0xFULL;
4742 + hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL;
4743 +@@ -44,14 +46,24 @@ int rvu_set_channels_base(struct rvu *rvu)
4744 + * channels such that all channel numbers are contiguous
4745 + * leaving no holes. This way the new CPT channels can be
4746 + * accommodated. The order of channel numbers assigned is
4747 +- * LBK, SDP, CGX and CPT.
4748 ++ * LBK, SDP, CGX and CPT. Also the base channel number
4749 ++ * of a block must be multiple of number of channels
4750 ++ * of the block.
4751 + */
4752 +- hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links *
4753 +- ((nix_const >> 16) & 0xFFULL);
4754 +- hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS;
4755 ++ nr_lbk_chans = (nix_const >> 16) & 0xFFULL;
4756 ++ nr_sdp_chans = nix_const1 & 0xFFFULL;
4757 ++ nr_cgx_chans = nix_const & 0xFFULL;
4758 ++ nr_cpt_chans = (nix_const >> 32) & 0xFFFULL;
4759 +
4760 +- cpt_chan_base = hw->cgx_chan_base + hw->cgx_links *
4761 +- (nix_const & 0xFFULL);
4762 ++ sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans;
4763 ++ /* Round up base channel to multiple of number of channels */
4764 ++ hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans);
4765 ++
4766 ++ cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans;
4767 ++ hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans);
4768 ++
4769 ++ cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans;
4770 ++ hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans);
4771 +
4772 + /* Out of 4096 channels start CPT from 2048 so
4773 + * that MSB for CPT channels is always set
4774 +@@ -155,6 +167,7 @@ err_put:
4775 +
4776 + static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
4777 + {
4778 ++ u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
4779 + u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
4780 + u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans;
4781 + struct rvu_hwinfo *hw = rvu->hw;
4782 +@@ -164,7 +177,7 @@ static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr)
4783 +
4784 + cgx_chans = nix_const & 0xFFULL;
4785 + lbk_chans = (nix_const >> 16) & 0xFFULL;
4786 +- sdp_chans = SDP_CHANNELS;
4787 ++ sdp_chans = nix_const1 & 0xFFFULL;
4788 + cpt_chans = (nix_const >> 32) & 0xFFFULL;
4789 +
4790 + start = hw->cgx_chan_base;
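
The comment added above states that each block's base channel must be a multiple of that block's channel count, which is what the new ALIGN() calls enforce. The kernel's ALIGN() rounds up with mask arithmetic and assumes a power-of-two size; a tiny sketch with made-up channel counts:

#include <stdio.h>

#define ALIGN_POW2(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int lbk_base = 0, lbk_links = 1, nr_lbk = 64;
        unsigned int nr_sdp = 128;
        unsigned int sdp_base = lbk_base + lbk_links * nr_lbk;  /* 64 */

        /* 64 is not a multiple of 128, so the base moves up to 128. */
        printf("sdp base: %u\n", ALIGN_POW2(sdp_base, nr_sdp));
        return 0;
}
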
4791 +diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
4792 +index 0bc4529691ec9..d413078fc043b 100644
4793 +--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
4794 ++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
4795 +@@ -23,7 +23,7 @@
4796 + #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */
4797 +
4798 + #define NPC_PARSE_RESULT_DMAC_OFFSET 8
4799 +-#define NPC_HW_TSTAMP_OFFSET 8
4800 ++#define NPC_HW_TSTAMP_OFFSET 8ULL
4801 + #define NPC_KEX_CHAN_MASK 0xFFFULL
4802 + #define NPC_KEX_PF_FUNC_MASK 0xFFFFULL
4803 +
4804 +@@ -823,7 +823,7 @@ void rvu_npc_enable_bcast_entry(struct rvu *rvu, u16 pcifunc, bool enable)
4805 + static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
4806 + int blkaddr, u16 pcifunc, u64 rx_action)
4807 + {
4808 +- int actindex, index, bank;
4809 ++ int actindex, index, bank, entry;
4810 + bool enable;
4811 +
4812 + if (!(pcifunc & RVU_PFVF_FUNC_MASK))
4813 +@@ -834,7 +834,7 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
4814 + if (mcam->entry2target_pffunc[index] == pcifunc) {
4815 + bank = npc_get_bank(mcam, index);
4816 + actindex = index;
4817 +- index &= (mcam->banksize - 1);
4818 ++ entry = index & (mcam->banksize - 1);
4819 +
4820 + /* read vf flow entry enable status */
4821 + enable = is_mcam_entry_enabled(rvu, mcam, blkaddr,
4822 +@@ -844,7 +844,7 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
4823 + false);
4824 + /* update 'action' */
4825 + rvu_write64(rvu, blkaddr,
4826 +- NPC_AF_MCAMEX_BANKX_ACTION(index, bank),
4827 ++ NPC_AF_MCAMEX_BANKX_ACTION(entry, bank),
4828 + rx_action);
4829 + if (enable)
4830 + npc_enable_mcam_entry(rvu, mcam, blkaddr,
4831 +@@ -1619,14 +1619,15 @@ int rvu_npc_init(struct rvu *rvu)
4832 +
4833 + /* Enable below for Rx pkts.
4834 + * - Outer IPv4 header checksum validation.
4835 +- * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M].
4836 ++ * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B].
4837 ++ * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M].
4838 + * - Inner IPv4 header checksum validation.
4839 + * - Set non zero checksum error code value
4840 + */
4841 + rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG,
4842 + rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) |
4843 +- BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) |
4844 +- BIT_ULL(2) | BIT_ULL(1));
4845 ++ ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) |
4846 ++ BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1));
4847 +
4848 + rvu_npc_setup_interfaces(rvu, blkaddr);
4849 +
4850 +@@ -1751,7 +1752,7 @@ static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu,
4851 + int blkaddr, u16 entry, u16 cntr)
4852 + {
4853 + u16 index = entry & (mcam->banksize - 1);
4854 +- u16 bank = npc_get_bank(mcam, entry);
4855 ++ u32 bank = npc_get_bank(mcam, entry);
4856 +
4857 + /* Remove mapping and reduce counter's refcnt */
4858 + mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP;
4859 +@@ -2365,8 +2366,8 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu,
4860 + struct npc_mcam *mcam = &rvu->hw->mcam;
4861 + u16 pcifunc = req->hdr.pcifunc;
4862 + u16 old_entry, new_entry;
4863 ++ int blkaddr, rc = 0;
4864 + u16 index, cntr;
4865 +- int blkaddr, rc;
4866 +
4867 + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
4868 + if (blkaddr < 0)
4869 +@@ -2567,10 +2568,11 @@ int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu,
4870 + index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry);
4871 + if (index >= mcam->bmap_entries)
4872 + break;
4873 ++ entry = index + 1;
4874 ++
4875 + if (mcam->entry2cntr_map[index] != req->cntr)
4876 + continue;
4877 +
4878 +- entry = index + 1;
4879 + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr,
4880 + index, req->cntr);
4881 + }
4882 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
4883 +index 16ba457197a2b..e0d1af9e7770d 100644
4884 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
4885 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
4886 +@@ -208,7 +208,8 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
4887 + if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
4888 + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4889 + /* update dmac field in vlan offload rule */
4890 +- if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
4891 ++ if (netif_running(netdev) &&
4892 ++ pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
4893 + otx2_install_rxvlan_offload_flow(pfvf);
4894 + } else {
4895 + return -EPERM;
4896 +@@ -265,6 +266,7 @@ unlock:
4897 + int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
4898 + {
4899 + struct otx2_rss_info *rss = &pfvf->hw.rss_info;
4900 ++ struct nix_rss_flowkey_cfg_rsp *rsp;
4901 + struct nix_rss_flowkey_cfg *req;
4902 + int err;
4903 +
4904 +@@ -279,6 +281,18 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf)
4905 + req->group = DEFAULT_RSS_CONTEXT_GROUP;
4906 +
4907 + err = otx2_sync_mbox_msg(&pfvf->mbox);
4908 ++ if (err)
4909 ++ goto fail;
4910 ++
4911 ++ rsp = (struct nix_rss_flowkey_cfg_rsp *)
4912 ++ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
4913 ++ if (IS_ERR(rsp)) {
4914 ++ err = PTR_ERR(rsp);
4915 ++ goto fail;
4916 ++ }
4917 ++
4918 ++ pfvf->hw.flowkey_alg_idx = rsp->alg_idx;
4919 ++fail:
4920 + mutex_unlock(&pfvf->mbox.lock);
4921 + return err;
4922 + }
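
The otx2 hunk above turns a fire-and-forget mailbox call into send, fetch the response, and cache rsp->alg_idx for later RSS flow rules, unwinding through a single unlock label on any failure. A generic sketch of that send/receive/goto shape (all names invented):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct rss_rsp {
        unsigned char alg_idx;
};

static pthread_mutex_t mbox_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char flowkey_alg_idx;

static int mbox_send(void)
{
        return 0;        /* stand-in for the real mailbox round trip */
}

static struct rss_rsp *mbox_get_rsp(void)
{
        static struct rss_rsp rsp = { .alg_idx = 3 };
        return &rsp;
}

static int set_flowkey_cfg(void)
{
        struct rss_rsp *rsp;
        int err;

        pthread_mutex_lock(&mbox_lock);
        err = mbox_send();
        if (err)
                goto fail;

        rsp = mbox_get_rsp();
        if (!rsp) {
                err = -EIO;
                goto fail;
        }
        flowkey_alg_idx = rsp->alg_idx;        /* cached for later rules */
fail:
        pthread_mutex_unlock(&mbox_lock);
        return err;
}

int main(void)
{
        printf("err=%d idx=%u\n", set_flowkey_cfg(), flowkey_alg_idx);
        return 0;
}
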
4923 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
4924 +index 45730d0d92f2b..c652c27cd3455 100644
4925 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
4926 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
4927 +@@ -195,6 +195,9 @@ struct otx2_hw {
4928 + u8 lso_udpv4_idx;
4929 + u8 lso_udpv6_idx;
4930 +
4931 ++ /* RSS */
4932 ++ u8 flowkey_alg_idx;
4933 ++
4934 + /* MSI-X */
4935 + u8 cint_cnt; /* CQ interrupt count */
4936 + u16 npa_msixoff; /* Offset of NPA vectors */
4937 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
4938 +index 0b4fa92ba8214..81265dbf91e2a 100644
4939 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
4940 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
4941 +@@ -682,6 +682,7 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
4942 + if (flow->flow_spec.flow_type & FLOW_RSS) {
4943 + req->op = NIX_RX_ACTIONOP_RSS;
4944 + req->index = flow->rss_ctx_id;
4945 ++ req->flow_key_alg = pfvf->hw.flowkey_alg_idx;
4946 + } else {
4947 + req->op = NIX_RX_ACTIONOP_UCAST;
4948 + req->index = ethtool_get_flow_spec_ring(ring_cookie);
4949 +diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
4950 +index 51157b283f6f7..463d2368c1180 100644
4951 +--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
4952 ++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
4953 +@@ -385,8 +385,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic,
4954 + match.key->vlan_priority << 13;
4955 +
4956 + vlan_tci_mask = match.mask->vlan_id |
4957 +- match.key->vlan_dei << 12 |
4958 +- match.key->vlan_priority << 13;
4959 ++ match.mask->vlan_dei << 12 |
4960 ++ match.mask->vlan_priority << 13;
4961 +
4962 + flow_spec->vlan_tci = htons(vlan_tci);
4963 + flow_mask->vlan_tci = htons(vlan_tci_mask);
4964 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
4965 +index def2156e50eeb..20bb372662541 100644
4966 +--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
4967 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
4968 +@@ -397,7 +397,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev)
4969 + void mlx5_unregister_device(struct mlx5_core_dev *dev)
4970 + {
4971 + mutex_lock(&mlx5_intf_mutex);
4972 +- dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
4973 ++ dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
4974 + mlx5_rescan_drivers_locked(dev);
4975 + mutex_unlock(&mlx5_intf_mutex);
4976 + }
4977 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
4978 +index 44c458443428c..4794173f8fdbf 100644
4979 +--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
4980 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
4981 +@@ -664,6 +664,7 @@ params_reg_err:
4982 + void mlx5_devlink_unregister(struct devlink *devlink)
4983 + {
4984 + mlx5_devlink_traps_unregister(devlink);
4985 ++ devlink_params_unpublish(devlink);
4986 + devlink_params_unregister(devlink, mlx5_devlink_params,
4987 + ARRAY_SIZE(mlx5_devlink_params));
4988 + devlink_unregister(devlink);
4989 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
4990 +index 1d5ce07b83f45..43b092f5565af 100644
4991 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
4992 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
4993 +@@ -248,18 +248,12 @@ struct ttc_params {
4994 +
4995 + void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
4996 + void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
4997 +-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
4998 +
4999 + int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
5000 + struct mlx5e_ttc_table *ttc);
5001 + void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
5002 + struct mlx5e_ttc_table *ttc);
5003 +
5004 +-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
5005 +- struct mlx5e_ttc_table *ttc);
5006 +-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
5007 +- struct mlx5e_ttc_table *ttc);
5008 +-
5009 + void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
5010 + int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type,
5011 + struct mlx5_flow_destination *new_dest);
5012 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
5013 +index 5efe3278b0f64..1fd8baf198296 100644
5014 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
5015 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
5016 +@@ -733,8 +733,8 @@ static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid)
5017 + spin_unlock_bh(qdisc_lock(qdisc));
5018 + }
5019 +
5020 +-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
5021 +- u16 *new_qid, struct netlink_ext_ack *extack)
5022 ++int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
5023 ++ struct netlink_ext_ack *extack)
5024 + {
5025 + struct mlx5e_qos_node *node;
5026 + struct netdev_queue *txq;
5027 +@@ -742,11 +742,9 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
5028 + bool opened;
5029 + int err;
5030 +
5031 +- qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid);
5032 +-
5033 +- *old_qid = *new_qid = 0;
5034 ++ qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
5035 +
5036 +- node = mlx5e_sw_node_find(priv, classid);
5037 ++ node = mlx5e_sw_node_find(priv, *classid);
5038 + if (!node)
5039 + return -ENOENT;
5040 +
5041 +@@ -764,7 +762,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
5042 + err = mlx5_qos_destroy_node(priv->mdev, node->hw_id);
5043 + if (err) /* Not fatal. */
5044 + qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n",
5045 +- node->hw_id, classid, err);
5046 ++ node->hw_id, *classid, err);
5047 +
5048 + mlx5e_sw_node_delete(priv, node);
5049 +
5050 +@@ -826,8 +824,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
5051 + if (opened)
5052 + mlx5e_reactivate_qos_sq(priv, moved_qid, txq);
5053 +
5054 +- *old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid);
5055 +- *new_qid = mlx5e_qid_from_qos(&priv->channels, qid);
5056 ++ *classid = node->classid;
5057 + return 0;
5058 + }
5059 +
5060 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
5061 +index 5af7991fcd194..757682b7c0e04 100644
5062 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
5063 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
5064 +@@ -34,8 +34,8 @@ int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid,
5065 + struct netlink_ext_ack *extack);
5066 + int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid,
5067 + u64 rate, u64 ceil, struct netlink_ext_ack *extack);
5068 +-int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid,
5069 +- u16 *new_qid, struct netlink_ext_ack *extack);
5070 ++int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid,
5071 ++ struct netlink_ext_ack *extack);
5072 + int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
5073 + struct netlink_ext_ack *extack);
5074 + int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
5075 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
5076 +index 490131e06efb2..aa4dc7d624f8e 100644
5077 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
5078 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
5079 +@@ -143,7 +143,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
5080 + mlx5e_rep_queue_neigh_stats_work(priv);
5081 +
5082 + list_for_each_entry(flow, flow_list, tmp_list) {
5083 +- if (!mlx5e_is_offloaded_flow(flow))
5084 ++ if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
5085 + continue;
5086 + attr = flow->attr;
5087 + esw_attr = attr->esw_attr;
5088 +@@ -184,7 +184,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
5089 + int err;
5090 +
5091 + list_for_each_entry(flow, flow_list, tmp_list) {
5092 +- if (!mlx5e_is_offloaded_flow(flow))
5093 ++ if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
5094 + continue;
5095 + attr = flow->attr;
5096 + esw_attr = attr->esw_attr;
5097 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
5098 +index 0b75fab41ae8f..6464ac3f294e7 100644
5099 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
5100 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
5101 +@@ -1324,7 +1324,7 @@ void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
5102 + ttc_params->inner_ttc = &priv->fs.inner_ttc;
5103 + }
5104 +
5105 +-void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
5106 ++static void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
5107 + {
5108 + struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
5109 +
5110 +@@ -1343,8 +1343,8 @@ void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
5111 + ft_attr->prio = MLX5E_NIC_PRIO;
5112 + }
5113 +
5114 +-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
5115 +- struct mlx5e_ttc_table *ttc)
5116 ++static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
5117 ++ struct mlx5e_ttc_table *ttc)
5118 + {
5119 + struct mlx5e_flow_table *ft = &ttc->ft;
5120 + int err;
5121 +@@ -1374,8 +1374,8 @@ err:
5122 + return err;
5123 + }
5124 +
5125 +-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
5126 +- struct mlx5e_ttc_table *ttc)
5127 ++static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
5128 ++ struct mlx5e_ttc_table *ttc)
5129 + {
5130 + if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
5131 + return;
5132 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5133 +index 779a4abead01b..814ff51db1a5b 100644
5134 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5135 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
5136 +@@ -2563,6 +2563,14 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
5137 + err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in);
5138 + if (err)
5139 + goto free_in;
5140 ++
5141 ++ /* Verify inner tirs resources allocated */
5142 ++ if (!priv->inner_indir_tir[0].tirn)
5143 ++ continue;
5144 ++
5145 ++ err = mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in);
5146 ++ if (err)
5147 ++ goto free_in;
5148 + }
5149 +
5150 + for (ix = 0; ix < priv->max_nch; ix++) {
5151 +@@ -3439,8 +3447,7 @@ static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offloa
5152 + return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid,
5153 + htb->rate, htb->ceil, htb->extack);
5154 + case TC_HTB_LEAF_DEL:
5155 +- return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid,
5156 +- htb->extack);
5157 ++ return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack);
5158 + case TC_HTB_LEAF_DEL_LAST:
5159 + case TC_HTB_LEAF_DEL_LAST_FORCE:
5160 + return mlx5e_htb_leaf_del_last(priv, htb->classid,
5161 +@@ -4806,7 +4813,14 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
5162 + netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX;
5163 + netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX;
5164 +
5165 ++ /* Tunneled LRO is not supported in the driver, and the same RQs are
5166 ++ * shared between inner and outer TIRs, so the driver can't disable LRO
5167 ++ * for inner TIRs while having it enabled for outer TIRs. Due to this,
5168 ++ * block LRO altogether if the firmware declares tunneled LRO support.
5169 ++ */
5170 + if (!!MLX5_CAP_ETH(mdev, lro_cap) &&
5171 ++ !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) &&
5172 ++ !MLX5_CAP_ETH(mdev, tunnel_lro_gre) &&
5173 + mlx5e_check_fragmented_striding_rq_cap(mdev))
5174 + netdev->vlan_features |= NETIF_F_LRO;
5175 +
5176 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5177 +index 47bd20ad81080..ced6ff0bc9160 100644
5178 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5179 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
5180 +@@ -1310,6 +1310,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
5181 + int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
5182 + {
5183 + struct mlx5e_priv *out_priv, *route_priv;
5184 ++ struct mlx5_devcom *devcom = NULL;
5185 + struct mlx5_core_dev *route_mdev;
5186 + struct mlx5_eswitch *esw;
5187 + u16 vhca_id;
5188 +@@ -1321,7 +1322,24 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
5189 + route_mdev = route_priv->mdev;
5190 +
5191 + vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
5192 ++ if (mlx5_lag_is_active(out_priv->mdev)) {
5193 ++ /* In lag case we may get devices from different eswitch instances.
5194 ++ * If we failed to get vport num, it means, mostly, that we are on the wrong
5195 ++ * eswitch.
5196 ++ */
5197 ++ err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
5198 ++ if (err != -ENOENT)
5199 ++ return err;
5200 ++
5201 ++ devcom = out_priv->mdev->priv.devcom;
5202 ++ esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
5203 ++ if (!esw)
5204 ++ return -ENODEV;
5205 ++ }
5206 ++
5207 + err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
5208 ++ if (devcom)
5209 ++ mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
5210 + return err;
5211 + }
5212 +
5213 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
5214 +index 3da7becc1069f..425c91814b34f 100644
5215 +--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
5216 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c
5217 +@@ -364,6 +364,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
5218 + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
5219 + dest.vport.num = e->vport;
5220 + dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
5221 ++ dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
5222 + e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
5223 + if (IS_ERR(e->fwd_rule)) {
5224 + mlx5_destroy_flow_group(e->fwd_grp);
5225 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
5226 +index d0e4daa55a4a1..c6d3348d759e3 100644
5227 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
5228 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
5229 +@@ -3074,8 +3074,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
5230 +
5231 + switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
5232 + case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
5233 +- if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
5234 ++ if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
5235 ++ err = 0;
5236 + goto out;
5237 ++ }
5238 ++
5239 + fallthrough;
5240 + case MLX5_CAP_INLINE_MODE_L2:
5241 + NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
5242 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
5243 +index 7d7ed025db0da..620d638e1e8ff 100644
5244 +--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
5245 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
5246 +@@ -331,17 +331,6 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
5247 + }
5248 +
5249 + mlx5e_set_ttc_basic_params(priv, &ttc_params);
5250 +- mlx5e_set_inner_ttc_ft_params(&ttc_params);
5251 +- for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
5252 +- ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
5253 +-
5254 +- err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
5255 +- if (err) {
5256 +- netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
5257 +- err);
5258 +- goto err_destroy_arfs_tables;
5259 +- }
5260 +-
5261 + mlx5e_set_ttc_ft_params(&ttc_params);
5262 + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
5263 + ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
5264 +@@ -350,13 +339,11 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
5265 + if (err) {
5266 + netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
5267 + err);
5268 +- goto err_destroy_inner_ttc_table;
5269 ++ goto err_destroy_arfs_tables;
5270 + }
5271 +
5272 + return 0;
5273 +
5274 +-err_destroy_inner_ttc_table:
5275 +- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
5276 + err_destroy_arfs_tables:
5277 + mlx5e_arfs_destroy_tables(priv);
5278 +
5279 +@@ -366,7 +353,6 @@ err_destroy_arfs_tables:
5280 + static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
5281 + {
5282 + mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
5283 +- mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
5284 + mlx5e_arfs_destroy_tables(priv);
5285 + }
5286 +
5287 +@@ -392,7 +378,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
5288 + if (err)
5289 + goto err_destroy_indirect_rqts;
5290 +
5291 +- err = mlx5e_create_indirect_tirs(priv, true);
5292 ++ err = mlx5e_create_indirect_tirs(priv, false);
5293 + if (err)
5294 + goto err_destroy_direct_rqts;
5295 +
5296 +diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
5297 +index b41301a5b0df8..cd520e4c5522f 100644
5298 +--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
5299 ++++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
5300 +@@ -91,20 +91,20 @@ int ionic_devlink_register(struct ionic *ionic)
5301 + attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
5302 + devlink_port_attrs_set(&ionic->dl_port, &attrs);
5303 + err = devlink_port_register(dl, &ionic->dl_port, 0);
5304 +- if (err)
5305 ++ if (err) {
5306 + dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
5307 +- else
5308 +- devlink_port_type_eth_set(&ionic->dl_port,
5309 +- ionic->lif->netdev);
5310 ++ devlink_unregister(dl);
5311 ++ return err;
5312 ++ }
5313 +
5314 +- return err;
5315 ++ devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
5316 ++ return 0;
5317 + }
5318 +
5319 + void ionic_devlink_unregister(struct ionic *ionic)
5320 + {
5321 + struct devlink *dl = priv_to_devlink(ionic);
5322 +
5323 +- if (ionic->dl_port.registered)
5324 +- devlink_port_unregister(&ionic->dl_port);
5325 ++ devlink_port_unregister(&ionic->dl_port);
5326 + devlink_unregister(dl);
5327 + }
5328 +diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
5329 +index ab9b02574a152..38018f0248239 100644
5330 +--- a/drivers/net/ethernet/qualcomm/qca_spi.c
5331 ++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
5332 +@@ -434,7 +434,7 @@ qcaspi_receive(struct qcaspi *qca)
5333 + skb_put(qca->rx_skb, retcode);
5334 + qca->rx_skb->protocol = eth_type_trans(
5335 + qca->rx_skb, qca->rx_skb->dev);
5336 +- qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
5337 ++ skb_checksum_none_assert(qca->rx_skb);
5338 + netif_rx_ni(qca->rx_skb);
5339 + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
5340 + net_dev->mtu + VLAN_ETH_HLEN);
5341 +diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
5342 +index bcdeca7b33664..ce3f7ce31adc1 100644
5343 +--- a/drivers/net/ethernet/qualcomm/qca_uart.c
5344 ++++ b/drivers/net/ethernet/qualcomm/qca_uart.c
5345 +@@ -107,7 +107,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
5346 + skb_put(qca->rx_skb, retcode);
5347 + qca->rx_skb->protocol = eth_type_trans(
5348 + qca->rx_skb, qca->rx_skb->dev);
5349 +- qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
5350 ++ skb_checksum_none_assert(qca->rx_skb);
5351 + netif_rx_ni(qca->rx_skb);
5352 + qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
5353 + netdev->mtu +
5354 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
5355 +index e632702675787..f83db62938dd1 100644
5356 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
5357 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
5358 +@@ -172,11 +172,12 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
5359 + x->rx_normal_irq_n++;
5360 + ret |= handle_rx;
5361 + }
5362 +- if (likely(intr_status & (DMA_CHAN_STATUS_TI |
5363 +- DMA_CHAN_STATUS_TBU))) {
5364 ++ if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
5365 + x->tx_normal_irq_n++;
5366 + ret |= handle_tx;
5367 + }
5368 ++ if (unlikely(intr_status & DMA_CHAN_STATUS_TBU))
5369 ++ ret |= handle_tx;
5370 + if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
5371 + x->rx_early_irq++;
5372 +
5373 +diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
5374 +index 67a08cbba859d..e967cd1ade36b 100644
5375 +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
5376 ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
5377 +@@ -518,6 +518,10 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common,
5378 + }
5379 +
5380 + napi_enable(&common->napi_rx);
5381 ++ if (common->rx_irq_disabled) {
5382 ++ common->rx_irq_disabled = false;
5383 ++ enable_irq(common->rx_chns.irq);
5384 ++ }
5385 +
5386 + dev_dbg(common->dev, "cpsw_nuss started\n");
5387 + return 0;
5388 +@@ -871,8 +875,12 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
5389 +
5390 + dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
5391 +
5392 +- if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
5393 +- enable_irq(common->rx_chns.irq);
5394 ++ if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
5395 ++ if (common->rx_irq_disabled) {
5396 ++ common->rx_irq_disabled = false;
5397 ++ enable_irq(common->rx_chns.irq);
5398 ++ }
5399 ++ }
5400 +
5401 + return num_rx;
5402 + }
5403 +@@ -1090,6 +1098,7 @@ static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
5404 + {
5405 + struct am65_cpsw_common *common = dev_id;
5406 +
5407 ++ common->rx_irq_disabled = true;
5408 + disable_irq_nosync(irq);
5409 + napi_schedule(&common->napi_rx);
5410 +
5411 +@@ -2388,21 +2397,6 @@ static const struct devlink_param am65_cpsw_devlink_params[] = {
5412 + am65_cpsw_dl_switch_mode_set, NULL),
5413 + };
5414 +
5415 +-static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common)
5416 +-{
5417 +- struct devlink_port *dl_port;
5418 +- struct am65_cpsw_port *port;
5419 +- int i;
5420 +-
5421 +- for (i = 1; i <= common->port_num; i++) {
5422 +- port = am65_common_get_port(common, i);
5423 +- dl_port = &port->devlink_port;
5424 +-
5425 +- if (dl_port->registered)
5426 +- devlink_port_unregister(dl_port);
5427 +- }
5428 +-}
5429 +-
5430 + static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
5431 + {
5432 + struct devlink_port_attrs attrs = {};
5433 +@@ -2464,7 +2458,12 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
5434 + return ret;
5435 +
5436 + dl_port_unreg:
5437 +- am65_cpsw_unregister_devlink_ports(common);
5438 ++ for (i = i - 1; i >= 1; i--) {
5439 ++ port = am65_common_get_port(common, i);
5440 ++ dl_port = &port->devlink_port;
5441 ++
5442 ++ devlink_port_unregister(dl_port);
5443 ++ }
5444 + dl_unreg:
5445 + devlink_unregister(common->devlink);
5446 + dl_free:
5447 +@@ -2475,6 +2474,17 @@ dl_free:
5448 +
5449 + static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
5450 + {
5451 ++ struct devlink_port *dl_port;
5452 ++ struct am65_cpsw_port *port;
5453 ++ int i;
5454 ++
5455 ++ for (i = 1; i <= common->port_num; i++) {
5456 ++ port = am65_common_get_port(common, i);
5457 ++ dl_port = &port->devlink_port;
5458 ++
5459 ++ devlink_port_unregister(dl_port);
5460 ++ }
5461 ++
5462 + if (!AM65_CPSW_IS_CPSW2G(common) &&
5463 + IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
5464 + devlink_params_unpublish(common->devlink);
5465 +@@ -2482,7 +2492,6 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
5466 + ARRAY_SIZE(am65_cpsw_devlink_params));
5467 + }
5468 +
5469 +- am65_cpsw_unregister_devlink_ports(common);
5470 + devlink_unregister(common->devlink);
5471 + devlink_free(common->devlink);
5472 + }
5473 +diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
5474 +index 5d93e346f05eb..048ed10143c17 100644
5475 +--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
5476 ++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
5477 +@@ -126,6 +126,8 @@ struct am65_cpsw_common {
5478 + struct am65_cpsw_rx_chn rx_chns;
5479 + struct napi_struct napi_rx;
5480 +
5481 ++ bool rx_irq_disabled;
5482 ++
5483 + u32 nuss_ver;
5484 + u32 cpsw_ver;
5485 + unsigned long bus_freq;
5486 +diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
5487 +index 53a433442803a..f4d758f8a1ee1 100644
5488 +--- a/drivers/net/phy/marvell10g.c
5489 ++++ b/drivers/net/phy/marvell10g.c
5490 +@@ -987,11 +987,19 @@ static int mv3310_get_number_of_ports(struct phy_device *phydev)
5491 +
5492 + static int mv3310_match_phy_device(struct phy_device *phydev)
5493 + {
5494 ++ if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
5495 ++ MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
5496 ++ return 0;
5497 ++
5498 + return mv3310_get_number_of_ports(phydev) == 1;
5499 + }
5500 +
5501 + static int mv3340_match_phy_device(struct phy_device *phydev)
5502 + {
5503 ++ if ((phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] &
5504 ++ MARVELL_PHY_ID_MASK) != MARVELL_PHY_ID_88X3310)
5505 ++ return 0;
5506 ++
5507 + return mv3310_get_number_of_ports(phydev) == 4;
5508 + }
5509 +
5510 +diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
5511 +index b137e7f343979..bd1ef63349978 100644
5512 +--- a/drivers/net/wireless/ath/ath6kl/wmi.c
5513 ++++ b/drivers/net/wireless/ath/ath6kl/wmi.c
5514 +@@ -2504,8 +2504,10 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
5515 + goto free_data_skb;
5516 +
5517 + for (index = 0; index < num_pri_streams; index++) {
5518 +- if (WARN_ON(!data_sync_bufs[index].skb))
5519 ++ if (WARN_ON(!data_sync_bufs[index].skb)) {
5520 ++ ret = -ENOMEM;
5521 + goto free_data_skb;
5522 ++ }
5523 +
5524 + ep_id = ath6kl_ac2_endpoint_id(wmi->parent_dev,
5525 + data_sync_bufs[index].
5526 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
5527 +index 143a705b5cb3a..4800e19bdcc39 100644
5528 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
5529 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
5530 +@@ -2075,7 +2075,7 @@ cleanup:
5531 +
5532 + err = brcmf_pcie_probe(pdev, NULL);
5533 + if (err)
5534 +- brcmf_err(bus, "probe after resume failed, err=%d\n", err);
5535 ++ __brcmf_err(NULL, __func__, "probe after resume failed, err=%d\n", err);
5536 +
5537 + return err;
5538 + }
5539 +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
5540 +index e31bba836c6f7..dfa4047f97a03 100644
5541 +--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
5542 ++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
5543 +@@ -243,7 +243,7 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
5544 + goto out_free;
5545 + }
5546 +
5547 +- enabled = !!wifi_pkg->package.elements[0].integer.value;
5548 ++ enabled = !!wifi_pkg->package.elements[1].integer.value;
5549 +
5550 + if (!enabled) {
5551 + *block_list_size = -1;
5552 +@@ -252,15 +252,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
5553 + goto out_free;
5554 + }
5555 +
5556 +- if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
5557 +- wifi_pkg->package.elements[1].integer.value >
5558 ++ if (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER ||
5559 ++ wifi_pkg->package.elements[2].integer.value >
5560 + APCI_WTAS_BLACK_LIST_MAX) {
5561 + IWL_DEBUG_RADIO(fwrt, "TAS invalid array size %llu\n",
5562 + wifi_pkg->package.elements[2].integer.value);
5563 + ret = -EINVAL;
5564 + goto out_free;
5565 + }
5566 +- *block_list_size = wifi_pkg->package.elements[1].integer.value;
5567 ++ *block_list_size = wifi_pkg->package.elements[2].integer.value;
5568 +
5569 + IWL_DEBUG_RADIO(fwrt, "TAS array size %d\n", *block_list_size);
5570 + if (*block_list_size > APCI_WTAS_BLACK_LIST_MAX) {
5571 +@@ -273,15 +273,15 @@ int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
5572 + for (i = 0; i < *block_list_size; i++) {
5573 + u32 country;
5574 +
5575 +- if (wifi_pkg->package.elements[2 + i].type !=
5576 ++ if (wifi_pkg->package.elements[3 + i].type !=
5577 + ACPI_TYPE_INTEGER) {
5578 + IWL_DEBUG_RADIO(fwrt,
5579 +- "TAS invalid array elem %d\n", 2 + i);
5580 ++ "TAS invalid array elem %d\n", 3 + i);
5581 + ret = -EINVAL;
5582 + goto out_free;
5583 + }
5584 +
5585 +- country = wifi_pkg->package.elements[2 + i].integer.value;
5586 ++ country = wifi_pkg->package.elements[3 + i].integer.value;
5587 + block_list_array[i] = cpu_to_le32(country);
5588 + IWL_DEBUG_RADIO(fwrt, "TAS block list country %d\n", country);
5589 + }
5590 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
5591 +index 9f11a1d5d0346..81e881da7f15d 100644
5592 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
5593 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
5594 +@@ -556,6 +556,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
5595 + IWL_DEV_INFO(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0, NULL),
5596 + IWL_DEV_INFO(0xA0F0, 0x2074, iwl_ax201_cfg_qu_hr, NULL),
5597 + IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
5598 ++ IWL_DEV_INFO(0xA0F0, 0x6074, iwl_ax201_cfg_qu_hr, NULL),
5599 + IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
5600 + IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
5601 + IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL),
5602 +diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
5603 +index 99b21a2c83861..f4a26f16f00f4 100644
5604 +--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
5605 ++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
5606 +@@ -1038,8 +1038,10 @@ static int rsi_load_9116_firmware(struct rsi_hw *adapter)
5607 + }
5608 +
5609 + ta_firmware = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
5610 +- if (!ta_firmware)
5611 ++ if (!ta_firmware) {
5612 ++ status = -ENOMEM;
5613 + goto fail_release_fw;
5614 ++ }
5615 + fw_p = ta_firmware;
5616 + instructions_sz = fw_entry->size;
5617 + rsi_dbg(INFO_ZONE, "FW Length = %d bytes\n", instructions_sz);
5618 +diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
5619 +index 3fbe2a3c14550..416976f098882 100644
5620 +--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
5621 ++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
5622 +@@ -816,6 +816,7 @@ static int rsi_probe(struct usb_interface *pfunction,
5623 + } else {
5624 + rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n",
5625 + __func__, id->idProduct);
5626 ++ status = -ENODEV;
5627 + goto err1;
5628 + }
5629 +
5630 +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
5631 +index 4697a94c09459..f80682f7df54d 100644
5632 +--- a/drivers/nvme/host/rdma.c
5633 ++++ b/drivers/nvme/host/rdma.c
5634 +@@ -736,13 +736,13 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
5635 + if (ret)
5636 + return ret;
5637 +
5638 +- ctrl->ctrl.queue_count = nr_io_queues + 1;
5639 +- if (ctrl->ctrl.queue_count < 2) {
5640 ++ if (nr_io_queues == 0) {
5641 + dev_err(ctrl->ctrl.device,
5642 + "unable to set any I/O queues\n");
5643 + return -ENOMEM;
5644 + }
5645 +
5646 ++ ctrl->ctrl.queue_count = nr_io_queues + 1;
5647 + dev_info(ctrl->ctrl.device,
5648 + "creating %d I/O queues.\n", nr_io_queues);
5649 +
5650 +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
5651 +index 79a463090dd30..ab1ea5b0888ea 100644
5652 +--- a/drivers/nvme/host/tcp.c
5653 ++++ b/drivers/nvme/host/tcp.c
5654 +@@ -1755,13 +1755,13 @@ static int nvme_tcp_alloc_io_queues(struct nvme_ctrl *ctrl)
5655 + if (ret)
5656 + return ret;
5657 +
5658 +- ctrl->queue_count = nr_io_queues + 1;
5659 +- if (ctrl->queue_count < 2) {
5660 ++ if (nr_io_queues == 0) {
5661 + dev_err(ctrl->device,
5662 + "unable to set any I/O queues\n");
5663 + return -ENOMEM;
5664 + }
5665 +
5666 ++ ctrl->queue_count = nr_io_queues + 1;
5667 + dev_info(ctrl->device,
5668 + "creating %d I/O queues.\n", nr_io_queues);
5669 +
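
The nvme-rdma and nvme-tcp hunks above reorder the same two statements: validate nr_io_queues before storing queue_count, so the error return can no longer leave a stale count behind in the controller. A reduced sketch of the reordered check:

#include <errno.h>
#include <stdio.h>

static unsigned int queue_count;

static int alloc_io_queues(unsigned int nr_io_queues)
{
        if (nr_io_queues == 0) {
                fprintf(stderr, "unable to set any I/O queues\n");
                return -ENOMEM;        /* queue_count left untouched */
        }

        queue_count = nr_io_queues + 1;        /* +1 for the admin queue */
        return 0;
}

int main(void)
{
        return alloc_io_queues(0) ? 1 : 0;
}
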
5670 +diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
5671 +index 7d0f3523fdab2..8ef564c3b32c8 100644
5672 +--- a/drivers/nvme/target/fabrics-cmd.c
5673 ++++ b/drivers/nvme/target/fabrics-cmd.c
5674 +@@ -120,6 +120,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
5675 + if (!sqsize) {
5676 + pr_warn("queue size zero!\n");
5677 + req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
5678 ++ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
5679 + ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
5680 + goto err;
5681 + }
5682 +@@ -260,11 +261,11 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
5683 + }
5684 +
5685 + status = nvmet_install_queue(ctrl, req);
5686 +- if (status) {
5687 +- /* pass back cntlid that had the issue of installing queue */
5688 +- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
5689 ++ if (status)
5690 + goto out_ctrl_put;
5691 +- }
5692 ++
5693 ++ /* pass back cntlid for successful completion */
5694 ++ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
5695 +
5696 + pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
5697 +
5698 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
5699 +index 8d4ebe095d0c8..a9d0530b7846d 100644
5700 +--- a/drivers/pci/pci.c
5701 ++++ b/drivers/pci/pci.c
5702 +@@ -2495,7 +2495,14 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
5703 + if (enable) {
5704 + int error;
5705 +
5706 +- if (pci_pme_capable(dev, state))
5707 ++ /*
5708 ++ * Enable PME signaling if the device can signal PME from
5709 ++ * D3cold regardless of whether or not it can signal PME from
5710 ++ * the current target state, because that will allow it to
5711 ++ * signal PME when the hierarchy above it goes into D3cold and
5712 ++ * the device itself ends up in D3cold as a result of that.
5713 ++ */
5714 ++ if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
5715 + pci_pme_active(dev, true);
5716 + else
5717 + ret = 1;
5718 +@@ -2599,16 +2606,20 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
5719 + if (dev->current_state == PCI_D3cold)
5720 + target_state = PCI_D3cold;
5721 +
5722 +- if (wakeup) {
5723 ++ if (wakeup && dev->pme_support) {
5724 ++ pci_power_t state = target_state;
5725 ++
5726 + /*
5727 + * Find the deepest state from which the device can generate
5728 + * PME#.
5729 + */
5730 +- if (dev->pme_support) {
5731 +- while (target_state
5732 +- && !(dev->pme_support & (1 << target_state)))
5733 +- target_state--;
5734 +- }
5735 ++ while (state && !(dev->pme_support & (1 << state)))
5736 ++ state--;
5737 ++
5738 ++ if (state)
5739 ++ return state;
5740 ++ else if (dev->pme_support & 1)
5741 ++ return PCI_D0;
5742 + }
5743 +
5744 + return target_state;
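
The reworked pci_target_state() above walks pme_support downward from the target state to the deepest state that can still signal PME#, and falls back to D0 when only bit 0 is set. A sketch of that walk for a device that signals PME# from D0 and D3hot only:

#include <stdio.h>

enum { D0, D1, D2, D3HOT, D3COLD };

static int deepest_pme_state(unsigned int pme_support, int target)
{
        int state = target;

        while (state && !(pme_support & (1u << state)))
                state--;

        if (state)
                return state;
        return (pme_support & 1u) ? D0 : target;
}

int main(void)
{
        unsigned int pme = (1u << D0) | (1u << D3HOT);

        printf("%d\n", deepest_pme_state(pme, D3COLD));        /* 3 = D3hot */
        return 0;
}
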
5745 +diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c
5746 +index 37af0e216bc3a..225adaffaa283 100644
5747 +--- a/drivers/power/supply/axp288_fuel_gauge.c
5748 ++++ b/drivers/power/supply/axp288_fuel_gauge.c
5749 +@@ -149,7 +149,7 @@ static int fuel_gauge_reg_readb(struct axp288_fg_info *info, int reg)
5750 + }
5751 +
5752 + if (ret < 0) {
5753 +- dev_err(&info->pdev->dev, "axp288 reg read err:%d\n", ret);
5754 ++ dev_err(&info->pdev->dev, "Error reading reg 0x%02x err: %d\n", reg, ret);
5755 + return ret;
5756 + }
5757 +
5758 +@@ -163,7 +163,7 @@ static int fuel_gauge_reg_writeb(struct axp288_fg_info *info, int reg, u8 val)
5759 + ret = regmap_write(info->regmap, reg, (unsigned int)val);
5760 +
5761 + if (ret < 0)
5762 +- dev_err(&info->pdev->dev, "axp288 reg write err:%d\n", ret);
5763 ++ dev_err(&info->pdev->dev, "Error writing reg 0x%02x err: %d\n", reg, ret);
5764 +
5765 + return ret;
5766 + }
5767 +diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
5768 +index d110597746b0a..091868e9e9e82 100644
5769 +--- a/drivers/power/supply/cw2015_battery.c
5770 ++++ b/drivers/power/supply/cw2015_battery.c
5771 +@@ -679,7 +679,9 @@ static int cw_bat_probe(struct i2c_client *client)
5772 + &cw2015_bat_desc,
5773 + &psy_cfg);
5774 + if (IS_ERR(cw_bat->rk_bat)) {
5775 +- dev_err(cw_bat->dev, "Failed to register power supply\n");
5776 ++ /* try again if this happens */
5777 ++ dev_err_probe(&client->dev, PTR_ERR(cw_bat->rk_bat),
5778 ++ "Failed to register power supply\n");
5779 + return PTR_ERR(cw_bat->rk_bat);
5780 + }
5781 +
5782 +diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
5783 +index ce2041b30a066..215e77d3b6d93 100644
5784 +--- a/drivers/power/supply/max17042_battery.c
5785 ++++ b/drivers/power/supply/max17042_battery.c
5786 +@@ -748,7 +748,7 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
5787 + struct max17042_config_data *config = chip->pdata->config_data;
5788 +
5789 + max17042_override_por(map, MAX17042_TGAIN, config->tgain);
5790 +- max17042_override_por(map, MAx17042_TOFF, config->toff);
5791 ++ max17042_override_por(map, MAX17042_TOFF, config->toff);
5792 + max17042_override_por(map, MAX17042_CGAIN, config->cgain);
5793 + max17042_override_por(map, MAX17042_COFF, config->coff);
5794 +
5795 +diff --git a/drivers/power/supply/smb347-charger.c b/drivers/power/supply/smb347-charger.c
5796 +index 3376f42d46c3d..25d239f2330e1 100644
5797 +--- a/drivers/power/supply/smb347-charger.c
5798 ++++ b/drivers/power/supply/smb347-charger.c
5799 +@@ -56,6 +56,7 @@
5800 + #define CFG_PIN_EN_CTRL_ACTIVE_LOW 0x60
5801 + #define CFG_PIN_EN_APSD_IRQ BIT(1)
5802 + #define CFG_PIN_EN_CHARGER_ERROR BIT(2)
5803 ++#define CFG_PIN_EN_CTRL BIT(4)
5804 + #define CFG_THERM 0x07
5805 + #define CFG_THERM_SOFT_HOT_COMPENSATION_MASK 0x03
5806 + #define CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT 0
5807 +@@ -725,6 +726,15 @@ static int smb347_hw_init(struct smb347_charger *smb)
5808 + if (ret < 0)
5809 + goto fail;
5810 +
5811 ++ /* Activate pin control, making it writable. */
5812 ++ switch (smb->enable_control) {
5813 ++ case SMB3XX_CHG_ENABLE_PIN_ACTIVE_LOW:
5814 ++ case SMB3XX_CHG_ENABLE_PIN_ACTIVE_HIGH:
5815 ++ ret = regmap_set_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL);
5816 ++ if (ret < 0)
5817 ++ goto fail;
5818 ++ }
5819 ++
5820 + /*
5821 + * Make the charging functionality controllable by a write to the
5822 + * command register unless pin control is specified in the platform
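
[Editor's note] On the smb347 CFG_PIN hunk: regmap_set_bits(map, reg, bits) is a read-modify-write that sets only the named bits, equivalent to regmap_update_bits(map, reg, bits, bits), so the neighbouring CFG_PIN fields are preserved. A sketch of the call, with the register address left as a parameter since its value is not shown in this hunk:

#include <linux/bits.h>
#include <linux/regmap.h>

static int enable_pin_control(struct regmap *map, unsigned int cfg_pin_reg)
{
	return regmap_set_bits(map, cfg_pin_reg, BIT(4)); /* CFG_PIN_EN_CTRL */
}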
5823 +diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
5824 +index 1d5b0a1b86f78..06cbe60c990f9 100644
5825 +--- a/drivers/regulator/tps65910-regulator.c
5826 ++++ b/drivers/regulator/tps65910-regulator.c
5827 +@@ -1211,12 +1211,10 @@ static int tps65910_probe(struct platform_device *pdev)
5828 +
5829 + rdev = devm_regulator_register(&pdev->dev, &pmic->desc[i],
5830 + &config);
5831 +- if (IS_ERR(rdev)) {
5832 +- dev_err(tps65910->dev,
5833 +- "failed to register %s regulator\n",
5834 +- pdev->name);
5835 +- return PTR_ERR(rdev);
5836 +- }
5837 ++ if (IS_ERR(rdev))
5838 ++ return dev_err_probe(tps65910->dev, PTR_ERR(rdev),
5839 ++ "failed to register %s regulator\n",
5840 ++ pdev->name);
5841 +
5842 + /* Save regulator for cleanup */
5843 + pmic->rdev[i] = rdev;
5844 +diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c
5845 +index cbadb1c996790..d2a37978fc3a8 100644
5846 +--- a/drivers/regulator/vctrl-regulator.c
5847 ++++ b/drivers/regulator/vctrl-regulator.c
5848 +@@ -37,7 +37,6 @@ struct vctrl_voltage_table {
5849 + struct vctrl_data {
5850 + struct regulator_dev *rdev;
5851 + struct regulator_desc desc;
5852 +- struct regulator *ctrl_reg;
5853 + bool enabled;
5854 + unsigned int min_slew_down_rate;
5855 + unsigned int ovp_threshold;
5856 +@@ -82,7 +81,12 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV)
5857 + static int vctrl_get_voltage(struct regulator_dev *rdev)
5858 + {
5859 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
5860 +- int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
5861 ++ int ctrl_uV;
5862 ++
5863 ++ if (!rdev->supply)
5864 ++ return -EPROBE_DEFER;
5865 ++
5866 ++ ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
5867 +
5868 + return vctrl_calc_output_voltage(vctrl, ctrl_uV);
5869 + }
5870 +@@ -92,14 +96,19 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
5871 + unsigned int *selector)
5872 + {
5873 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
5874 +- struct regulator *ctrl_reg = vctrl->ctrl_reg;
5875 +- int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev);
5876 +- int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
5877 ++ int orig_ctrl_uV;
5878 ++ int uV;
5879 + int ret;
5880 +
5881 ++ if (!rdev->supply)
5882 ++ return -EPROBE_DEFER;
5883 ++
5884 ++ orig_ctrl_uV = regulator_get_voltage_rdev(rdev->supply->rdev);
5885 ++ uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV);
5886 ++
5887 + if (req_min_uV >= uV || !vctrl->ovp_threshold)
5888 + /* voltage rising or no OVP */
5889 +- return regulator_set_voltage_rdev(ctrl_reg->rdev,
5890 ++ return regulator_set_voltage_rdev(rdev->supply->rdev,
5891 + vctrl_calc_ctrl_voltage(vctrl, req_min_uV),
5892 + vctrl_calc_ctrl_voltage(vctrl, req_max_uV),
5893 + PM_SUSPEND_ON);
5894 +@@ -117,7 +126,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
5895 + next_uV = max_t(int, req_min_uV, uV - max_drop_uV);
5896 + next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV);
5897 +
5898 +- ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
5899 ++ ret = regulator_set_voltage_rdev(rdev->supply->rdev,
5900 + next_ctrl_uV,
5901 + next_ctrl_uV,
5902 + PM_SUSPEND_ON);
5903 +@@ -134,7 +143,7 @@ static int vctrl_set_voltage(struct regulator_dev *rdev,
5904 +
5905 + err:
5906 + /* Try to go back to original voltage */
5907 +- regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV,
5908 ++ regulator_set_voltage_rdev(rdev->supply->rdev, orig_ctrl_uV, orig_ctrl_uV,
5909 + PM_SUSPEND_ON);
5910 +
5911 + return ret;
5912 +@@ -151,16 +160,18 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
5913 + unsigned int selector)
5914 + {
5915 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
5916 +- struct regulator *ctrl_reg = vctrl->ctrl_reg;
5917 + unsigned int orig_sel = vctrl->sel;
5918 + int ret;
5919 +
5920 ++ if (!rdev->supply)
5921 ++ return -EPROBE_DEFER;
5922 ++
5923 + if (selector >= rdev->desc->n_voltages)
5924 + return -EINVAL;
5925 +
5926 + if (selector >= vctrl->sel || !vctrl->ovp_threshold) {
5927 + /* voltage rising or no OVP */
5928 +- ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
5929 ++ ret = regulator_set_voltage_rdev(rdev->supply->rdev,
5930 + vctrl->vtable[selector].ctrl,
5931 + vctrl->vtable[selector].ctrl,
5932 + PM_SUSPEND_ON);
5933 +@@ -179,7 +190,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
5934 + else
5935 + next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel;
5936 +
5937 +- ret = regulator_set_voltage_rdev(ctrl_reg->rdev,
5938 ++ ret = regulator_set_voltage_rdev(rdev->supply->rdev,
5939 + vctrl->vtable[next_sel].ctrl,
5940 + vctrl->vtable[next_sel].ctrl,
5941 + PM_SUSPEND_ON);
5942 +@@ -202,7 +213,7 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev,
5943 + err:
5944 + if (vctrl->sel != orig_sel) {
5945 + /* Try to go back to original voltage */
5946 +- if (!regulator_set_voltage_rdev(ctrl_reg->rdev,
5947 ++ if (!regulator_set_voltage_rdev(rdev->supply->rdev,
5948 + vctrl->vtable[orig_sel].ctrl,
5949 + vctrl->vtable[orig_sel].ctrl,
5950 + PM_SUSPEND_ON))
5951 +@@ -234,10 +245,6 @@ static int vctrl_parse_dt(struct platform_device *pdev,
5952 + u32 pval;
5953 + u32 vrange_ctrl[2];
5954 +
5955 +- vctrl->ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
5956 +- if (IS_ERR(vctrl->ctrl_reg))
5957 +- return PTR_ERR(vctrl->ctrl_reg);
5958 +-
5959 + ret = of_property_read_u32(np, "ovp-threshold-percent", &pval);
5960 + if (!ret) {
5961 + vctrl->ovp_threshold = pval;
5962 +@@ -315,11 +322,11 @@ static int vctrl_cmp_ctrl_uV(const void *a, const void *b)
5963 + return at->ctrl - bt->ctrl;
5964 + }
5965 +
5966 +-static int vctrl_init_vtable(struct platform_device *pdev)
5967 ++static int vctrl_init_vtable(struct platform_device *pdev,
5968 ++ struct regulator *ctrl_reg)
5969 + {
5970 + struct vctrl_data *vctrl = platform_get_drvdata(pdev);
5971 + struct regulator_desc *rdesc = &vctrl->desc;
5972 +- struct regulator *ctrl_reg = vctrl->ctrl_reg;
5973 + struct vctrl_voltage_range *vrange_ctrl = &vctrl->vrange.ctrl;
5974 + int n_voltages;
5975 + int ctrl_uV;
5976 +@@ -395,23 +402,19 @@ static int vctrl_init_vtable(struct platform_device *pdev)
5977 + static int vctrl_enable(struct regulator_dev *rdev)
5978 + {
5979 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
5980 +- int ret = regulator_enable(vctrl->ctrl_reg);
5981 +
5982 +- if (!ret)
5983 +- vctrl->enabled = true;
5984 ++ vctrl->enabled = true;
5985 +
5986 +- return ret;
5987 ++ return 0;
5988 + }
5989 +
5990 + static int vctrl_disable(struct regulator_dev *rdev)
5991 + {
5992 + struct vctrl_data *vctrl = rdev_get_drvdata(rdev);
5993 +- int ret = regulator_disable(vctrl->ctrl_reg);
5994 +
5995 +- if (!ret)
5996 +- vctrl->enabled = false;
5997 ++ vctrl->enabled = false;
5998 +
5999 +- return ret;
6000 ++ return 0;
6001 + }
6002 +
6003 + static int vctrl_is_enabled(struct regulator_dev *rdev)
6004 +@@ -447,6 +450,7 @@ static int vctrl_probe(struct platform_device *pdev)
6005 + struct regulator_desc *rdesc;
6006 + struct regulator_config cfg = { };
6007 + struct vctrl_voltage_range *vrange_ctrl;
6008 ++ struct regulator *ctrl_reg;
6009 + int ctrl_uV;
6010 + int ret;
6011 +
6012 +@@ -461,15 +465,20 @@ static int vctrl_probe(struct platform_device *pdev)
6013 + if (ret)
6014 + return ret;
6015 +
6016 ++ ctrl_reg = devm_regulator_get(&pdev->dev, "ctrl");
6017 ++ if (IS_ERR(ctrl_reg))
6018 ++ return PTR_ERR(ctrl_reg);
6019 ++
6020 + vrange_ctrl = &vctrl->vrange.ctrl;
6021 +
6022 + rdesc = &vctrl->desc;
6023 + rdesc->name = "vctrl";
6024 + rdesc->type = REGULATOR_VOLTAGE;
6025 + rdesc->owner = THIS_MODULE;
6026 ++ rdesc->supply_name = "ctrl";
6027 +
6028 +- if ((regulator_get_linear_step(vctrl->ctrl_reg) == 1) ||
6029 +- (regulator_count_voltages(vctrl->ctrl_reg) == -EINVAL)) {
6030 ++ if ((regulator_get_linear_step(ctrl_reg) == 1) ||
6031 ++ (regulator_count_voltages(ctrl_reg) == -EINVAL)) {
6032 + rdesc->continuous_voltage_range = true;
6033 + rdesc->ops = &vctrl_ops_cont;
6034 + } else {
6035 +@@ -486,11 +495,12 @@ static int vctrl_probe(struct platform_device *pdev)
6036 + cfg.init_data = init_data;
6037 +
6038 + if (!rdesc->continuous_voltage_range) {
6039 +- ret = vctrl_init_vtable(pdev);
6040 ++ ret = vctrl_init_vtable(pdev, ctrl_reg);
6041 + if (ret)
6042 + return ret;
6043 +
6044 +- ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev);
6045 ++ /* Use locked consumer API when not in regulator framework */
6046 ++ ctrl_uV = regulator_get_voltage(ctrl_reg);
6047 + if (ctrl_uV < 0) {
6048 + dev_err(&pdev->dev, "failed to get control voltage\n");
6049 + return ctrl_uV;
6050 +@@ -513,6 +523,9 @@ static int vctrl_probe(struct platform_device *pdev)
6051 + }
6052 + }
6053 +
6054 ++ /* Drop ctrl-supply here in favor of regulator core managed supply */
6055 ++ devm_regulator_put(ctrl_reg);
6056 ++
6057 + vctrl->rdev = devm_regulator_register(&pdev->dev, rdesc, &cfg);
6058 + if (IS_ERR(vctrl->rdev)) {
6059 + ret = PTR_ERR(vctrl->rdev);
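
[Editor's note] The vctrl rework drops the privately held ctrl regulator: setting rdesc->supply_name = "ctrl" hands resolution and enable/disable of the supply to the regulator core, and the ops reach it through rdev->supply instead. A sketch of the new access pattern, mirroring the lines above (dereferencing rdev->supply->rdev assumes the driver lives in drivers/regulator/ and can see struct regulator via internal.h):

#include <linux/errno.h>
#include <linux/regulator/coupler.h>
#include <linux/regulator/driver.h>

/* rdev->supply may still be NULL early in boot; defer until the core
 * has resolved the "ctrl" supply. */
static int ctrl_voltage(struct regulator_dev *rdev)
{
	if (!rdev->supply)
		return -EPROBE_DEFER;

	return regulator_get_voltage_rdev(rdev->supply->rdev);
}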
6060 +diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
6061 +index a974943c27dac..9fcdb8d81eee6 100644
6062 +--- a/drivers/s390/cio/css.c
6063 ++++ b/drivers/s390/cio/css.c
6064 +@@ -430,9 +430,26 @@ static ssize_t pimpampom_show(struct device *dev,
6065 + }
6066 + static DEVICE_ATTR_RO(pimpampom);
6067 +
6068 ++static ssize_t dev_busid_show(struct device *dev,
6069 ++ struct device_attribute *attr,
6070 ++ char *buf)
6071 ++{
6072 ++ struct subchannel *sch = to_subchannel(dev);
6073 ++ struct pmcw *pmcw = &sch->schib.pmcw;
6074 ++
6075 ++ if ((pmcw->st == SUBCHANNEL_TYPE_IO ||
6076 ++ pmcw->st == SUBCHANNEL_TYPE_MSG) && pmcw->dnv)
6077 ++ return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
6078 ++ pmcw->dev);
6079 ++ else
6080 ++ return sysfs_emit(buf, "none\n");
6081 ++}
6082 ++static DEVICE_ATTR_RO(dev_busid);
6083 ++
6084 + static struct attribute *io_subchannel_type_attrs[] = {
6085 + &dev_attr_chpids.attr,
6086 + &dev_attr_pimpampom.attr,
6087 ++ &dev_attr_dev_busid.attr,
6088 + NULL,
6089 + };
6090 + ATTRIBUTE_GROUPS(io_subchannel_type);
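
[Editor's note] The css.c hunk follows the standard read-only sysfs attribute convention: DEVICE_ATTR_RO(name) expects a callback named name_show, and sysfs_emit() (preferred over raw sprintf() since 5.10) keeps the output within the single PAGE_SIZE sysfs buffer. A minimal sketch with a made-up attribute name:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_busid_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(example_busid);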
6091 +diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
6092 +index 2758d05a802db..179ceb01e0406 100644
6093 +--- a/drivers/s390/crypto/ap_bus.c
6094 ++++ b/drivers/s390/crypto/ap_bus.c
6095 +@@ -121,22 +121,13 @@ static struct bus_type ap_bus_type;
6096 + /* Adapter interrupt definitions */
6097 + static void ap_interrupt_handler(struct airq_struct *airq, bool floating);
6098 +
6099 +-static int ap_airq_flag;
6100 ++static bool ap_irq_flag;
6101 +
6102 + static struct airq_struct ap_airq = {
6103 + .handler = ap_interrupt_handler,
6104 + .isc = AP_ISC,
6105 + };
6106 +
6107 +-/**
6108 +- * ap_using_interrupts() - Returns non-zero if interrupt support is
6109 +- * available.
6110 +- */
6111 +-static inline int ap_using_interrupts(void)
6112 +-{
6113 +- return ap_airq_flag;
6114 +-}
6115 +-
6116 + /**
6117 + * ap_airq_ptr() - Get the address of the adapter interrupt indicator
6118 + *
6119 +@@ -146,7 +137,7 @@ static inline int ap_using_interrupts(void)
6120 + */
6121 + void *ap_airq_ptr(void)
6122 + {
6123 +- if (ap_using_interrupts())
6124 ++ if (ap_irq_flag)
6125 + return ap_airq.lsi_ptr;
6126 + return NULL;
6127 + }
6128 +@@ -376,7 +367,7 @@ void ap_wait(enum ap_sm_wait wait)
6129 + switch (wait) {
6130 + case AP_SM_WAIT_AGAIN:
6131 + case AP_SM_WAIT_INTERRUPT:
6132 +- if (ap_using_interrupts())
6133 ++ if (ap_irq_flag)
6134 + break;
6135 + if (ap_poll_kthread) {
6136 + wake_up(&ap_poll_wait);
6137 +@@ -451,7 +442,7 @@ static void ap_tasklet_fn(unsigned long dummy)
6138 + * be received. Doing it at the beginning of the tasklet is therefore
6139 + * important so that no requests on any AP get lost.
6140 + */
6141 +- if (ap_using_interrupts())
6142 ++ if (ap_irq_flag)
6143 + xchg(ap_airq.lsi_ptr, 0);
6144 +
6145 + spin_lock_bh(&ap_queues_lock);
6146 +@@ -521,7 +512,7 @@ static int ap_poll_thread_start(void)
6147 + {
6148 + int rc;
6149 +
6150 +- if (ap_using_interrupts() || ap_poll_kthread)
6151 ++ if (ap_irq_flag || ap_poll_kthread)
6152 + return 0;
6153 + mutex_lock(&ap_poll_thread_mutex);
6154 + ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
6155 +@@ -1119,7 +1110,7 @@ static BUS_ATTR_RO(ap_adapter_mask);
6156 + static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
6157 + {
6158 + return scnprintf(buf, PAGE_SIZE, "%d\n",
6159 +- ap_using_interrupts() ? 1 : 0);
6160 ++ ap_irq_flag ? 1 : 0);
6161 + }
6162 +
6163 + static BUS_ATTR_RO(ap_interrupts);
6164 +@@ -1832,7 +1823,7 @@ static int __init ap_module_init(void)
6165 + /* enable interrupts if available */
6166 + if (ap_interrupts_available()) {
6167 + rc = register_adapter_interrupt(&ap_airq);
6168 +- ap_airq_flag = (rc == 0);
6169 ++ ap_irq_flag = (rc == 0);
6170 + }
6171 +
6172 + /* Create /sys/bus/ap. */
6173 +@@ -1876,7 +1867,7 @@ out_work:
6174 + out_bus:
6175 + bus_unregister(&ap_bus_type);
6176 + out:
6177 +- if (ap_using_interrupts())
6178 ++ if (ap_irq_flag)
6179 + unregister_adapter_interrupt(&ap_airq);
6180 + kfree(ap_qci_info);
6181 + return rc;
6182 +diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
6183 +index 472efd3a755c4..2940bc8aa43d4 100644
6184 +--- a/drivers/s390/crypto/ap_bus.h
6185 ++++ b/drivers/s390/crypto/ap_bus.h
6186 +@@ -77,12 +77,6 @@ static inline int ap_test_bit(unsigned int *ptr, unsigned int nr)
6187 + #define AP_FUNC_EP11 5
6188 + #define AP_FUNC_APXA 6
6189 +
6190 +-/*
6191 +- * AP interrupt states
6192 +- */
6193 +-#define AP_INTR_DISABLED 0 /* AP interrupt disabled */
6194 +-#define AP_INTR_ENABLED 1 /* AP interrupt enabled */
6195 +-
6196 + /*
6197 + * AP queue state machine states
6198 + */
6199 +@@ -109,7 +103,7 @@ enum ap_sm_event {
6200 + * AP queue state wait behaviour
6201 + */
6202 + enum ap_sm_wait {
6203 +- AP_SM_WAIT_AGAIN, /* retry immediately */
6204 ++ AP_SM_WAIT_AGAIN = 0, /* retry immediately */
6205 + AP_SM_WAIT_TIMEOUT, /* wait for timeout */
6206 + AP_SM_WAIT_INTERRUPT, /* wait for thin interrupt (if available) */
6207 + AP_SM_WAIT_NONE, /* no wait */
6208 +@@ -182,7 +176,7 @@ struct ap_queue {
6209 + enum ap_dev_state dev_state; /* queue device state */
6210 + bool config; /* configured state */
6211 + ap_qid_t qid; /* AP queue id. */
6212 +- int interrupt; /* indicate if interrupts are enabled */
6213 ++ bool interrupt; /* indicate if interrupts are enabled */
6214 + int queue_count; /* # messages currently on AP queue. */
6215 + int pendingq_count; /* # requests on pendingq list. */
6216 + int requestq_count; /* # requests on requestq list. */
6217 +diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
6218 +index 337353c9655ed..639f8d25679c3 100644
6219 +--- a/drivers/s390/crypto/ap_queue.c
6220 ++++ b/drivers/s390/crypto/ap_queue.c
6221 +@@ -19,7 +19,7 @@
6222 + static void __ap_flush_queue(struct ap_queue *aq);
6223 +
6224 + /**
6225 +- * ap_queue_enable_interruption(): Enable interruption on an AP queue.
6226 ++ * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
6227 + * @qid: The AP queue number
6228 + * @ind: the notification indicator byte
6229 + *
6230 +@@ -27,7 +27,7 @@ static void __ap_flush_queue(struct ap_queue *aq);
6231 + * value it waits a while and tests the AP queue if interrupts
6232 + * have been switched on using ap_test_queue().
6233 + */
6234 +-static int ap_queue_enable_interruption(struct ap_queue *aq, void *ind)
6235 ++static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
6236 + {
6237 + struct ap_queue_status status;
6238 + struct ap_qirq_ctrl qirqctrl = { 0 };
6239 +@@ -198,7 +198,8 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
6240 + return AP_SM_WAIT_NONE;
6241 + case AP_RESPONSE_NO_PENDING_REPLY:
6242 + if (aq->queue_count > 0)
6243 +- return AP_SM_WAIT_INTERRUPT;
6244 ++ return aq->interrupt ?
6245 ++ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
6246 + aq->sm_state = AP_SM_STATE_IDLE;
6247 + return AP_SM_WAIT_NONE;
6248 + default:
6249 +@@ -252,7 +253,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
6250 + fallthrough;
6251 + case AP_RESPONSE_Q_FULL:
6252 + aq->sm_state = AP_SM_STATE_QUEUE_FULL;
6253 +- return AP_SM_WAIT_INTERRUPT;
6254 ++ return aq->interrupt ?
6255 ++ AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_TIMEOUT;
6256 + case AP_RESPONSE_RESET_IN_PROGRESS:
6257 + aq->sm_state = AP_SM_STATE_RESET_WAIT;
6258 + return AP_SM_WAIT_TIMEOUT;
6259 +@@ -302,7 +304,7 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
6260 + case AP_RESPONSE_NORMAL:
6261 + case AP_RESPONSE_RESET_IN_PROGRESS:
6262 + aq->sm_state = AP_SM_STATE_RESET_WAIT;
6263 +- aq->interrupt = AP_INTR_DISABLED;
6264 ++ aq->interrupt = false;
6265 + return AP_SM_WAIT_TIMEOUT;
6266 + default:
6267 + aq->dev_state = AP_DEV_STATE_ERROR;
6268 +@@ -335,7 +337,7 @@ static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
6269 + switch (status.response_code) {
6270 + case AP_RESPONSE_NORMAL:
6271 + lsi_ptr = ap_airq_ptr();
6272 +- if (lsi_ptr && ap_queue_enable_interruption(aq, lsi_ptr) == 0)
6273 ++ if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
6274 + aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
6275 + else
6276 + aq->sm_state = (aq->queue_count > 0) ?
6277 +@@ -376,7 +378,7 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
6278 +
6279 + if (status.irq_enabled == 1) {
6280 + /* Irqs are now enabled */
6281 +- aq->interrupt = AP_INTR_ENABLED;
6282 ++ aq->interrupt = true;
6283 + aq->sm_state = (aq->queue_count > 0) ?
6284 + AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
6285 + }
6286 +@@ -566,7 +568,7 @@ static ssize_t interrupt_show(struct device *dev,
6287 + spin_lock_bh(&aq->lock);
6288 + if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
6289 + rc = scnprintf(buf, PAGE_SIZE, "Enable Interrupt pending.\n");
6290 +- else if (aq->interrupt == AP_INTR_ENABLED)
6291 ++ else if (aq->interrupt)
6292 + rc = scnprintf(buf, PAGE_SIZE, "Interrupts enabled.\n");
6293 + else
6294 + rc = scnprintf(buf, PAGE_SIZE, "Interrupts disabled.\n");
6295 +@@ -747,7 +749,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
6296 + aq->ap_dev.device.type = &ap_queue_type;
6297 + aq->ap_dev.device_type = device_type;
6298 + aq->qid = qid;
6299 +- aq->interrupt = AP_INTR_DISABLED;
6300 ++ aq->interrupt = false;
6301 + spin_lock_init(&aq->lock);
6302 + INIT_LIST_HEAD(&aq->pendingq);
6303 + INIT_LIST_HEAD(&aq->requestq);
6304 +diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
6305 +index d68c0ed5e0dd8..f5d0212fb4fe4 100644
6306 +--- a/drivers/s390/crypto/zcrypt_ccamisc.c
6307 ++++ b/drivers/s390/crypto/zcrypt_ccamisc.c
6308 +@@ -1724,10 +1724,10 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci)
6309 + rlen = vlen = PAGE_SIZE/2;
6310 + rc = cca_query_crypto_facility(cardnr, domain, "STATICSB",
6311 + rarray, &rlen, varray, &vlen);
6312 +- if (rc == 0 && rlen >= 10*8 && vlen >= 240) {
6313 +- ci->new_apka_mk_state = (char) rarray[7*8];
6314 +- ci->cur_apka_mk_state = (char) rarray[8*8];
6315 +- ci->old_apka_mk_state = (char) rarray[9*8];
6316 ++ if (rc == 0 && rlen >= 13*8 && vlen >= 240) {
6317 ++ ci->new_apka_mk_state = (char) rarray[10*8];
6318 ++ ci->cur_apka_mk_state = (char) rarray[11*8];
6319 ++ ci->old_apka_mk_state = (char) rarray[12*8];
6320 + if (ci->old_apka_mk_state == '2')
6321 + memcpy(&ci->old_apka_mkvp, varray + 208, 8);
6322 + if (ci->cur_apka_mk_state == '2')
6323 +diff --git a/drivers/soc/mediatek/mt8183-mmsys.h b/drivers/soc/mediatek/mt8183-mmsys.h
6324 +index 579dfc8dc8fc9..9dee485807c94 100644
6325 +--- a/drivers/soc/mediatek/mt8183-mmsys.h
6326 ++++ b/drivers/soc/mediatek/mt8183-mmsys.h
6327 +@@ -28,25 +28,32 @@
6328 + static const struct mtk_mmsys_routes mmsys_mt8183_routing_table[] = {
6329 + {
6330 + DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
6331 +- MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L
6332 ++ MT8183_DISP_OVL0_MOUT_EN, MT8183_OVL0_MOUT_EN_OVL0_2L,
6333 ++ MT8183_OVL0_MOUT_EN_OVL0_2L
6334 + }, {
6335 + DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
6336 +- MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
6337 ++ MT8183_DISP_OVL0_2L_MOUT_EN, MT8183_OVL0_2L_MOUT_EN_DISP_PATH0,
6338 ++ MT8183_OVL0_2L_MOUT_EN_DISP_PATH0
6339 + }, {
6340 + DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_RDMA1,
6341 +- MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1
6342 ++ MT8183_DISP_OVL1_2L_MOUT_EN, MT8183_OVL1_2L_MOUT_EN_RDMA1,
6343 ++ MT8183_OVL1_2L_MOUT_EN_RDMA1
6344 + }, {
6345 + DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
6346 +- MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0
6347 ++ MT8183_DISP_DITHER0_MOUT_EN, MT8183_DITHER0_MOUT_IN_DSI0,
6348 ++ MT8183_DITHER0_MOUT_IN_DSI0
6349 + }, {
6350 + DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
6351 +- MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L
6352 ++ MT8183_DISP_PATH0_SEL_IN, MT8183_DISP_PATH0_SEL_IN_OVL0_2L,
6353 ++ MT8183_DISP_PATH0_SEL_IN_OVL0_2L
6354 + }, {
6355 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
6356 +- MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1
6357 ++ MT8183_DISP_DPI0_SEL_IN, MT8183_DPI0_SEL_IN_RDMA1,
6358 ++ MT8183_DPI0_SEL_IN_RDMA1
6359 + }, {
6360 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
6361 +- MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0
6362 ++ MT8183_DISP_RDMA0_SOUT_SEL_IN, MT8183_RDMA0_SOUT_COLOR0,
6363 ++ MT8183_RDMA0_SOUT_COLOR0
6364 + }
6365 + };
6366 +
6367 +diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
6368 +index 080660ef11bfa..0f949896fd064 100644
6369 +--- a/drivers/soc/mediatek/mtk-mmsys.c
6370 ++++ b/drivers/soc/mediatek/mtk-mmsys.c
6371 +@@ -68,7 +68,9 @@ void mtk_mmsys_ddp_connect(struct device *dev,
6372 +
6373 + for (i = 0; i < mmsys->data->num_routes; i++)
6374 + if (cur == routes[i].from_comp && next == routes[i].to_comp) {
6375 +- reg = readl_relaxed(mmsys->regs + routes[i].addr) | routes[i].val;
6376 ++ reg = readl_relaxed(mmsys->regs + routes[i].addr);
6377 ++ reg &= ~routes[i].mask;
6378 ++ reg |= routes[i].val;
6379 + writel_relaxed(reg, mmsys->regs + routes[i].addr);
6380 + }
6381 + }
6382 +@@ -85,7 +87,8 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
6383 +
6384 + for (i = 0; i < mmsys->data->num_routes; i++)
6385 + if (cur == routes[i].from_comp && next == routes[i].to_comp) {
6386 +- reg = readl_relaxed(mmsys->regs + routes[i].addr) & ~routes[i].val;
6387 ++ reg = readl_relaxed(mmsys->regs + routes[i].addr);
6388 ++ reg &= ~routes[i].mask;
6389 + writel_relaxed(reg, mmsys->regs + routes[i].addr);
6390 + }
6391 + }
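
[Editor's note] The mtk-mmsys change adds a mask to every routing entry because the old OR-only update could never clear a previously programmed mux value. A runnable demonstration of the failure mode (mask and values here are illustrative, not taken from the mmsys register map):

#include <stdint.h>
#include <stdio.h>

/* Masked read-modify-write: clear the whole mux field, then set it. */
static uint32_t route(uint32_t reg, uint32_t mask, uint32_t val)
{
	reg &= ~mask;
	reg |= val;
	return reg;
}

int main(void)
{
	uint32_t reg = 0x2;	/* mux currently selects source 2 */

	printf("or-only: 0x%x\n", reg | 0x1);		/* 0x3: corrupt mux value */
	printf("masked:  0x%x\n", route(reg, 0x7, 0x1));	/* 0x1: correct */
	return 0;
}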
6392 +diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
6393 +index a760a34e6eca8..5f3e2bf0c40bc 100644
6394 +--- a/drivers/soc/mediatek/mtk-mmsys.h
6395 ++++ b/drivers/soc/mediatek/mtk-mmsys.h
6396 +@@ -35,41 +35,54 @@
6397 + #define RDMA0_SOUT_DSI1 0x1
6398 + #define RDMA0_SOUT_DSI2 0x4
6399 + #define RDMA0_SOUT_DSI3 0x5
6400 ++#define RDMA0_SOUT_MASK 0x7
6401 + #define RDMA1_SOUT_DPI0 0x2
6402 + #define RDMA1_SOUT_DPI1 0x3
6403 + #define RDMA1_SOUT_DSI1 0x1
6404 + #define RDMA1_SOUT_DSI2 0x4
6405 + #define RDMA1_SOUT_DSI3 0x5
6406 ++#define RDMA1_SOUT_MASK 0x7
6407 + #define RDMA2_SOUT_DPI0 0x2
6408 + #define RDMA2_SOUT_DPI1 0x3
6409 + #define RDMA2_SOUT_DSI1 0x1
6410 + #define RDMA2_SOUT_DSI2 0x4
6411 + #define RDMA2_SOUT_DSI3 0x5
6412 ++#define RDMA2_SOUT_MASK 0x7
6413 + #define DPI0_SEL_IN_RDMA1 0x1
6414 + #define DPI0_SEL_IN_RDMA2 0x3
6415 ++#define DPI0_SEL_IN_MASK 0x3
6416 + #define DPI1_SEL_IN_RDMA1 (0x1 << 8)
6417 + #define DPI1_SEL_IN_RDMA2 (0x3 << 8)
6418 ++#define DPI1_SEL_IN_MASK (0x3 << 8)
6419 + #define DSI0_SEL_IN_RDMA1 0x1
6420 + #define DSI0_SEL_IN_RDMA2 0x4
6421 ++#define DSI0_SEL_IN_MASK 0x7
6422 + #define DSI1_SEL_IN_RDMA1 0x1
6423 + #define DSI1_SEL_IN_RDMA2 0x4
6424 ++#define DSI1_SEL_IN_MASK 0x7
6425 + #define DSI2_SEL_IN_RDMA1 (0x1 << 16)
6426 + #define DSI2_SEL_IN_RDMA2 (0x4 << 16)
6427 ++#define DSI2_SEL_IN_MASK (0x7 << 16)
6428 + #define DSI3_SEL_IN_RDMA1 (0x1 << 16)
6429 + #define DSI3_SEL_IN_RDMA2 (0x4 << 16)
6430 ++#define DSI3_SEL_IN_MASK (0x7 << 16)
6431 + #define COLOR1_SEL_IN_OVL1 0x1
6432 +
6433 + #define OVL_MOUT_EN_RDMA 0x1
6434 + #define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
6435 + #define BLS_TO_DPI_RDMA1_TO_DSI 0x2
6436 ++#define BLS_RDMA1_DSI_DPI_MASK 0xf
6437 + #define DSI_SEL_IN_BLS 0x0
6438 + #define DPI_SEL_IN_BLS 0x0
6439 ++#define DPI_SEL_IN_MASK 0x1
6440 + #define DSI_SEL_IN_RDMA 0x1
6441 ++#define DSI_SEL_IN_MASK 0x1
6442 +
6443 + struct mtk_mmsys_routes {
6444 + u32 from_comp;
6445 + u32 to_comp;
6446 + u32 addr;
6447 ++ u32 mask;
6448 + u32 val;
6449 + };
6450 +
6451 +@@ -91,124 +104,164 @@ struct mtk_mmsys_driver_data {
6452 + static const struct mtk_mmsys_routes mmsys_default_routing_table[] = {
6453 + {
6454 + DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
6455 +- DISP_REG_CONFIG_OUT_SEL, BLS_TO_DSI_RDMA1_TO_DPI1
6456 ++ DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK,
6457 ++ BLS_TO_DSI_RDMA1_TO_DPI1
6458 + }, {
6459 + DDP_COMPONENT_BLS, DDP_COMPONENT_DSI0,
6460 +- DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_BLS
6461 ++ DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK,
6462 ++ DSI_SEL_IN_BLS
6463 + }, {
6464 + DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
6465 +- DISP_REG_CONFIG_OUT_SEL, BLS_TO_DPI_RDMA1_TO_DSI
6466 ++ DISP_REG_CONFIG_OUT_SEL, BLS_RDMA1_DSI_DPI_MASK,
6467 ++ BLS_TO_DPI_RDMA1_TO_DSI
6468 + }, {
6469 + DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
6470 +- DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_RDMA
6471 ++ DISP_REG_CONFIG_DSI_SEL, DSI_SEL_IN_MASK,
6472 ++ DSI_SEL_IN_RDMA
6473 + }, {
6474 + DDP_COMPONENT_BLS, DDP_COMPONENT_DPI0,
6475 +- DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_BLS
6476 ++ DISP_REG_CONFIG_DPI_SEL, DPI_SEL_IN_MASK,
6477 ++ DPI_SEL_IN_BLS
6478 + }, {
6479 + DDP_COMPONENT_GAMMA, DDP_COMPONENT_RDMA1,
6480 +- DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1
6481 ++ DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN, GAMMA_MOUT_EN_RDMA1,
6482 ++ GAMMA_MOUT_EN_RDMA1
6483 + }, {
6484 + DDP_COMPONENT_OD0, DDP_COMPONENT_RDMA0,
6485 +- DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0
6486 ++ DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD_MOUT_EN_RDMA0,
6487 ++ OD_MOUT_EN_RDMA0
6488 + }, {
6489 + DDP_COMPONENT_OD1, DDP_COMPONENT_RDMA1,
6490 +- DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1
6491 ++ DISP_REG_CONFIG_DISP_OD_MOUT_EN, OD1_MOUT_EN_RDMA1,
6492 ++ OD1_MOUT_EN_RDMA1
6493 + }, {
6494 + DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
6495 +- DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0
6496 ++ DISP_REG_CONFIG_DISP_OVL0_MOUT_EN, OVL0_MOUT_EN_COLOR0,
6497 ++ OVL0_MOUT_EN_COLOR0
6498 + }, {
6499 + DDP_COMPONENT_OVL0, DDP_COMPONENT_COLOR0,
6500 +- DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0
6501 ++ DISP_REG_CONFIG_DISP_COLOR0_SEL_IN, COLOR0_SEL_IN_OVL0,
6502 ++ COLOR0_SEL_IN_OVL0
6503 + }, {
6504 + DDP_COMPONENT_OVL0, DDP_COMPONENT_RDMA0,
6505 +- DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA
6506 ++ DISP_REG_CONFIG_DISP_OVL_MOUT_EN, OVL_MOUT_EN_RDMA,
6507 ++ OVL_MOUT_EN_RDMA
6508 + }, {
6509 + DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
6510 +- DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1
6511 ++ DISP_REG_CONFIG_DISP_OVL1_MOUT_EN, OVL1_MOUT_EN_COLOR1,
6512 ++ OVL1_MOUT_EN_COLOR1
6513 + }, {
6514 + DDP_COMPONENT_OVL1, DDP_COMPONENT_COLOR1,
6515 +- DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1
6516 ++ DISP_REG_CONFIG_DISP_COLOR1_SEL_IN, COLOR1_SEL_IN_OVL1,
6517 ++ COLOR1_SEL_IN_OVL1
6518 + }, {
6519 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI0,
6520 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI0
6521 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
6522 ++ RDMA0_SOUT_DPI0
6523 + }, {
6524 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DPI1,
6525 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DPI1
6526 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
6527 ++ RDMA0_SOUT_DPI1
6528 + }, {
6529 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI1,
6530 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI1
6531 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
6532 ++ RDMA0_SOUT_DSI1
6533 + }, {
6534 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI2,
6535 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI2
6536 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
6537 ++ RDMA0_SOUT_DSI2
6538 + }, {
6539 + DDP_COMPONENT_RDMA0, DDP_COMPONENT_DSI3,
6540 +- DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_DSI3
6541 ++ DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN, RDMA0_SOUT_MASK,
6542 ++ RDMA0_SOUT_DSI3
6543 + }, {
6544 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
6545 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI0
6546 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
6547 ++ RDMA1_SOUT_DPI0
6548 + }, {
6549 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0,
6550 +- DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA1
6551 ++ DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK,
6552 ++ DPI0_SEL_IN_RDMA1
6553 + }, {
6554 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
6555 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DPI1
6556 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
6557 ++ RDMA1_SOUT_DPI1
6558 + }, {
6559 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI1,
6560 +- DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA1
6561 ++ DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK,
6562 ++ DPI1_SEL_IN_RDMA1
6563 + }, {
6564 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI0,
6565 +- DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA1
6566 ++ DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK,
6567 ++ DSI0_SEL_IN_RDMA1
6568 + }, {
6569 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
6570 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI1
6571 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
6572 ++ RDMA1_SOUT_DSI1
6573 + }, {
6574 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI1,
6575 +- DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA1
6576 ++ DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK,
6577 ++ DSI1_SEL_IN_RDMA1
6578 + }, {
6579 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
6580 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI2
6581 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
6582 ++ RDMA1_SOUT_DSI2
6583 + }, {
6584 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI2,
6585 +- DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA1
6586 ++ DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK,
6587 ++ DSI2_SEL_IN_RDMA1
6588 + }, {
6589 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
6590 +- DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_DSI3
6591 ++ DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN, RDMA1_SOUT_MASK,
6592 ++ RDMA1_SOUT_DSI3
6593 + }, {
6594 + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI3,
6595 +- DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA1
6596 ++ DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
6597 ++ DSI3_SEL_IN_RDMA1
6598 + }, {
6599 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
6600 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI0
6601 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
6602 ++ RDMA2_SOUT_DPI0
6603 + }, {
6604 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI0,
6605 +- DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_RDMA2
6606 ++ DISP_REG_CONFIG_DPI_SEL_IN, DPI0_SEL_IN_MASK,
6607 ++ DPI0_SEL_IN_RDMA2
6608 + }, {
6609 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
6610 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DPI1
6611 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
6612 ++ RDMA2_SOUT_DPI1
6613 + }, {
6614 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DPI1,
6615 +- DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_RDMA2
6616 ++ DISP_REG_CONFIG_DPI_SEL_IN, DPI1_SEL_IN_MASK,
6617 ++ DPI1_SEL_IN_RDMA2
6618 + }, {
6619 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI0,
6620 +- DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA2
6621 ++ DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_MASK,
6622 ++ DSI0_SEL_IN_RDMA2
6623 + }, {
6624 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
6625 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI1
6626 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
6627 ++ RDMA2_SOUT_DSI1
6628 + }, {
6629 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
6630 +- DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA2
6631 ++ DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_MASK,
6632 ++ DSI1_SEL_IN_RDMA2
6633 + }, {
6634 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
6635 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI2
6636 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
6637 ++ RDMA2_SOUT_DSI2
6638 + }, {
6639 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI2,
6640 +- DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_RDMA2
6641 ++ DISP_REG_CONFIG_DSIE_SEL_IN, DSI2_SEL_IN_MASK,
6642 ++ DSI2_SEL_IN_RDMA2
6643 + }, {
6644 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
6645 +- DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_DSI3
6646 ++ DISP_REG_CONFIG_DISP_RDMA2_SOUT, RDMA2_SOUT_MASK,
6647 ++ RDMA2_SOUT_DSI3
6648 + }, {
6649 + DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI3,
6650 +- DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_RDMA2
6651 ++ DISP_REG_CONFIG_DSIO_SEL_IN, DSI3_SEL_IN_MASK,
6652 ++ DSI3_SEL_IN_RDMA2
6653 + }
6654 + };
6655 +
6656 +diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
6657 +index bb21c4f1c0c4b..90d2e58173719 100644
6658 +--- a/drivers/soc/qcom/rpmhpd.c
6659 ++++ b/drivers/soc/qcom/rpmhpd.c
6660 +@@ -382,12 +382,11 @@ static int rpmhpd_power_on(struct generic_pm_domain *domain)
6661 + static int rpmhpd_power_off(struct generic_pm_domain *domain)
6662 + {
6663 + struct rpmhpd *pd = domain_to_rpmhpd(domain);
6664 +- int ret = 0;
6665 ++ int ret;
6666 +
6667 + mutex_lock(&rpmhpd_lock);
6668 +
6669 +- ret = rpmhpd_aggregate_corner(pd, pd->level[0]);
6670 +-
6671 ++ ret = rpmhpd_aggregate_corner(pd, 0);
6672 + if (!ret)
6673 + pd->enabled = false;
6674 +
6675 +diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
6676 +index 1d3d5e3ec2b07..6e9a9cd28b178 100644
6677 +--- a/drivers/soc/qcom/smsm.c
6678 ++++ b/drivers/soc/qcom/smsm.c
6679 +@@ -109,7 +109,7 @@ struct smsm_entry {
6680 + DECLARE_BITMAP(irq_enabled, 32);
6681 + DECLARE_BITMAP(irq_rising, 32);
6682 + DECLARE_BITMAP(irq_falling, 32);
6683 +- u32 last_value;
6684 ++ unsigned long last_value;
6685 +
6686 + u32 *remote_state;
6687 + u32 *subscription;
6688 +@@ -204,8 +204,7 @@ static irqreturn_t smsm_intr(int irq, void *data)
6689 + u32 val;
6690 +
6691 + val = readl(entry->remote_state);
6692 +- changed = val ^ entry->last_value;
6693 +- entry->last_value = val;
6694 ++ changed = val ^ xchg(&entry->last_value, val);
6695 +
6696 + for_each_set_bit(i, entry->irq_enabled, 32) {
6697 + if (!(changed & BIT(i)))
6698 +@@ -264,6 +263,12 @@ static void smsm_unmask_irq(struct irq_data *irqd)
6699 + struct qcom_smsm *smsm = entry->smsm;
6700 + u32 val;
6701 +
6702 ++ /* Make sure our last cached state is up-to-date */
6703 ++ if (readl(entry->remote_state) & BIT(irq))
6704 ++ set_bit(irq, &entry->last_value);
6705 ++ else
6706 ++ clear_bit(irq, &entry->last_value);
6707 ++
6708 + set_bit(irq, entry->irq_enabled);
6709 +
6710 + if (entry->subscription) {
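
[Editor's note] The smsm fix replaces a separate read and store of last_value with an atomic swap, so each interrupt pass diffs its snapshot against exactly the previous one even when smsm_unmask_irq() updates last_value concurrently. The same idea expressed in C11 atomics (assuming the kernel's xchg() on a plain word behaves like atomic_exchange here):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t last_value;	/* zero-initialized */

/* Read of the old snapshot and store of the new one are indivisible,
 * so an edge can neither be counted twice nor dropped. */
static uint32_t changed_bits(uint32_t val)
{
	return val ^ atomic_exchange(&last_value, val);
}

int main(void)
{
	printf("0x%x\n", changed_bits(0x5)); /* bits 0 and 2 rose */
	printf("0x%x\n", changed_bits(0x4)); /* bit 0 fell */
	return 0;
}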
6711 +diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
6712 +index 2c13bf4dd5dbe..25eb2c1e31bb2 100644
6713 +--- a/drivers/soc/rockchip/Kconfig
6714 ++++ b/drivers/soc/rockchip/Kconfig
6715 +@@ -6,8 +6,8 @@ if ARCH_ROCKCHIP || COMPILE_TEST
6716 + #
6717 +
6718 + config ROCKCHIP_GRF
6719 +- bool
6720 +- default y
6721 ++ bool "Rockchip General Register Files support" if COMPILE_TEST
6722 ++ default y if ARCH_ROCKCHIP
6723 + help
6724 + The General Register Files are a central component providing
6725 + special additional settings registers for a lot of soc-components.
6726 +diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
6727 +index 8996115ce736a..263ce90473277 100644
6728 +--- a/drivers/spi/spi-coldfire-qspi.c
6729 ++++ b/drivers/spi/spi-coldfire-qspi.c
6730 +@@ -444,7 +444,7 @@ static int mcfqspi_remove(struct platform_device *pdev)
6731 + mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
6732 +
6733 + mcfqspi_cs_teardown(mcfqspi);
6734 +- clk_disable(mcfqspi->clk);
6735 ++ clk_disable_unprepare(mcfqspi->clk);
6736 +
6737 + return 0;
6738 + }
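
[Editor's note] The coldfire-qspi one-liner restores a common clock API invariant: prepare/enable must be undone symmetrically. A probe using clk_prepare_enable() paired with a remove that only calls clk_disable() leaves the prepare count raised forever, so the clock can never be fully gated. A sketch of the balanced pair (function names hypothetical):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	*out = clk;
	return clk_prepare_enable(clk);	/* ++prepare, ++enable */
}

static void example_remove(struct clk *clk)
{
	clk_disable_unprepare(clk);	/* --enable, --prepare */
}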
6739 +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
6740 +index e114e6fe5ea5b..d112c2cac042b 100644
6741 +--- a/drivers/spi/spi-davinci.c
6742 ++++ b/drivers/spi/spi-davinci.c
6743 +@@ -213,12 +213,6 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
6744 + * line for the controller
6745 + */
6746 + if (spi->cs_gpiod) {
6747 +- /*
6748 +- * FIXME: is this code ever executed? This host does not
6749 +- * set SPI_MASTER_GPIO_SS so this chipselect callback should
6750 +- * not get called from the SPI core when we are using
6751 +- * GPIOs for chip select.
6752 +- */
6753 + if (value == BITBANG_CS_ACTIVE)
6754 + gpiod_set_value(spi->cs_gpiod, 1);
6755 + else
6756 +@@ -945,7 +939,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
6757 + master->bus_num = pdev->id;
6758 + master->num_chipselect = pdata->num_chipselect;
6759 + master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
6760 +- master->flags = SPI_MASTER_MUST_RX;
6761 ++ master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_GPIO_SS;
6762 + master->setup = davinci_spi_setup;
6763 + master->cleanup = davinci_spi_cleanup;
6764 + master->can_dma = davinci_spi_can_dma;
6765 +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
6766 +index fb45e6af66381..fd004c9db9dc0 100644
6767 +--- a/drivers/spi/spi-fsl-dspi.c
6768 ++++ b/drivers/spi/spi-fsl-dspi.c
6769 +@@ -530,6 +530,7 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr)
6770 + goto err_rx_dma_buf;
6771 + }
6772 +
6773 ++ memset(&cfg, 0, sizeof(cfg));
6774 + cfg.src_addr = phy_addr + SPI_POPR;
6775 + cfg.dst_addr = phy_addr + SPI_PUSHR;
6776 + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
6777 +diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
6778 +index 104bde153efd2..5eb7b61bbb4d8 100644
6779 +--- a/drivers/spi/spi-pic32.c
6780 ++++ b/drivers/spi/spi-pic32.c
6781 +@@ -361,6 +361,7 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
6782 + struct dma_slave_config cfg;
6783 + int ret;
6784 +
6785 ++ memset(&cfg, 0, sizeof(cfg));
6786 + cfg.device_fc = true;
6787 + cfg.src_addr = pic32s->dma_base + buf_offset;
6788 + cfg.dst_addr = pic32s->dma_base + buf_offset;
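
[Editor's note] The fsl-dspi and pic32 hunks apply the same rule: a stack-allocated struct dma_slave_config must be zeroed before the fields the driver cares about are set, or the DMA engine sees stale stack garbage in every member the driver leaves untouched (the struct gains fields over kernel releases). A sketch of the safe pattern, with made-up addresses:

#include <linux/dmaengine.h>
#include <linux/string.h>

static int example_dma_config(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg;

	memset(&cfg, 0, sizeof(cfg));	/* everything not set below is 0 */
	cfg.src_addr = fifo;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.direction = DMA_DEV_TO_MEM;

	return dmaengine_slave_config(chan, &cfg);
}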
6789 +diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
6790 +index ab19068be8675..98ef17389952a 100644
6791 +--- a/drivers/spi/spi-sprd-adi.c
6792 ++++ b/drivers/spi/spi-sprd-adi.c
6793 +@@ -103,7 +103,7 @@
6794 + #define HWRST_STATUS_WATCHDOG 0xf0
6795 +
6796 + /* Use default timeout 50 ms that converts to watchdog values */
6797 +-#define WDG_LOAD_VAL ((50 * 1000) / 32768)
6798 ++#define WDG_LOAD_VAL ((50 * 32768) / 1000)
6799 + #define WDG_LOAD_MASK GENMASK(15, 0)
6800 + #define WDG_UNLOCK_KEY 0xe551
6801 +
6802 +diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
6803 +index 9262c6418463b..cfa222c9bd5e7 100644
6804 +--- a/drivers/spi/spi-zynq-qspi.c
6805 ++++ b/drivers/spi/spi-zynq-qspi.c
6806 +@@ -545,7 +545,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
6807 + zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
6808 + zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
6809 + ZYNQ_QSPI_IXR_RXTX_MASK);
6810 +- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
6811 ++ if (!wait_for_completion_timeout(&xqspi->data_completion,
6812 + msecs_to_jiffies(1000)))
6813 + err = -ETIMEDOUT;
6814 + }
6815 +@@ -563,7 +563,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
6816 + zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
6817 + zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
6818 + ZYNQ_QSPI_IXR_RXTX_MASK);
6819 +- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
6820 ++ if (!wait_for_completion_timeout(&xqspi->data_completion,
6821 + msecs_to_jiffies(1000)))
6822 + err = -ETIMEDOUT;
6823 + }
6824 +@@ -579,7 +579,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
6825 + zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
6826 + zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
6827 + ZYNQ_QSPI_IXR_RXTX_MASK);
6828 +- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
6829 ++ if (!wait_for_completion_timeout(&xqspi->data_completion,
6830 + msecs_to_jiffies(1000)))
6831 + err = -ETIMEDOUT;
6832 +
6833 +@@ -603,7 +603,7 @@ static int zynq_qspi_exec_mem_op(struct spi_mem *mem,
6834 + zynq_qspi_write_op(xqspi, ZYNQ_QSPI_FIFO_DEPTH, true);
6835 + zynq_qspi_write(xqspi, ZYNQ_QSPI_IEN_OFFSET,
6836 + ZYNQ_QSPI_IXR_RXTX_MASK);
6837 +- if (!wait_for_completion_interruptible_timeout(&xqspi->data_completion,
6838 ++ if (!wait_for_completion_timeout(&xqspi->data_completion,
6839 + msecs_to_jiffies(1000)))
6840 + err = -ETIMEDOUT;
6841 + }
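
[Editor's note] On the zynq-qspi hunks: wait_for_completion_interruptible_timeout() returns -ERESTARTSYS when the task is signalled, which the bare !ret test treated as success, letting the driver continue with an unfinished transfer. The plain timeout variant sleeps uninterruptibly for the bounded 1 s window, leaving only two outcomes. A sketch of the corrected wait:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* 0 means the window expired; a positive value is the jiffies left
 * when the completion fired. Signals no longer break out of the wait. */
static int wait_for_xfer(struct completion *done)
{
	if (!wait_for_completion_timeout(done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}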
6842 +diff --git a/drivers/staging/clocking-wizard/Kconfig b/drivers/staging/clocking-wizard/Kconfig
6843 +index 69cf51445f082..2324b5d737886 100644
6844 +--- a/drivers/staging/clocking-wizard/Kconfig
6845 ++++ b/drivers/staging/clocking-wizard/Kconfig
6846 +@@ -5,6 +5,6 @@
6847 +
6848 + config COMMON_CLK_XLNX_CLKWZRD
6849 + tristate "Xilinx Clocking Wizard"
6850 +- depends on COMMON_CLK && OF && IOMEM
6851 ++ depends on COMMON_CLK && OF && HAS_IOMEM
6852 + help
6853 + Support for the Xilinx Clocking Wizard IP core clock generator.
6854 +diff --git a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
6855 +index f5de81132177d..77293579a1348 100644
6856 +--- a/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
6857 ++++ b/drivers/staging/media/atomisp/i2c/atomisp-mt9m114.c
6858 +@@ -1533,16 +1533,19 @@ static struct v4l2_ctrl_config mt9m114_controls[] = {
6859 + static int mt9m114_detect(struct mt9m114_device *dev, struct i2c_client *client)
6860 + {
6861 + struct i2c_adapter *adapter = client->adapter;
6862 +- u32 retvalue;
6863 ++ u32 model;
6864 ++ int ret;
6865 +
6866 + if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
6867 + dev_err(&client->dev, "%s: i2c error", __func__);
6868 + return -ENODEV;
6869 + }
6870 +- mt9m114_read_reg(client, MISENSOR_16BIT, (u32)MT9M114_PID, &retvalue);
6871 +- dev->real_model_id = retvalue;
6872 ++ ret = mt9m114_read_reg(client, MISENSOR_16BIT, MT9M114_PID, &model);
6873 ++ if (ret)
6874 ++ return ret;
6875 ++ dev->real_model_id = model;
6876 +
6877 +- if (retvalue != MT9M114_MOD_ID) {
6878 ++ if (model != MT9M114_MOD_ID) {
6879 + dev_err(&client->dev, "%s: failed: client->addr = %x\n",
6880 + __func__, client->addr);
6881 + return -ENODEV;
6882 +diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
6883 +index 0d7ea144a4a6d..1ed4e33cc8cf0 100644
6884 +--- a/drivers/tty/serial/fsl_lpuart.c
6885 ++++ b/drivers/tty/serial/fsl_lpuart.c
6886 +@@ -2595,7 +2595,7 @@ static int lpuart_probe(struct platform_device *pdev)
6887 + return PTR_ERR(sport->port.membase);
6888 +
6889 + sport->port.membase += sdata->reg_off;
6890 +- sport->port.mapbase = res->start;
6891 ++ sport->port.mapbase = res->start + sdata->reg_off;
6892 + sport->port.dev = &pdev->dev;
6893 + sport->port.type = PORT_LPUART;
6894 + sport->devtype = sdata->devtype;
6895 +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
6896 +index 5b5e99604989d..87e3f20e120b0 100644
6897 +--- a/drivers/tty/tty_io.c
6898 ++++ b/drivers/tty/tty_io.c
6899 +@@ -2294,8 +2294,6 @@ static int tty_fasync(int fd, struct file *filp, int on)
6900 + * Locking:
6901 + * Called functions take tty_ldiscs_lock
6902 + * current->signal->tty check is safe without locks
6903 +- *
6904 +- * FIXME: may race normal receive processing
6905 + */
6906 +
6907 + static int tiocsti(struct tty_struct *tty, char __user *p)
6908 +@@ -2311,8 +2309,10 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
6909 + ld = tty_ldisc_ref_wait(tty);
6910 + if (!ld)
6911 + return -EIO;
6912 ++ tty_buffer_lock_exclusive(tty->port);
6913 + if (ld->ops->receive_buf)
6914 + ld->ops->receive_buf(tty, &ch, &mbz, 1);
6915 ++ tty_buffer_unlock_exclusive(tty->port);
6916 + tty_ldisc_deref(ld);
6917 + return 0;
6918 + }
6919 +diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
6920 +index ffe301d6ea359..d0f9b7c296b0d 100644
6921 +--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
6922 ++++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
6923 +@@ -598,6 +598,8 @@ static int dwc3_meson_g12a_otg_init(struct platform_device *pdev,
6924 + USB_R5_ID_DIG_IRQ, 0);
6925 +
6926 + irq = platform_get_irq(pdev, 0);
6927 ++ if (irq < 0)
6928 ++ return irq;
6929 + ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
6930 + dwc3_meson_g12a_irq_thread,
6931 + IRQF_ONESHOT, pdev->name, priv);
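
[Editor's note] This is the first of many hunks in this release enforcing the same rule (dwc3-qcom, at91_udc, s3c2410_udc, ohci-tmio, brcmstb-usb-pinmap, phy-fsl-usb, phy-tahvo and phy-twl6030 below): platform_get_irq() returns a negative errno on failure, and that value must be checked before it reaches request_irq(). A generic sketch of the pattern, names hypothetical:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static int example_request_irq(struct platform_device *pdev,
			       irq_handler_t handler, void *data)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)		/* -ENXIO, -EPROBE_DEFER, ... */
		return irq;

	return devm_request_irq(&pdev->dev, irq, handler, 0,
				dev_name(&pdev->dev), data);
}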
6932 +diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
6933 +index 49e6ca94486dd..cfbb96f6627e4 100644
6934 +--- a/drivers/usb/dwc3/dwc3-qcom.c
6935 ++++ b/drivers/usb/dwc3/dwc3-qcom.c
6936 +@@ -614,6 +614,10 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
6937 + qcom->acpi_pdata->dwc3_core_base_size;
6938 +
6939 + irq = platform_get_irq(pdev_irq, 0);
6940 ++ if (irq < 0) {
6941 ++ ret = irq;
6942 ++ goto out;
6943 ++ }
6944 + child_res[1].flags = IORESOURCE_IRQ;
6945 + child_res[1].start = child_res[1].end = irq;
6946 +
6947 +diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
6948 +index eede5cedacb4a..d9ad9adf7348f 100644
6949 +--- a/drivers/usb/gadget/udc/at91_udc.c
6950 ++++ b/drivers/usb/gadget/udc/at91_udc.c
6951 +@@ -1876,7 +1876,9 @@ static int at91udc_probe(struct platform_device *pdev)
6952 + clk_disable(udc->iclk);
6953 +
6954 + /* request UDC and maybe VBUS irqs */
6955 +- udc->udp_irq = platform_get_irq(pdev, 0);
6956 ++ udc->udp_irq = retval = platform_get_irq(pdev, 0);
6957 ++ if (retval < 0)
6958 ++ goto err_unprepare_iclk;
6959 + retval = devm_request_irq(dev, udc->udp_irq, at91_udc_irq, 0,
6960 + driver_name, udc);
6961 + if (retval) {
6962 +diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
6963 +index 0bef6b3f049b9..fa1a3908ec3bb 100644
6964 +--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
6965 ++++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
6966 +@@ -488,27 +488,14 @@ static int bdc_probe(struct platform_device *pdev)
6967 + int irq;
6968 + u32 temp;
6969 + struct device *dev = &pdev->dev;
6970 +- struct clk *clk;
6971 + int phy_num;
6972 +
6973 + dev_dbg(dev, "%s()\n", __func__);
6974 +
6975 +- clk = devm_clk_get_optional(dev, "sw_usbd");
6976 +- if (IS_ERR(clk))
6977 +- return PTR_ERR(clk);
6978 +-
6979 +- ret = clk_prepare_enable(clk);
6980 +- if (ret) {
6981 +- dev_err(dev, "could not enable clock\n");
6982 +- return ret;
6983 +- }
6984 +-
6985 + bdc = devm_kzalloc(dev, sizeof(*bdc), GFP_KERNEL);
6986 + if (!bdc)
6987 + return -ENOMEM;
6988 +
6989 +- bdc->clk = clk;
6990 +-
6991 + bdc->regs = devm_platform_ioremap_resource(pdev, 0);
6992 + if (IS_ERR(bdc->regs))
6993 + return PTR_ERR(bdc->regs);
6994 +@@ -545,10 +532,20 @@ static int bdc_probe(struct platform_device *pdev)
6995 + }
6996 + }
6997 +
6998 ++ bdc->clk = devm_clk_get_optional(dev, "sw_usbd");
6999 ++ if (IS_ERR(bdc->clk))
7000 ++ return PTR_ERR(bdc->clk);
7001 ++
7002 ++ ret = clk_prepare_enable(bdc->clk);
7003 ++ if (ret) {
7004 ++ dev_err(dev, "could not enable clock\n");
7005 ++ return ret;
7006 ++ }
7007 ++
7008 + ret = bdc_phy_init(bdc);
7009 + if (ret) {
7010 + dev_err(bdc->dev, "BDC phy init failure:%d\n", ret);
7011 +- return ret;
7012 ++ goto disable_clk;
7013 + }
7014 +
7015 + temp = bdc_readl(bdc->regs, BDC_BDCCAP1);
7016 +@@ -560,7 +557,8 @@ static int bdc_probe(struct platform_device *pdev)
7017 + if (ret) {
7018 + dev_err(dev,
7019 + "No suitable DMA config available, abort\n");
7020 +- return -ENOTSUPP;
7021 ++ ret = -ENOTSUPP;
7022 ++ goto phycleanup;
7023 + }
7024 + dev_dbg(dev, "Using 32-bit address\n");
7025 + }
7026 +@@ -580,6 +578,8 @@ cleanup:
7027 + bdc_hw_exit(bdc);
7028 + phycleanup:
7029 + bdc_phy_exit(bdc);
7030 ++disable_clk:
7031 ++ clk_disable_unprepare(bdc->clk);
7032 + return ret;
7033 + }
7034 +
7035 +diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
7036 +index 5486f5a708681..0db97fecf99e8 100644
7037 +--- a/drivers/usb/gadget/udc/mv_u3d_core.c
7038 ++++ b/drivers/usb/gadget/udc/mv_u3d_core.c
7039 +@@ -1921,14 +1921,6 @@ static int mv_u3d_probe(struct platform_device *dev)
7040 + goto err_get_irq;
7041 + }
7042 + u3d->irq = r->start;
7043 +- if (request_irq(u3d->irq, mv_u3d_irq,
7044 +- IRQF_SHARED, driver_name, u3d)) {
7045 +- u3d->irq = 0;
7046 +- dev_err(&dev->dev, "Request irq %d for u3d failed\n",
7047 +- u3d->irq);
7048 +- retval = -ENODEV;
7049 +- goto err_request_irq;
7050 +- }
7051 +
7052 + /* initialize gadget structure */
7053 + u3d->gadget.ops = &mv_u3d_ops; /* usb_gadget_ops */
7054 +@@ -1941,6 +1933,15 @@ static int mv_u3d_probe(struct platform_device *dev)
7055 +
7056 + mv_u3d_eps_init(u3d);
7057 +
7058 ++ if (request_irq(u3d->irq, mv_u3d_irq,
7059 ++ IRQF_SHARED, driver_name, u3d)) {
7060 ++ u3d->irq = 0;
7061 ++ dev_err(&dev->dev, "Request irq %d for u3d failed\n",
7062 ++ u3d->irq);
7063 ++ retval = -ENODEV;
7064 ++ goto err_request_irq;
7065 ++ }
7066 ++
7067 + /* external vbus detection */
7068 + if (u3d->vbus) {
7069 + u3d->clock_gating = 1;
7070 +@@ -1964,8 +1965,8 @@ static int mv_u3d_probe(struct platform_device *dev)
7071 +
7072 + err_unregister:
7073 + free_irq(u3d->irq, u3d);
7074 +-err_request_irq:
7075 + err_get_irq:
7076 ++err_request_irq:
7077 + kfree(u3d->status_req);
7078 + err_alloc_status_req:
7079 + kfree(u3d->eps);
7080 +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
7081 +index f1b35a39d1ba8..57d417a7c3e0a 100644
7082 +--- a/drivers/usb/gadget/udc/renesas_usb3.c
7083 ++++ b/drivers/usb/gadget/udc/renesas_usb3.c
7084 +@@ -2707,10 +2707,15 @@ static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
7085 +
7086 + static const struct of_device_id usb3_of_match[] = {
7087 + {
7088 ++ .compatible = "renesas,r8a774c0-usb3-peri",
7089 ++ .data = &renesas_usb3_priv_r8a77990,
7090 ++ }, {
7091 + .compatible = "renesas,r8a7795-usb3-peri",
7092 + .data = &renesas_usb3_priv_gen3,
7093 +- },
7094 +- {
7095 ++ }, {
7096 ++ .compatible = "renesas,r8a77990-usb3-peri",
7097 ++ .data = &renesas_usb3_priv_r8a77990,
7098 ++ }, {
7099 + .compatible = "renesas,rcar-gen3-usb3-peri",
7100 + .data = &renesas_usb3_priv_gen3,
7101 + },
7102 +@@ -2719,18 +2724,10 @@ static const struct of_device_id usb3_of_match[] = {
7103 + MODULE_DEVICE_TABLE(of, usb3_of_match);
7104 +
7105 + static const struct soc_device_attribute renesas_usb3_quirks_match[] = {
7106 +- {
7107 +- .soc_id = "r8a774c0",
7108 +- .data = &renesas_usb3_priv_r8a77990,
7109 +- },
7110 + {
7111 + .soc_id = "r8a7795", .revision = "ES1.*",
7112 + .data = &renesas_usb3_priv_r8a7795_es1,
7113 + },
7114 +- {
7115 +- .soc_id = "r8a77990",
7116 +- .data = &renesas_usb3_priv_r8a77990,
7117 +- },
7118 + { /* sentinel */ },
7119 + };
7120 +
7121 +diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c
7122 +index b154b62abefa1..82c4f3fb2daec 100644
7123 +--- a/drivers/usb/gadget/udc/s3c2410_udc.c
7124 ++++ b/drivers/usb/gadget/udc/s3c2410_udc.c
7125 +@@ -1784,6 +1784,10 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
7126 + s3c2410_udc_reinit(udc);
7127 +
7128 + irq_usbd = platform_get_irq(pdev, 0);
7129 ++ if (irq_usbd < 0) {
7130 ++ retval = irq_usbd;
7131 ++ goto err_udc_clk;
7132 ++ }
7133 +
7134 + /* irq setup after old hardware state is cleaned up */
7135 + retval = request_irq(irq_usbd, s3c2410_udc_irq,
7136 +diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
7137 +index a319b1df3011c..3626758b3e2aa 100644
7138 +--- a/drivers/usb/host/ehci-orion.c
7139 ++++ b/drivers/usb/host/ehci-orion.c
7140 +@@ -264,8 +264,11 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
7141 + * the clock does not exists.
7142 + */
7143 + priv->clk = devm_clk_get(&pdev->dev, NULL);
7144 +- if (!IS_ERR(priv->clk))
7145 +- clk_prepare_enable(priv->clk);
7146 ++ if (!IS_ERR(priv->clk)) {
7147 ++ err = clk_prepare_enable(priv->clk);
7148 ++ if (err)
7149 ++ goto err_put_hcd;
7150 ++ }
7151 +
7152 + priv->phy = devm_phy_optional_get(&pdev->dev, "usb");
7153 + if (IS_ERR(priv->phy)) {
7154 +@@ -311,6 +314,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
7155 + err_dis_clk:
7156 + if (!IS_ERR(priv->clk))
7157 + clk_disable_unprepare(priv->clk);
7158 ++err_put_hcd:
7159 + usb_put_hcd(hcd);
7160 + err:
7161 + dev_err(&pdev->dev, "init %s fail, %d\n",
7162 +diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
7163 +index 7f857bad9e95b..08ec2ab0d95a5 100644
7164 +--- a/drivers/usb/host/ohci-tmio.c
7165 ++++ b/drivers/usb/host/ohci-tmio.c
7166 +@@ -202,6 +202,9 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
7167 + if (!cell)
7168 + return -EINVAL;
7169 +
7170 ++ if (irq < 0)
7171 ++ return irq;
7172 ++
7173 + hcd = usb_create_hcd(&ohci_tmio_hc_driver, &dev->dev, dev_name(&dev->dev));
7174 + if (!hcd) {
7175 + ret = -ENOMEM;
7176 +diff --git a/drivers/usb/misc/brcmstb-usb-pinmap.c b/drivers/usb/misc/brcmstb-usb-pinmap.c
7177 +index 336653091e3b3..2b2019c19cdeb 100644
7178 +--- a/drivers/usb/misc/brcmstb-usb-pinmap.c
7179 ++++ b/drivers/usb/misc/brcmstb-usb-pinmap.c
7180 +@@ -293,6 +293,8 @@ static int __init brcmstb_usb_pinmap_probe(struct platform_device *pdev)
7181 +
7182 + /* Enable interrupt for out pins */
7183 + irq = platform_get_irq(pdev, 0);
7184 ++ if (irq < 0)
7185 ++ return irq;
7186 + err = devm_request_irq(&pdev->dev, irq,
7187 + brcmstb_usb_pinmap_ovr_isr,
7188 + IRQF_TRIGGER_RISING,
7189 +diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
7190 +index f34c9437a182c..972704262b02b 100644
7191 +--- a/drivers/usb/phy/phy-fsl-usb.c
7192 ++++ b/drivers/usb/phy/phy-fsl-usb.c
7193 +@@ -873,6 +873,8 @@ int usb_otg_start(struct platform_device *pdev)
7194 +
7195 + /* request irq */
7196 + p_otg->irq = platform_get_irq(pdev, 0);
7197 ++ if (p_otg->irq < 0)
7198 ++ return p_otg->irq;
7199 + status = request_irq(p_otg->irq, fsl_otg_isr,
7200 + IRQF_SHARED, driver_name, p_otg);
7201 + if (status) {
7202 +diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
7203 +index baebb1f5a9737..a3e043e3e4aae 100644
7204 +--- a/drivers/usb/phy/phy-tahvo.c
7205 ++++ b/drivers/usb/phy/phy-tahvo.c
7206 +@@ -393,7 +393,9 @@ static int tahvo_usb_probe(struct platform_device *pdev)
7207 +
7208 + dev_set_drvdata(&pdev->dev, tu);
7209 +
7210 +- tu->irq = platform_get_irq(pdev, 0);
7211 ++ tu->irq = ret = platform_get_irq(pdev, 0);
7212 ++ if (ret < 0)
7213 ++ return ret;
7214 + ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
7215 + IRQF_ONESHOT,
7216 + "tahvo-vbus", tu);
7217 +diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
7218 +index 8ba6c5a915570..ab3c38a7d8ac0 100644
7219 +--- a/drivers/usb/phy/phy-twl6030-usb.c
7220 ++++ b/drivers/usb/phy/phy-twl6030-usb.c
7221 +@@ -348,6 +348,11 @@ static int twl6030_usb_probe(struct platform_device *pdev)
7222 + twl->irq2 = platform_get_irq(pdev, 1);
7223 + twl->linkstat = MUSB_UNKNOWN;
7224 +
7225 ++ if (twl->irq1 < 0)
7226 ++ return twl->irq1;
7227 ++ if (twl->irq2 < 0)
7228 ++ return twl->irq2;
7229 ++
7230 + twl->comparator.set_vbus = twl6030_set_vbus;
7231 + twl->comparator.start_srp = twl6030_start_srp;
7232 +
7233 +diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
7234 +index e48fded3e414c..8d8959a70e440 100644
7235 +--- a/drivers/video/backlight/pwm_bl.c
7236 ++++ b/drivers/video/backlight/pwm_bl.c
7237 +@@ -409,6 +409,33 @@ static bool pwm_backlight_is_linear(struct platform_pwm_backlight_data *data)
7238 + static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
7239 + {
7240 + struct device_node *node = pb->dev->of_node;
7241 ++ bool active = true;
7242 ++
7243 ++ /*
7244 ++ * If the enable GPIO is present, observable (either as input
7245 ++ * or output) and off, then the backlight is not currently active.
7246 ++ */
7247 ++ if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
7248 ++ active = false;
7249 ++
7250 ++ if (!regulator_is_enabled(pb->power_supply))
7251 ++ active = false;
7252 ++
7253 ++ if (!pwm_is_enabled(pb->pwm))
7254 ++ active = false;
7255 ++
7256 ++ /*
7257 ++ * Synchronize the enable_gpio with the observed state of the
7258 ++ * hardware.
7259 ++ */
7260 ++ if (pb->enable_gpio)
7261 ++ gpiod_direction_output(pb->enable_gpio, active);
7262 ++
7263 ++ /*
7264 ++ * Do not change pb->enabled here! pb->enabled essentially
7265 ++ * tells us if we own one of the regulator's use counts and
7266 ++ * right now we do not.
7267 ++ */
7268 +
7269 + /* Not booted with device tree or no phandle link to the node */
7270 + if (!node || !node->phandle)
7271 +@@ -420,20 +447,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
7272 + * assume that another driver will enable the backlight at the
7273 + * appropriate time. Therefore, if it is disabled, keep it so.
7274 + */
7275 +-
7276 +- /* if the enable GPIO is disabled, do not enable the backlight */
7277 +- if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
7278 +- return FB_BLANK_POWERDOWN;
7279 +-
7280 +- /* The regulator is disabled, do not enable the backlight */
7281 +- if (!regulator_is_enabled(pb->power_supply))
7282 +- return FB_BLANK_POWERDOWN;
7283 +-
7284 +- /* The PWM is disabled, keep it like this */
7285 +- if (!pwm_is_enabled(pb->pwm))
7286 +- return FB_BLANK_POWERDOWN;
7287 +-
7288 +- return FB_BLANK_UNBLANK;
7289 ++ return active ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
7290 + }
7291 +
7292 + static int pwm_backlight_probe(struct platform_device *pdev)
7293 +@@ -486,18 +500,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
7294 + goto err_alloc;
7295 + }
7296 +
7297 +- /*
7298 +- * If the GPIO is not known to be already configured as output, that
7299 +- * is, if gpiod_get_direction returns either 1 or -EINVAL, change the
7300 +- * direction to output and set the GPIO as active.
7301 +- * Do not force the GPIO to active when it was already output as it
7302 +- * could cause backlight flickering or we would enable the backlight too
7303 +- * early. Leave the decision of the initial backlight state for later.
7304 +- */
7305 +- if (pb->enable_gpio &&
7306 +- gpiod_get_direction(pb->enable_gpio) != 0)
7307 +- gpiod_direction_output(pb->enable_gpio, 1);
7308 +-
7309 + pb->power_supply = devm_regulator_get(&pdev->dev, "power");
7310 + if (IS_ERR(pb->power_supply)) {
7311 + ret = PTR_ERR(pb->power_supply);
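
The pwm_bl rework above replaces three early returns (and an eager gpiod_direction_output() at probe time) with a single observed-state computation: the backlight counts as active only if the enable GPIO, the power supply and the PWM all agree, and the GPIO direction is then synchronized to that verdict. A sketch of the decision as a pure function, assuming boolean inputs in place of the gpiod/regulator/pwm queries:

#include <stdbool.h>
#include <stdio.h>

static bool initial_state_active(bool gpio_present, bool gpio_on,
				 bool supply_on, bool pwm_on)
{
	bool active = true;

	if (gpio_present && !gpio_on)    /* observable and off */
		active = false;
	if (!supply_on)
		active = false;
	if (!pwm_on)
		active = false;
	return active;
}

int main(void)
{
	printf("all on: %d, gpio off: %d\n",
	       initial_state_active(true, true, true, true),
	       initial_state_active(true, false, true, true));
	return 0;
}
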
7312 +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
7313 +index 1c855145711ba..63e2f17f3c619 100644
7314 +--- a/drivers/video/fbdev/core/fbmem.c
7315 ++++ b/drivers/video/fbdev/core/fbmem.c
7316 +@@ -962,6 +962,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
7317 + struct fb_var_screeninfo old_var;
7318 + struct fb_videomode mode;
7319 + struct fb_event event;
7320 ++ u32 unused;
7321 +
7322 + if (var->activate & FB_ACTIVATE_INV_MODE) {
7323 + struct fb_videomode mode1, mode2;
7324 +@@ -1008,6 +1009,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
7325 + if (var->xres < 8 || var->yres < 8)
7326 + return -EINVAL;
7327 +
7328 ++ /* A huge resolution would overflow the multiplications below. */
7329 ++ if (check_mul_overflow(var->xres, var->yres, &unused) ||
7330 ++ check_mul_overflow(var->xres_virtual, var->yres_virtual, &unused))
7331 ++ return -EINVAL;
7332 ++
7333 + ret = info->fbops->fb_check_var(var, info);
7334 +
7335 + if (ret)
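
fb_set_var() above now rejects any mode whose pixel count cannot be represented in 32 bits before fb_check_var() runs. On recent compilers the kernel's check_mul_overflow() is built on the compiler overflow builtins, so a userspace analogue can call __builtin_mul_overflow (GCC/Clang) directly:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t unused;
	uint32_t xres = 100000, yres = 100000;   /* 1e10 pixels: overflows u32 */

	if (__builtin_mul_overflow(xres, yres, &unused))
		printf("rejected: %u * %u overflows\n", xres, yres);
	return 0;
}
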
7336 +diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
7337 +index 9bd03a2310328..171ad8b42107e 100644
7338 +--- a/fs/cifs/cifs_unicode.c
7339 ++++ b/fs/cifs/cifs_unicode.c
7340 +@@ -358,14 +358,9 @@ cifs_strndup_from_utf16(const char *src, const int maxlen,
7341 + if (!dst)
7342 + return NULL;
7343 + cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
7344 +- NO_MAP_UNI_RSVD);
7345 ++ NO_MAP_UNI_RSVD);
7346 + } else {
7347 +- len = strnlen(src, maxlen);
7348 +- len++;
7349 +- dst = kmalloc(len, GFP_KERNEL);
7350 +- if (!dst)
7351 +- return NULL;
7352 +- strlcpy(dst, src, len);
7353 ++ dst = kstrndup(src, maxlen, GFP_KERNEL);
7354 + }
7355 +
7356 + return dst;
7357 +diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
7358 +index 72742eb1df4a7..626bb7c552065 100644
7359 +--- a/fs/cifs/fs_context.c
7360 ++++ b/fs/cifs/fs_context.c
7361 +@@ -1259,10 +1259,17 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
7362 + ctx->posix_paths = 1;
7363 + break;
7364 + case Opt_unix:
7365 +- if (result.negated)
7366 ++ if (result.negated) {
7367 ++ if (ctx->linux_ext == 1)
7368 ++ pr_warn_once("conflicting posix mount options specified\n");
7369 + ctx->linux_ext = 0;
7370 +- else
7371 + ctx->no_linux_ext = 1;
7372 ++ } else {
7373 ++ if (ctx->no_linux_ext == 1)
7374 ++ pr_warn_once("conflicting posix mount options specified\n");
7375 ++ ctx->linux_ext = 1;
7376 ++ ctx->no_linux_ext = 0;
7377 ++ }
7378 + break;
7379 + case Opt_nocase:
7380 + ctx->nocase = 1;
7381 +diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
7382 +index 63bfc533c9fb6..6927b68fe8528 100644
7383 +--- a/fs/cifs/readdir.c
7384 ++++ b/fs/cifs/readdir.c
7385 +@@ -381,7 +381,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
7386 + */
7387 +
7388 + static int
7389 +-initiate_cifs_search(const unsigned int xid, struct file *file,
7390 ++_initiate_cifs_search(const unsigned int xid, struct file *file,
7391 + const char *full_path)
7392 + {
7393 + __u16 search_flags;
7394 +@@ -463,6 +463,27 @@ error_exit:
7395 + return rc;
7396 + }
7397 +
7398 ++static int
7399 ++initiate_cifs_search(const unsigned int xid, struct file *file,
7400 ++ const char *full_path)
7401 ++{
7402 ++ int rc, retry_count = 0;
7403 ++
7404 ++ do {
7405 ++ rc = _initiate_cifs_search(xid, file, full_path);
7406 ++ /*
7407 ++ * If we don't have enough credits to start reading the
7408 ++ * directory, just try again after a short wait.
7409 ++ */
7410 ++ if (rc != -EDEADLK)
7411 ++ break;
7412 ++
7413 ++ usleep_range(512, 2048);
7414 ++ } while (retry_count++ < 5);
7415 ++
7416 ++ return rc;
7417 ++}
7418 ++
7419 + /* return length of unicode string in bytes */
7420 + static int cifs_unicode_bytelen(const char *str)
7421 + {
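
The wrapper added above turns -EDEADLK, which here signals a transient shortage of SMB credits, into a bounded retry: up to five extra attempts with a short sleep in between, while any other result (including success) breaks out immediately. A userspace analogue of the pattern, with start_search() as a hypothetical stand-in that fails twice before succeeding:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int attempts;

static int start_search(void)
{
	return ++attempts < 3 ? -EDEADLK : 0;
}

int main(void)
{
	int rc, retry_count = 0;

	do {
		rc = start_search();
		if (rc != -EDEADLK)     /* success or a hard error: stop */
			break;
		usleep(1024);           /* short wait before retrying */
	} while (retry_count++ < 5);

	printf("rc=%d after %d attempts\n", rc, attempts);
	return 0;
}
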
7422 +diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
7423 +index ba7c01cd9a5d2..36f2dbe6061fe 100644
7424 +--- a/fs/debugfs/file.c
7425 ++++ b/fs/debugfs/file.c
7426 +@@ -179,8 +179,10 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
7427 + if (!fops_get(real_fops)) {
7428 + #ifdef CONFIG_MODULES
7429 + if (real_fops->owner &&
7430 +- real_fops->owner->state == MODULE_STATE_GOING)
7431 ++ real_fops->owner->state == MODULE_STATE_GOING) {
7432 ++ r = -ENXIO;
7433 + goto out;
7434 ++ }
7435 + #endif
7436 +
7437 + /* Huh? Module did not clean up after itself at exit? */
7438 +@@ -314,8 +316,10 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
7439 + if (!fops_get(real_fops)) {
7440 + #ifdef CONFIG_MODULES
7441 + if (real_fops->owner &&
7442 +- real_fops->owner->state == MODULE_STATE_GOING)
7443 ++ real_fops->owner->state == MODULE_STATE_GOING) {
7444 ++ r = -ENXIO;
7445 + goto out;
7446 ++ }
7447 + #endif
7448 +
7449 + /* Huh? Module did not clean up after itself at exit? */
7450 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
7451 +index ceb575f99048c..fb27d49e4da72 100644
7452 +--- a/fs/f2fs/file.c
7453 ++++ b/fs/f2fs/file.c
7454 +@@ -263,8 +263,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
7455 + };
7456 + unsigned int seq_id = 0;
7457 +
7458 +- if (unlikely(f2fs_readonly(inode->i_sb) ||
7459 +- is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
7460 ++ if (unlikely(f2fs_readonly(inode->i_sb)))
7461 + return 0;
7462 +
7463 + trace_f2fs_sync_file_enter(inode);
7464 +@@ -278,7 +277,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
7465 + ret = file_write_and_wait_range(file, start, end);
7466 + clear_inode_flag(inode, FI_NEED_IPU);
7467 +
7468 +- if (ret) {
7469 ++ if (ret || is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
7470 + trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
7471 + return ret;
7472 + }
7473 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
7474 +index b29de80ab60e8..8553e8e5de0da 100644
7475 +--- a/fs/f2fs/super.c
7476 ++++ b/fs/f2fs/super.c
7477 +@@ -1923,8 +1923,17 @@ restore_flag:
7478 +
7479 + static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi)
7480 + {
7481 ++ int retry = DEFAULT_RETRY_IO_COUNT;
7482 ++
7483 + /* we should flush all the data to keep data consistency */
7484 +- sync_inodes_sb(sbi->sb);
7485 ++ do {
7486 ++ sync_inodes_sb(sbi->sb);
7487 ++ cond_resched();
7488 ++ congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
7489 ++ } while (get_pages(sbi, F2FS_DIRTY_DATA) && retry--);
7490 ++
7491 ++ if (unlikely(retry < 0))
7492 ++ f2fs_warn(sbi, "checkpoint=enable has some unwritten data.");
7493 +
7494 + down_write(&sbi->gc_lock);
7495 + f2fs_dirty_to_prefree(sbi);
7496 +diff --git a/fs/fcntl.c b/fs/fcntl.c
7497 +index dfc72f15be7fc..887db4918a899 100644
7498 +--- a/fs/fcntl.c
7499 ++++ b/fs/fcntl.c
7500 +@@ -150,7 +150,8 @@ void f_delown(struct file *filp)
7501 + pid_t f_getown(struct file *filp)
7502 + {
7503 + pid_t pid = 0;
7504 +- read_lock(&filp->f_owner.lock);
7505 ++
7506 ++ read_lock_irq(&filp->f_owner.lock);
7507 + rcu_read_lock();
7508 + if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type)) {
7509 + pid = pid_vnr(filp->f_owner.pid);
7510 +@@ -158,7 +159,7 @@ pid_t f_getown(struct file *filp)
7511 + pid = -pid;
7512 + }
7513 + rcu_read_unlock();
7514 +- read_unlock(&filp->f_owner.lock);
7515 ++ read_unlock_irq(&filp->f_owner.lock);
7516 + return pid;
7517 + }
7518 +
7519 +@@ -208,7 +209,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
7520 + struct f_owner_ex owner = {};
7521 + int ret = 0;
7522 +
7523 +- read_lock(&filp->f_owner.lock);
7524 ++ read_lock_irq(&filp->f_owner.lock);
7525 + rcu_read_lock();
7526 + if (pid_task(filp->f_owner.pid, filp->f_owner.pid_type))
7527 + owner.pid = pid_vnr(filp->f_owner.pid);
7528 +@@ -231,7 +232,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
7529 + ret = -EINVAL;
7530 + break;
7531 + }
7532 +- read_unlock(&filp->f_owner.lock);
7533 ++ read_unlock_irq(&filp->f_owner.lock);
7534 +
7535 + if (!ret) {
7536 + ret = copy_to_user(owner_p, &owner, sizeof(owner));
7537 +@@ -249,10 +250,10 @@ static int f_getowner_uids(struct file *filp, unsigned long arg)
7538 + uid_t src[2];
7539 + int err;
7540 +
7541 +- read_lock(&filp->f_owner.lock);
7542 ++ read_lock_irq(&filp->f_owner.lock);
7543 + src[0] = from_kuid(user_ns, filp->f_owner.uid);
7544 + src[1] = from_kuid(user_ns, filp->f_owner.euid);
7545 +- read_unlock(&filp->f_owner.lock);
7546 ++ read_unlock_irq(&filp->f_owner.lock);
7547 +
7548 + err = put_user(src[0], &dst[0]);
7549 + err |= put_user(src[1], &dst[1]);
7550 +@@ -1003,13 +1004,14 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
7551 + {
7552 + while (fa) {
7553 + struct fown_struct *fown;
7554 ++ unsigned long flags;
7555 +
7556 + if (fa->magic != FASYNC_MAGIC) {
7557 + printk(KERN_ERR "kill_fasync: bad magic number in "
7558 + "fasync_struct!\n");
7559 + return;
7560 + }
7561 +- read_lock(&fa->fa_lock);
7562 ++ read_lock_irqsave(&fa->fa_lock, flags);
7563 + if (fa->fa_file) {
7564 + fown = &fa->fa_file->f_owner;
7565 + /* Don't send SIGURG to processes which have not set a
7566 +@@ -1018,7 +1020,7 @@ static void kill_fasync_rcu(struct fasync_struct *fa, int sig, int band)
7567 + if (!(sig == SIGURG && fown->signum == 0))
7568 + send_sigio(fown, fa->fa_fd, band);
7569 + }
7570 +- read_unlock(&fa->fa_lock);
7571 ++ read_unlock_irqrestore(&fa->fa_lock, flags);
7572 + fa = rcu_dereference(fa->fa_next);
7573 + }
7574 + }
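
The fcntl changes above switch every reader of f_owner.lock and fa_lock to the irq-disabling lock variants because kill_fasync() can run from interrupt context: if an interrupt needing the lock fires on a CPU that already holds it (for instance with a writer queued in between), the CPU deadlocks against itself. A userspace analogue of the same hazard uses a signal handler in place of the interrupt; blocking the signal around the critical section plays the role of read_lock_irq(). Assumes POSIX threads and signals; compile with -pthread:

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void critical_section(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	pthread_sigmask(SIG_BLOCK, &set, &old);    /* "disable interrupts" */
	pthread_mutex_lock(&lock);
	/* ... read the shared owner state ... */
	pthread_mutex_unlock(&lock);
	pthread_sigmask(SIG_SETMASK, &old, NULL);  /* "re-enable interrupts" */
}

int main(void)
{
	critical_section();
	puts("no self-deadlock");
	return 0;
}
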
7575 +diff --git a/fs/fuse/file.c b/fs/fuse/file.c
7576 +index 09ef2a4d25edc..f4e1a6387f90d 100644
7577 +--- a/fs/fuse/file.c
7578 ++++ b/fs/fuse/file.c
7579 +@@ -198,12 +198,11 @@ void fuse_finish_open(struct inode *inode, struct file *file)
7580 + struct fuse_file *ff = file->private_data;
7581 + struct fuse_conn *fc = get_fuse_conn(inode);
7582 +
7583 +- if (!(ff->open_flags & FOPEN_KEEP_CACHE))
7584 +- invalidate_inode_pages2(inode->i_mapping);
7585 + if (ff->open_flags & FOPEN_STREAM)
7586 + stream_open(inode, file);
7587 + else if (ff->open_flags & FOPEN_NONSEEKABLE)
7588 + nonseekable_open(inode, file);
7589 ++
7590 + if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
7591 + struct fuse_inode *fi = get_fuse_inode(inode);
7592 +
7593 +@@ -211,10 +210,14 @@ void fuse_finish_open(struct inode *inode, struct file *file)
7594 + fi->attr_version = atomic64_inc_return(&fc->attr_version);
7595 + i_size_write(inode, 0);
7596 + spin_unlock(&fi->lock);
7597 ++ truncate_pagecache(inode, 0);
7598 + fuse_invalidate_attr(inode);
7599 + if (fc->writeback_cache)
7600 + file_update_time(file);
7601 ++ } else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) {
7602 ++ invalidate_inode_pages2(inode->i_mapping);
7603 + }
7604 ++
7605 + if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
7606 + fuse_link_write_file(file);
7607 + }
7608 +@@ -389,6 +392,7 @@ struct fuse_writepage_args {
7609 + struct list_head queue_entry;
7610 + struct fuse_writepage_args *next;
7611 + struct inode *inode;
7612 ++ struct fuse_sync_bucket *bucket;
7613 + };
7614 +
7615 + static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi,
7616 +@@ -1610,6 +1614,9 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa)
7617 + struct fuse_args_pages *ap = &wpa->ia.ap;
7618 + int i;
7619 +
7620 ++ if (wpa->bucket)
7621 ++ fuse_sync_bucket_dec(wpa->bucket);
7622 ++
7623 + for (i = 0; i < ap->num_pages; i++)
7624 + __free_page(ap->pages[i]);
7625 +
7626 +@@ -1873,6 +1880,20 @@ static struct fuse_writepage_args *fuse_writepage_args_alloc(void)
7627 +
7628 + }
7629 +
7630 ++static void fuse_writepage_add_to_bucket(struct fuse_conn *fc,
7631 ++ struct fuse_writepage_args *wpa)
7632 ++{
7633 ++ if (!fc->sync_fs)
7634 ++ return;
7635 ++
7636 ++ rcu_read_lock();
7637 ++ /* Prevent resurrection of dead bucket in unlikely race with syncfs */
7638 ++ do {
7639 ++ wpa->bucket = rcu_dereference(fc->curr_bucket);
7640 ++ } while (unlikely(!atomic_inc_not_zero(&wpa->bucket->count)));
7641 ++ rcu_read_unlock();
7642 ++}
7643 ++
7644 + static int fuse_writepage_locked(struct page *page)
7645 + {
7646 + struct address_space *mapping = page->mapping;
7647 +@@ -1900,6 +1921,7 @@ static int fuse_writepage_locked(struct page *page)
7648 + if (!wpa->ia.ff)
7649 + goto err_nofile;
7650 +
7651 ++ fuse_writepage_add_to_bucket(fc, wpa);
7652 + fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0);
7653 +
7654 + copy_highpage(tmp_page, page);
7655 +@@ -2150,6 +2172,8 @@ static int fuse_writepages_fill(struct page *page,
7656 + __free_page(tmp_page);
7657 + goto out_unlock;
7658 + }
7659 ++ fuse_writepage_add_to_bucket(fc, wpa);
7660 ++
7661 + data->max_pages = 1;
7662 +
7663 + ap = &wpa->ia.ap;
7664 +@@ -2883,7 +2907,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
7665 +
7666 + static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
7667 + {
7668 +- int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
7669 ++ int err = filemap_write_and_wait_range(inode->i_mapping, start, -1);
7670 +
7671 + if (!err)
7672 + fuse_sync_writes(inode);
7673 +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
7674 +index 120f9c5908d19..e840c4a5f9f5b 100644
7675 +--- a/fs/fuse/fuse_i.h
7676 ++++ b/fs/fuse/fuse_i.h
7677 +@@ -515,6 +515,13 @@ struct fuse_fs_context {
7678 + void **fudptr;
7679 + };
7680 +
7681 ++struct fuse_sync_bucket {
7682 ++ /* count is a possible scalability bottleneck */
7683 ++ atomic_t count;
7684 ++ wait_queue_head_t waitq;
7685 ++ struct rcu_head rcu;
7686 ++};
7687 ++
7688 + /**
7689 + * A Fuse connection.
7690 + *
7691 +@@ -807,6 +814,9 @@ struct fuse_conn {
7692 +
7693 + /** List of filesystems using this connection */
7694 + struct list_head mounts;
7695 ++
7696 ++ /* New writepages go into this bucket */
7697 ++ struct fuse_sync_bucket __rcu *curr_bucket;
7698 + };
7699 +
7700 + /*
7701 +@@ -910,6 +920,15 @@ static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs,
7702 + descs[i].length = PAGE_SIZE - descs[i].offset;
7703 + }
7704 +
7705 ++static inline void fuse_sync_bucket_dec(struct fuse_sync_bucket *bucket)
7706 ++{
7707 ++ /* Need RCU protection to prevent use after free after the decrement */
7708 ++ rcu_read_lock();
7709 ++ if (atomic_dec_and_test(&bucket->count))
7710 ++ wake_up(&bucket->waitq);
7711 ++ rcu_read_unlock();
7712 ++}
7713 ++
7714 + /** Device operations */
7715 + extern const struct file_operations fuse_dev_operations;
7716 +
7717 +diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
7718 +index cf16d6d3a6038..eda92e3d26b87 100644
7719 +--- a/fs/fuse/inode.c
7720 ++++ b/fs/fuse/inode.c
7721 +@@ -506,6 +506,57 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
7722 + return err;
7723 + }
7724 +
7725 ++static struct fuse_sync_bucket *fuse_sync_bucket_alloc(void)
7726 ++{
7727 ++ struct fuse_sync_bucket *bucket;
7728 ++
7729 ++ bucket = kzalloc(sizeof(*bucket), GFP_KERNEL | __GFP_NOFAIL);
7730 ++ if (bucket) {
7731 ++ init_waitqueue_head(&bucket->waitq);
7732 ++ /* Initial active count */
7733 ++ atomic_set(&bucket->count, 1);
7734 ++ }
7735 ++ return bucket;
7736 ++}
7737 ++
7738 ++static void fuse_sync_fs_writes(struct fuse_conn *fc)
7739 ++{
7740 ++ struct fuse_sync_bucket *bucket, *new_bucket;
7741 ++ int count;
7742 ++
7743 ++ new_bucket = fuse_sync_bucket_alloc();
7744 ++ spin_lock(&fc->lock);
7745 ++ bucket = rcu_dereference_protected(fc->curr_bucket, 1);
7746 ++ count = atomic_read(&bucket->count);
7747 ++ WARN_ON(count < 1);
7748 ++ /* No outstanding writes? */
7749 ++ if (count == 1) {
7750 ++ spin_unlock(&fc->lock);
7751 ++ kfree(new_bucket);
7752 ++ return;
7753 ++ }
7754 ++
7755 ++ /*
7756 ++ * Completion of new bucket depends on completion of this bucket, so add
7757 ++ * one more count.
7758 ++ */
7759 ++ atomic_inc(&new_bucket->count);
7760 ++ rcu_assign_pointer(fc->curr_bucket, new_bucket);
7761 ++ spin_unlock(&fc->lock);
7762 ++ /*
7763 ++ * Drop initial active count. At this point if all writes in this and
7764 ++ * ancestor buckets complete, the count will go to zero and this task
7765 ++ * will be woken up.
7766 ++ */
7767 ++ atomic_dec(&bucket->count);
7768 ++
7769 ++ wait_event(bucket->waitq, atomic_read(&bucket->count) == 0);
7770 ++
7771 ++ /* Drop temp count on descendant bucket */
7772 ++ fuse_sync_bucket_dec(new_bucket);
7773 ++ kfree_rcu(bucket, rcu);
7774 ++}
7775 ++
7776 + static int fuse_sync_fs(struct super_block *sb, int wait)
7777 + {
7778 + struct fuse_mount *fm = get_fuse_mount_super(sb);
7779 +@@ -528,6 +579,8 @@ static int fuse_sync_fs(struct super_block *sb, int wait)
7780 + if (!fc->sync_fs)
7781 + return 0;
7782 +
7783 ++ fuse_sync_fs_writes(fc);
7784 ++
7785 + memset(&inarg, 0, sizeof(inarg));
7786 + args.in_numargs = 1;
7787 + args.in_args[0].size = sizeof(inarg);
7788 +@@ -763,6 +816,7 @@ void fuse_conn_put(struct fuse_conn *fc)
7789 + {
7790 + if (refcount_dec_and_test(&fc->count)) {
7791 + struct fuse_iqueue *fiq = &fc->iq;
7792 ++ struct fuse_sync_bucket *bucket;
7793 +
7794 + if (IS_ENABLED(CONFIG_FUSE_DAX))
7795 + fuse_dax_conn_free(fc);
7796 +@@ -770,6 +824,11 @@ void fuse_conn_put(struct fuse_conn *fc)
7797 + fiq->ops->release(fiq);
7798 + put_pid_ns(fc->pid_ns);
7799 + put_user_ns(fc->user_ns);
7800 ++ bucket = rcu_dereference_protected(fc->curr_bucket, 1);
7801 ++ if (bucket) {
7802 ++ WARN_ON(atomic_read(&bucket->count) != 1);
7803 ++ kfree(bucket);
7804 ++ }
7805 + fc->release(fc);
7806 + }
7807 + }
7808 +@@ -1366,6 +1425,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
7809 + if (sb->s_flags & SB_MANDLOCK)
7810 + goto err;
7811 +
7812 ++ rcu_assign_pointer(fc->curr_bucket, fuse_sync_bucket_alloc());
7813 + fuse_sb_defaults(sb);
7814 +
7815 + if (ctx->is_bdev) {
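
Taken together, the fuse hunks above implement a drain mechanism for syncfs: every queued writepage takes a reference on the connection's current bucket, and fuse_sync_fs_writes() atomically installs a fresh bucket, drops the old bucket's initial reference and sleeps until its count hits zero, i.e. until every write that predates the sync has completed. A standalone C11 sketch of the counting (the waitqueue, RCU, and the extra reference chained onto the new bucket are omitted for brevity):

#include <stdatomic.h>
#include <stdio.h>

struct bucket { atomic_int count; };

int main(void)
{
	struct bucket old = { 1 }, new = { 1 };  /* each born with one active ref */
	struct bucket *curr = &old;

	atomic_fetch_add(&curr->count, 1);       /* a write begins: takes a ref */
	curr = &new;                             /* syncfs installs a fresh bucket */
	atomic_fetch_sub(&old.count, 1);         /* syncfs drops the initial ref */
	/* syncfs would now wait until old.count reaches zero */
	atomic_fetch_sub(&old.count, 1);         /* the write completes: drops its ref */
	printf("old bucket drained: %s\n",
	       atomic_load(&old.count) == 0 ? "yes" : "no");
	return 0;
}
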
7816 +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
7817 +index 5f4504dd0875a..ca76e3b8792ce 100644
7818 +--- a/fs/gfs2/ops_fstype.c
7819 ++++ b/fs/gfs2/ops_fstype.c
7820 +@@ -677,6 +677,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
7821 + error = PTR_ERR(lsi->si_sc_inode);
7822 + fs_err(sdp, "can't find local \"sc\" file#%u: %d\n",
7823 + jd->jd_jid, error);
7824 ++ kfree(lsi);
7825 + goto free_local;
7826 + }
7827 + lsi->si_jid = jd->jd_jid;
7828 +@@ -1088,6 +1089,34 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
7829 + kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
7830 + }
7831 +
7832 ++static int init_threads(struct gfs2_sbd *sdp)
7833 ++{
7834 ++ struct task_struct *p;
7835 ++ int error = 0;
7836 ++
7837 ++ p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
7838 ++ if (IS_ERR(p)) {
7839 ++ error = PTR_ERR(p);
7840 ++ fs_err(sdp, "can't start logd thread: %d\n", error);
7841 ++ return error;
7842 ++ }
7843 ++ sdp->sd_logd_process = p;
7844 ++
7845 ++ p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
7846 ++ if (IS_ERR(p)) {
7847 ++ error = PTR_ERR(p);
7848 ++ fs_err(sdp, "can't start quotad thread: %d\n", error);
7849 ++ goto fail;
7850 ++ }
7851 ++ sdp->sd_quotad_process = p;
7852 ++ return 0;
7853 ++
7854 ++fail:
7855 ++ kthread_stop(sdp->sd_logd_process);
7856 ++ sdp->sd_logd_process = NULL;
7857 ++ return error;
7858 ++}
7859 ++
7860 + /**
7861 + * gfs2_fill_super - Read in superblock
7862 + * @sb: The VFS superblock
7863 +@@ -1216,6 +1245,14 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
7864 + goto fail_per_node;
7865 + }
7866 +
7867 ++ if (!sb_rdonly(sb)) {
7868 ++ error = init_threads(sdp);
7869 ++ if (error) {
7870 ++ gfs2_withdraw_delayed(sdp);
7871 ++ goto fail_per_node;
7872 ++ }
7873 ++ }
7874 ++
7875 + error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
7876 + if (error)
7877 + goto fail_per_node;
7878 +@@ -1225,6 +1262,12 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
7879 +
7880 + gfs2_freeze_unlock(&freeze_gh);
7881 + if (error) {
7882 ++ if (sdp->sd_quotad_process)
7883 ++ kthread_stop(sdp->sd_quotad_process);
7884 ++ sdp->sd_quotad_process = NULL;
7885 ++ if (sdp->sd_logd_process)
7886 ++ kthread_stop(sdp->sd_logd_process);
7887 ++ sdp->sd_logd_process = NULL;
7888 + fs_err(sdp, "can't make FS RW: %d\n", error);
7889 + goto fail_per_node;
7890 + }
7891 +diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
7892 +index 4d4ceb0b69031..2bdbba5ea8d79 100644
7893 +--- a/fs/gfs2/super.c
7894 ++++ b/fs/gfs2/super.c
7895 +@@ -119,34 +119,6 @@ int gfs2_jdesc_check(struct gfs2_jdesc *jd)
7896 + return 0;
7897 + }
7898 +
7899 +-static int init_threads(struct gfs2_sbd *sdp)
7900 +-{
7901 +- struct task_struct *p;
7902 +- int error = 0;
7903 +-
7904 +- p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
7905 +- if (IS_ERR(p)) {
7906 +- error = PTR_ERR(p);
7907 +- fs_err(sdp, "can't start logd thread: %d\n", error);
7908 +- return error;
7909 +- }
7910 +- sdp->sd_logd_process = p;
7911 +-
7912 +- p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
7913 +- if (IS_ERR(p)) {
7914 +- error = PTR_ERR(p);
7915 +- fs_err(sdp, "can't start quotad thread: %d\n", error);
7916 +- goto fail;
7917 +- }
7918 +- sdp->sd_quotad_process = p;
7919 +- return 0;
7920 +-
7921 +-fail:
7922 +- kthread_stop(sdp->sd_logd_process);
7923 +- sdp->sd_logd_process = NULL;
7924 +- return error;
7925 +-}
7926 +-
7927 + /**
7928 + * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
7929 + * @sdp: the filesystem
7930 +@@ -161,26 +133,17 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
7931 + struct gfs2_log_header_host head;
7932 + int error;
7933 +
7934 +- error = init_threads(sdp);
7935 +- if (error) {
7936 +- gfs2_withdraw_delayed(sdp);
7937 +- return error;
7938 +- }
7939 +-
7940 + j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
7941 +- if (gfs2_withdrawn(sdp)) {
7942 +- error = -EIO;
7943 +- goto fail;
7944 +- }
7945 ++ if (gfs2_withdrawn(sdp))
7946 ++ return -EIO;
7947 +
7948 + error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
7949 + if (error || gfs2_withdrawn(sdp))
7950 +- goto fail;
7951 ++ return error;
7952 +
7953 + if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
7954 + gfs2_consist(sdp);
7955 +- error = -EIO;
7956 +- goto fail;
7957 ++ return -EIO;
7958 + }
7959 +
7960 + /* Initialize some head of the log stuff */
7961 +@@ -188,20 +151,8 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
7962 + gfs2_log_pointers_init(sdp, head.lh_blkno);
7963 +
7964 + error = gfs2_quota_init(sdp);
7965 +- if (error || gfs2_withdrawn(sdp))
7966 +- goto fail;
7967 +-
7968 +- set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
7969 +-
7970 +- return 0;
7971 +-
7972 +-fail:
7973 +- if (sdp->sd_quotad_process)
7974 +- kthread_stop(sdp->sd_quotad_process);
7975 +- sdp->sd_quotad_process = NULL;
7976 +- if (sdp->sd_logd_process)
7977 +- kthread_stop(sdp->sd_logd_process);
7978 +- sdp->sd_logd_process = NULL;
7979 ++ if (!error && !gfs2_withdrawn(sdp))
7980 ++ set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
7981 + return error;
7982 + }
7983 +
7984 +diff --git a/fs/io-wq.c b/fs/io-wq.c
7985 +index 91b0d1fb90eb3..c7171d9758968 100644
7986 +--- a/fs/io-wq.c
7987 ++++ b/fs/io-wq.c
7988 +@@ -53,6 +53,10 @@ struct io_worker {
7989 +
7990 + struct completion ref_done;
7991 +
7992 ++ unsigned long create_state;
7993 ++ struct callback_head create_work;
7994 ++ int create_index;
7995 ++
7996 + struct rcu_head rcu;
7997 + };
7998 +
7999 +@@ -273,24 +277,18 @@ static void io_wqe_inc_running(struct io_worker *worker)
8000 + atomic_inc(&acct->nr_running);
8001 + }
8002 +
8003 +-struct create_worker_data {
8004 +- struct callback_head work;
8005 +- struct io_wqe *wqe;
8006 +- int index;
8007 +-};
8008 +-
8009 + static void create_worker_cb(struct callback_head *cb)
8010 + {
8011 +- struct create_worker_data *cwd;
8012 ++ struct io_worker *worker;
8013 + struct io_wq *wq;
8014 + struct io_wqe *wqe;
8015 + struct io_wqe_acct *acct;
8016 + bool do_create = false, first = false;
8017 +
8018 +- cwd = container_of(cb, struct create_worker_data, work);
8019 +- wqe = cwd->wqe;
8020 ++ worker = container_of(cb, struct io_worker, create_work);
8021 ++ wqe = worker->wqe;
8022 + wq = wqe->wq;
8023 +- acct = &wqe->acct[cwd->index];
8024 ++ acct = &wqe->acct[worker->create_index];
8025 + raw_spin_lock_irq(&wqe->lock);
8026 + if (acct->nr_workers < acct->max_workers) {
8027 + if (!acct->nr_workers)
8028 +@@ -300,33 +298,42 @@ static void create_worker_cb(struct callback_head *cb)
8029 + }
8030 + raw_spin_unlock_irq(&wqe->lock);
8031 + if (do_create) {
8032 +- create_io_worker(wq, wqe, cwd->index, first);
8033 ++ create_io_worker(wq, wqe, worker->create_index, first);
8034 + } else {
8035 + atomic_dec(&acct->nr_running);
8036 + io_worker_ref_put(wq);
8037 + }
8038 +- kfree(cwd);
8039 ++ clear_bit_unlock(0, &worker->create_state);
8040 ++ io_worker_release(worker);
8041 + }
8042 +
8043 +-static void io_queue_worker_create(struct io_wqe *wqe, struct io_wqe_acct *acct)
8044 ++static void io_queue_worker_create(struct io_wqe *wqe, struct io_worker *worker,
8045 ++ struct io_wqe_acct *acct)
8046 + {
8047 +- struct create_worker_data *cwd;
8048 + struct io_wq *wq = wqe->wq;
8049 +
8050 + /* raced with exit, just ignore create call */
8051 + if (test_bit(IO_WQ_BIT_EXIT, &wq->state))
8052 + goto fail;
8053 ++ if (!io_worker_get(worker))
8054 ++ goto fail;
8055 ++ /*
8056 ++ * create_state manages ownership of create_work/index. We should
8057 ++ * only need one entry per worker, as the worker going to sleep
8058 ++ * will trigger the condition, and waking will clear it once it
8059 ++ * runs the task_work.
8060 ++ */
8061 ++ if (test_bit(0, &worker->create_state) ||
8062 ++ test_and_set_bit_lock(0, &worker->create_state))
8063 ++ goto fail_release;
8064 +
8065 +- cwd = kmalloc(sizeof(*cwd), GFP_ATOMIC);
8066 +- if (cwd) {
8067 +- init_task_work(&cwd->work, create_worker_cb);
8068 +- cwd->wqe = wqe;
8069 +- cwd->index = acct->index;
8070 +- if (!task_work_add(wq->task, &cwd->work, TWA_SIGNAL))
8071 +- return;
8072 +-
8073 +- kfree(cwd);
8074 +- }
8075 ++ init_task_work(&worker->create_work, create_worker_cb);
8076 ++ worker->create_index = acct->index;
8077 ++ if (!task_work_add(wq->task, &worker->create_work, TWA_SIGNAL))
8078 ++ return;
8079 ++ clear_bit_unlock(0, &worker->create_state);
8080 ++fail_release:
8081 ++ io_worker_release(worker);
8082 + fail:
8083 + atomic_dec(&acct->nr_running);
8084 + io_worker_ref_put(wq);
8085 +@@ -344,7 +351,7 @@ static void io_wqe_dec_running(struct io_worker *worker)
8086 + if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe)) {
8087 + atomic_inc(&acct->nr_running);
8088 + atomic_inc(&wqe->wq->worker_refs);
8089 +- io_queue_worker_create(wqe, acct);
8090 ++ io_queue_worker_create(wqe, worker, acct);
8091 + }
8092 + }
8093 +
8094 +@@ -417,7 +424,28 @@ static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
8095 + spin_unlock(&wq->hash->wait.lock);
8096 + }
8097 +
8098 +-static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
8099 ++/*
8100 ++ * We can always run the work if the worker is currently the same type as
8101 ++ * the work (e.g. both are bound, or both are unbound). If they are not the
8102 ++ * same, only allow it if adding a worker would stay within the limit.
8103 ++ */
8104 ++static bool io_worker_can_run_work(struct io_worker *worker,
8105 ++ struct io_wq_work *work)
8106 ++{
8107 ++ struct io_wqe_acct *acct;
8108 ++
8109 ++ if (!(worker->flags & IO_WORKER_F_BOUND) !=
8110 ++ !(work->flags & IO_WQ_WORK_UNBOUND))
8111 ++ return true;
8112 ++
8113 ++ /* not the same type, check if we'd go over the limit */
8114 ++ acct = io_work_get_acct(worker->wqe, work);
8115 ++ return acct->nr_workers < acct->max_workers;
8116 ++}
8117 ++
8118 ++static struct io_wq_work *io_get_next_work(struct io_wqe *wqe,
8119 ++ struct io_worker *worker,
8120 ++ bool *stalled)
8121 + __must_hold(wqe->lock)
8122 + {
8123 + struct io_wq_work_node *node, *prev;
8124 +@@ -429,6 +457,9 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
8125 +
8126 + work = container_of(node, struct io_wq_work, list);
8127 +
8128 ++ if (!io_worker_can_run_work(worker, work))
8129 ++ break;
8130 ++
8131 + /* not hashed, can run anytime */
8132 + if (!io_wq_is_hashed(work)) {
8133 + wq_list_del(&wqe->work_list, node, prev);
8134 +@@ -455,6 +486,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
8135 + raw_spin_unlock(&wqe->lock);
8136 + io_wait_on_hash(wqe, stall_hash);
8137 + raw_spin_lock(&wqe->lock);
8138 ++ *stalled = true;
8139 + }
8140 +
8141 + return NULL;
8142 +@@ -494,6 +526,7 @@ static void io_worker_handle_work(struct io_worker *worker)
8143 +
8144 + do {
8145 + struct io_wq_work *work;
8146 ++ bool stalled;
8147 + get_next:
8148 + /*
8149 + * If we got some work, mark us as busy. If we didn't, but
8150 +@@ -502,10 +535,11 @@ get_next:
8151 + * can't make progress, any work completion or insertion will
8152 + * clear the stalled flag.
8153 + */
8154 +- work = io_get_next_work(wqe);
8155 ++ stalled = false;
8156 ++ work = io_get_next_work(wqe, worker, &stalled);
8157 + if (work)
8158 + __io_worker_busy(wqe, worker, work);
8159 +- else if (!wq_list_empty(&wqe->work_list))
8160 ++ else if (stalled)
8161 + wqe->flags |= IO_WQE_FLAG_STALLED;
8162 +
8163 + raw_spin_unlock_irq(&wqe->lock);
8164 +@@ -1010,12 +1044,12 @@ err_wq:
8165 +
8166 + static bool io_task_work_match(struct callback_head *cb, void *data)
8167 + {
8168 +- struct create_worker_data *cwd;
8169 ++ struct io_worker *worker;
8170 +
8171 + if (cb->func != create_worker_cb)
8172 + return false;
8173 +- cwd = container_of(cb, struct create_worker_data, work);
8174 +- return cwd->wqe->wq == data;
8175 ++ worker = container_of(cb, struct io_worker, create_work);
8176 ++ return worker->wqe->wq == data;
8177 + }
8178 +
8179 + void io_wq_exit_start(struct io_wq *wq)
8180 +@@ -1032,12 +1066,13 @@ static void io_wq_exit_workers(struct io_wq *wq)
8181 + return;
8182 +
8183 + while ((cb = task_work_cancel_match(wq->task, io_task_work_match, wq)) != NULL) {
8184 +- struct create_worker_data *cwd;
8185 ++ struct io_worker *worker;
8186 +
8187 +- cwd = container_of(cb, struct create_worker_data, work);
8188 +- atomic_dec(&cwd->wqe->acct[cwd->index].nr_running);
8189 ++ worker = container_of(cb, struct io_worker, create_work);
8190 ++ atomic_dec(&worker->wqe->acct[worker->create_index].nr_running);
8191 + io_worker_ref_put(wq);
8192 +- kfree(cwd);
8193 ++ clear_bit_unlock(0, &worker->create_state);
8194 ++ io_worker_release(worker);
8195 + }
8196 +
8197 + rcu_read_lock();
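
The io-wq rework above drops the per-wakeup GFP_ATOMIC allocation in favour of a create_work slot embedded in each worker, with bit 0 of create_state acting as a lock that grants ownership of that slot (test_and_set_bit_lock() to claim, clear_bit_unlock() to release). The same claim/release discipline in standalone C11, with atomic_flag standing in for the bit lock:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag create_state = ATOMIC_FLAG_INIT;

static int try_claim(void)   /* returns 1 if we won ownership of the slot */
{
	return !atomic_flag_test_and_set_explicit(&create_state,
						  memory_order_acquire);
}

static void release(void)
{
	atomic_flag_clear_explicit(&create_state, memory_order_release);
}

int main(void)
{
	printf("first=%d second=%d\n", try_claim(), try_claim());
	release();
	printf("after release=%d\n", try_claim());
	return 0;
}
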
8198 +diff --git a/fs/io_uring.c b/fs/io_uring.c
8199 +index f6ddc7182943d..58ae2eab99efa 100644
8200 +--- a/fs/io_uring.c
8201 ++++ b/fs/io_uring.c
8202 +@@ -990,6 +990,7 @@ static const struct io_op_def io_op_defs[] = {
8203 + },
8204 + [IORING_OP_WRITE] = {
8205 + .needs_file = 1,
8206 ++ .hash_reg_file = 1,
8207 + .unbound_nonreg_file = 1,
8208 + .pollout = 1,
8209 + .plug = 1,
8210 +@@ -1060,8 +1061,7 @@ static void __io_queue_sqe(struct io_kiocb *req);
8211 + static void io_rsrc_put_work(struct work_struct *work);
8212 +
8213 + static void io_req_task_queue(struct io_kiocb *req);
8214 +-static void io_submit_flush_completions(struct io_comp_state *cs,
8215 +- struct io_ring_ctx *ctx);
8216 ++static void io_submit_flush_completions(struct io_ring_ctx *ctx);
8217 + static bool io_poll_remove_waitqs(struct io_kiocb *req);
8218 + static int io_req_prep_async(struct io_kiocb *req);
8219 +
8220 +@@ -1901,7 +1901,7 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx)
8221 + return;
8222 + if (ctx->submit_state.comp.nr) {
8223 + mutex_lock(&ctx->uring_lock);
8224 +- io_submit_flush_completions(&ctx->submit_state.comp, ctx);
8225 ++ io_submit_flush_completions(ctx);
8226 + mutex_unlock(&ctx->uring_lock);
8227 + }
8228 + percpu_ref_put(&ctx->refs);
8229 +@@ -2147,9 +2147,9 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
8230 + list_add(&req->compl.list, &state->comp.free_list);
8231 + }
8232 +
8233 +-static void io_submit_flush_completions(struct io_comp_state *cs,
8234 +- struct io_ring_ctx *ctx)
8235 ++static void io_submit_flush_completions(struct io_ring_ctx *ctx)
8236 + {
8237 ++ struct io_comp_state *cs = &ctx->submit_state.comp;
8238 + int i, nr = cs->nr;
8239 + struct io_kiocb *req;
8240 + struct req_batch rb;
8241 +@@ -6462,7 +6462,7 @@ static void __io_queue_sqe(struct io_kiocb *req)
8242 +
8243 + cs->reqs[cs->nr++] = req;
8244 + if (cs->nr == ARRAY_SIZE(cs->reqs))
8245 +- io_submit_flush_completions(cs, ctx);
8246 ++ io_submit_flush_completions(ctx);
8247 + } else {
8248 + io_put_req(req);
8249 + }
8250 +@@ -6676,7 +6676,7 @@ static void io_submit_state_end(struct io_submit_state *state,
8251 + if (state->link.head)
8252 + io_queue_sqe(state->link.head);
8253 + if (state->comp.nr)
8254 +- io_submit_flush_completions(&state->comp, ctx);
8255 ++ io_submit_flush_completions(ctx);
8256 + if (state->plug_started)
8257 + blk_finish_plug(&state->plug);
8258 + io_state_file_put(state);
8259 +@@ -7670,6 +7670,8 @@ static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
8260 + return -EINVAL;
8261 + if (nr_args > IORING_MAX_FIXED_FILES)
8262 + return -EMFILE;
8263 ++ if (nr_args > rlimit(RLIMIT_NOFILE))
8264 ++ return -EMFILE;
8265 + ret = io_rsrc_node_switch_start(ctx);
8266 + if (ret)
8267 + return ret;
8268 +diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
8269 +index 6250ca6a1f851..4ecf4e1f68ef9 100644
8270 +--- a/fs/iomap/swapfile.c
8271 ++++ b/fs/iomap/swapfile.c
8272 +@@ -31,11 +31,16 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
8273 + {
8274 + struct iomap *iomap = &isi->iomap;
8275 + unsigned long nr_pages;
8276 ++ unsigned long max_pages;
8277 + uint64_t first_ppage;
8278 + uint64_t first_ppage_reported;
8279 + uint64_t next_ppage;
8280 + int error;
8281 +
8282 ++ if (unlikely(isi->nr_pages >= isi->sis->max))
8283 ++ return 0;
8284 ++ max_pages = isi->sis->max - isi->nr_pages;
8285 ++
8286 + /*
8287 + * Round the start up and the end down so that the physical
8288 + * extent aligns to a page boundary.
8289 +@@ -48,6 +53,7 @@ static int iomap_swapfile_add_extent(struct iomap_swapfile_info *isi)
8290 + if (first_ppage >= next_ppage)
8291 + return 0;
8292 + nr_pages = next_ppage - first_ppage;
8293 ++ nr_pages = min(nr_pages, max_pages);
8294 +
8295 + /*
8296 + * Calculate how much swap space we're adding; the first page contains
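
iomap_swapfile_add_extent() above now clamps each extent so the total number of pages handed to the VM never exceeds the size of the swap map (sis->max); otherwise more pages than the map can describe could be added. The arithmetic as a standalone sketch with made-up numbers:

#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned long nr_pages = 500;    /* pages in this extent */
	unsigned long added = 90, max = 100;

	if (added >= max)
		return 0;                /* map already full: add nothing */
	nr_pages = min(nr_pages, max - added);
	printf("adding %lu pages\n", nr_pages);  /* 10, not 500 */
	return 0;
}
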
8297 +diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
8298 +index 21edc423b79fa..678e2c51b855c 100644
8299 +--- a/fs/isofs/inode.c
8300 ++++ b/fs/isofs/inode.c
8301 +@@ -155,7 +155,6 @@ struct iso9660_options{
8302 + unsigned int overriderockperm:1;
8303 + unsigned int uid_set:1;
8304 + unsigned int gid_set:1;
8305 +- unsigned int utf8:1;
8306 + unsigned char map;
8307 + unsigned char check;
8308 + unsigned int blocksize;
8309 +@@ -356,7 +355,6 @@ static int parse_options(char *options, struct iso9660_options *popt)
8310 + popt->gid = GLOBAL_ROOT_GID;
8311 + popt->uid = GLOBAL_ROOT_UID;
8312 + popt->iocharset = NULL;
8313 +- popt->utf8 = 0;
8314 + popt->overriderockperm = 0;
8315 + popt->session=-1;
8316 + popt->sbsector=-1;
8317 +@@ -389,10 +387,13 @@ static int parse_options(char *options, struct iso9660_options *popt)
8318 + case Opt_cruft:
8319 + popt->cruft = 1;
8320 + break;
8321 ++#ifdef CONFIG_JOLIET
8322 + case Opt_utf8:
8323 +- popt->utf8 = 1;
8324 ++ kfree(popt->iocharset);
8325 ++ popt->iocharset = kstrdup("utf8", GFP_KERNEL);
8326 ++ if (!popt->iocharset)
8327 ++ return 0;
8328 + break;
8329 +-#ifdef CONFIG_JOLIET
8330 + case Opt_iocharset:
8331 + kfree(popt->iocharset);
8332 + popt->iocharset = match_strdup(&args[0]);
8333 +@@ -495,7 +496,6 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
8334 + if (sbi->s_nocompress) seq_puts(m, ",nocompress");
8335 + if (sbi->s_overriderockperm) seq_puts(m, ",overriderockperm");
8336 + if (sbi->s_showassoc) seq_puts(m, ",showassoc");
8337 +- if (sbi->s_utf8) seq_puts(m, ",utf8");
8338 +
8339 + if (sbi->s_check) seq_printf(m, ",check=%c", sbi->s_check);
8340 + if (sbi->s_mapping) seq_printf(m, ",map=%c", sbi->s_mapping);
8341 +@@ -518,9 +518,10 @@ static int isofs_show_options(struct seq_file *m, struct dentry *root)
8342 + seq_printf(m, ",fmode=%o", sbi->s_fmode);
8343 +
8344 + #ifdef CONFIG_JOLIET
8345 +- if (sbi->s_nls_iocharset &&
8346 +- strcmp(sbi->s_nls_iocharset->charset, CONFIG_NLS_DEFAULT) != 0)
8347 ++ if (sbi->s_nls_iocharset)
8348 + seq_printf(m, ",iocharset=%s", sbi->s_nls_iocharset->charset);
8349 ++ else
8350 ++ seq_puts(m, ",iocharset=utf8");
8351 + #endif
8352 + return 0;
8353 + }
8354 +@@ -863,14 +864,13 @@ root_found:
8355 + sbi->s_nls_iocharset = NULL;
8356 +
8357 + #ifdef CONFIG_JOLIET
8358 +- if (joliet_level && opt.utf8 == 0) {
8359 ++ if (joliet_level) {
8360 + char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
8361 +- sbi->s_nls_iocharset = load_nls(p);
8362 +- if (! sbi->s_nls_iocharset) {
8363 +- /* Fail only if explicit charset specified */
8364 +- if (opt.iocharset)
8365 ++ if (strcmp(p, "utf8") != 0) {
8366 ++ sbi->s_nls_iocharset = opt.iocharset ?
8367 ++ load_nls(opt.iocharset) : load_nls_default();
8368 ++ if (!sbi->s_nls_iocharset)
8369 + goto out_freesbi;
8370 +- sbi->s_nls_iocharset = load_nls_default();
8371 + }
8372 + }
8373 + #endif
8374 +@@ -886,7 +886,6 @@ root_found:
8375 + sbi->s_gid = opt.gid;
8376 + sbi->s_uid_set = opt.uid_set;
8377 + sbi->s_gid_set = opt.gid_set;
8378 +- sbi->s_utf8 = opt.utf8;
8379 + sbi->s_nocompress = opt.nocompress;
8380 + sbi->s_overriderockperm = opt.overriderockperm;
8381 + /*
8382 +diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
8383 +index 055ec6c586f7f..dcdc191ed1834 100644
8384 +--- a/fs/isofs/isofs.h
8385 ++++ b/fs/isofs/isofs.h
8386 +@@ -44,7 +44,6 @@ struct isofs_sb_info {
8387 + unsigned char s_session;
8388 + unsigned int s_high_sierra:1;
8389 + unsigned int s_rock:2;
8390 +- unsigned int s_utf8:1;
8391 + unsigned int s_cruft:1; /* Broken disks with high byte of length
8392 + * containing junk */
8393 + unsigned int s_nocompress:1;
8394 +diff --git a/fs/isofs/joliet.c b/fs/isofs/joliet.c
8395 +index be8b6a9d0b926..c0f04a1e7f695 100644
8396 +--- a/fs/isofs/joliet.c
8397 ++++ b/fs/isofs/joliet.c
8398 +@@ -41,14 +41,12 @@ uni16_to_x8(unsigned char *ascii, __be16 *uni, int len, struct nls_table *nls)
8399 + int
8400 + get_joliet_filename(struct iso_directory_record * de, unsigned char *outname, struct inode * inode)
8401 + {
8402 +- unsigned char utf8;
8403 + struct nls_table *nls;
8404 + unsigned char len = 0;
8405 +
8406 +- utf8 = ISOFS_SB(inode->i_sb)->s_utf8;
8407 + nls = ISOFS_SB(inode->i_sb)->s_nls_iocharset;
8408 +
8409 +- if (utf8) {
8410 ++ if (!nls) {
8411 + len = utf16s_to_utf8s((const wchar_t *) de->name,
8412 + de->name_len[0] >> 1, UTF16_BIG_ENDIAN,
8413 + outname, PAGE_SIZE);
8414 +diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
8415 +index 61d3cc2283dc8..498cb70c2c0d0 100644
8416 +--- a/fs/lockd/svclock.c
8417 ++++ b/fs/lockd/svclock.c
8418 +@@ -634,7 +634,7 @@ nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file,
8419 + conflock->caller = "somehost"; /* FIXME */
8420 + conflock->len = strlen(conflock->caller);
8421 + conflock->oh.len = 0; /* don't return OH info */
8422 +- conflock->svid = ((struct nlm_lockowner *)lock->fl.fl_owner)->pid;
8423 ++ conflock->svid = lock->fl.fl_pid;
8424 + conflock->fl.fl_type = lock->fl.fl_type;
8425 + conflock->fl.fl_start = lock->fl.fl_start;
8426 + conflock->fl.fl_end = lock->fl.fl_end;
8427 +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
8428 +index 90e81f6491ff5..ab81e8ae32659 100644
8429 +--- a/fs/nfsd/nfs4state.c
8430 ++++ b/fs/nfsd/nfs4state.c
8431 +@@ -2665,9 +2665,9 @@ static void force_expire_client(struct nfs4_client *clp)
8432 + struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
8433 + bool already_expired;
8434 +
8435 +- spin_lock(&clp->cl_lock);
8436 ++ spin_lock(&nn->client_lock);
8437 + clp->cl_time = 0;
8438 +- spin_unlock(&clp->cl_lock);
8439 ++ spin_unlock(&nn->client_lock);
8440 +
8441 + wait_event(expiry_wq, atomic_read(&clp->cl_rpc_users) == 0);
8442 + spin_lock(&nn->client_lock);
8443 +diff --git a/fs/udf/misc.c b/fs/udf/misc.c
8444 +index eab94527340dc..1614d308d0f06 100644
8445 +--- a/fs/udf/misc.c
8446 ++++ b/fs/udf/misc.c
8447 +@@ -173,13 +173,22 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
8448 + else
8449 + offset = le32_to_cpu(eahd->appAttrLocation);
8450 +
8451 +- while (offset < iinfo->i_lenEAttr) {
8452 ++ while (offset + sizeof(*gaf) < iinfo->i_lenEAttr) {
8453 ++ uint32_t attrLength;
8454 ++
8455 + gaf = (struct genericFormat *)&ea[offset];
8456 ++ attrLength = le32_to_cpu(gaf->attrLength);
8457 ++
8458 ++ /* Detect undersized elements and buffer overflows */
8459 ++ if ((attrLength < sizeof(*gaf)) ||
8460 ++ (attrLength > (iinfo->i_lenEAttr - offset)))
8461 ++ break;
8462 ++
8463 + if (le32_to_cpu(gaf->attrType) == type &&
8464 + gaf->attrSubtype == subtype)
8465 + return gaf;
8466 + else
8467 +- offset += le32_to_cpu(gaf->attrLength);
8468 ++ offset += attrLength;
8469 + }
8470 + }
8471 +
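
The loop hardening above treats the extended-attribute area as untrusted input: each element declares its own length, so before advancing, the walker must reject lengths smaller than the element header (which would loop forever or misparse) and lengths that run past the remaining buffer. The same bounds-checked walk over a self-describing record list, as a standalone sketch with illustrative types rather than the UDF structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rec { uint32_t type; uint32_t len; };   /* len includes this header */

static int find(const uint8_t *buf, size_t total, uint32_t type)
{
	size_t off = 0;

	while (off + sizeof(struct rec) <= total) {
		struct rec hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		if (hdr.len < sizeof(hdr) || hdr.len > total - off)
			return 0;              /* undersized or overflowing element */
		if (hdr.type == type)
			return 1;
		off += hdr.len;
	}
	return 0;
}

int main(void)
{
	uint8_t buf[16] = { 0 };
	struct rec bad = { 1, 3 };             /* len < header size: must be rejected */

	memcpy(buf, &bad, sizeof(bad));
	printf("matched malformed element: %s\n",
	       find(buf, sizeof(buf), 1) ? "yes" : "no");
	return 0;
}
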
8472 +diff --git a/fs/udf/super.c b/fs/udf/super.c
8473 +index 2f83c1204e20c..b2d7c57d06881 100644
8474 +--- a/fs/udf/super.c
8475 ++++ b/fs/udf/super.c
8476 +@@ -108,16 +108,10 @@ struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
8477 + return NULL;
8478 + lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
8479 + partnum = le32_to_cpu(lvid->numOfPartitions);
8480 +- if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
8481 +- offsetof(struct logicalVolIntegrityDesc, impUse)) /
8482 +- (2 * sizeof(uint32_t)) < partnum) {
8483 +- udf_err(sb, "Logical volume integrity descriptor corrupted "
8484 +- "(numOfPartitions = %u)!\n", partnum);
8485 +- return NULL;
8486 +- }
8487 + /* The offset is to skip freeSpaceTable and sizeTable arrays */
8488 + offset = partnum * 2 * sizeof(uint32_t);
8489 +- return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
8490 ++ return (struct logicalVolIntegrityDescImpUse *)
8491 ++ (((uint8_t *)(lvid + 1)) + offset);
8492 + }
8493 +
8494 + /* UDF filesystem type */
8495 +@@ -349,10 +343,10 @@ static int udf_show_options(struct seq_file *seq, struct dentry *root)
8496 + seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
8497 + if (sbi->s_anchor != 0)
8498 + seq_printf(seq, ",anchor=%u", sbi->s_anchor);
8499 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
8500 +- seq_puts(seq, ",utf8");
8501 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
8502 ++ if (sbi->s_nls_map)
8503 + seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
8504 ++ else
8505 ++ seq_puts(seq, ",iocharset=utf8");
8506 +
8507 + return 0;
8508 + }
8509 +@@ -558,19 +552,24 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
8510 + /* Ignored (never implemented properly) */
8511 + break;
8512 + case Opt_utf8:
8513 +- uopt->flags |= (1 << UDF_FLAG_UTF8);
8514 ++ if (!remount) {
8515 ++ unload_nls(uopt->nls_map);
8516 ++ uopt->nls_map = NULL;
8517 ++ }
8518 + break;
8519 + case Opt_iocharset:
8520 + if (!remount) {
8521 +- if (uopt->nls_map)
8522 +- unload_nls(uopt->nls_map);
8523 +- /*
8524 +- * load_nls() failure is handled later in
8525 +- * udf_fill_super() after all options are
8526 +- * parsed.
8527 +- */
8528 ++ unload_nls(uopt->nls_map);
8529 ++ uopt->nls_map = NULL;
8530 ++ }
8531 ++ /* When no nls_map is loaded, UTF-8 is used */
8532 ++ if (!remount && strcmp(args[0].from, "utf8") != 0) {
8533 + uopt->nls_map = load_nls(args[0].from);
8534 +- uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
8535 ++ if (!uopt->nls_map) {
8536 ++ pr_err("iocharset %s not found\n",
8537 ++ args[0].from);
8538 ++ return 0;
8539 ++ }
8540 + }
8541 + break;
8542 + case Opt_uforget:
8543 +@@ -1542,6 +1541,7 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
8544 + struct udf_sb_info *sbi = UDF_SB(sb);
8545 + struct logicalVolIntegrityDesc *lvid;
8546 + int indirections = 0;
8547 ++ u32 parts, impuselen;
8548 +
8549 + while (++indirections <= UDF_MAX_LVID_NESTING) {
8550 + final_bh = NULL;
8551 +@@ -1568,15 +1568,27 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
8552 +
8553 + lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
8554 + if (lvid->nextIntegrityExt.extLength == 0)
8555 +- return;
8556 ++ goto check;
8557 +
8558 + loc = leea_to_cpu(lvid->nextIntegrityExt);
8559 + }
8560 +
8561 + udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
8562 + UDF_MAX_LVID_NESTING);
8563 ++out_err:
8564 + brelse(sbi->s_lvid_bh);
8565 + sbi->s_lvid_bh = NULL;
8566 ++ return;
8567 ++check:
8568 ++ parts = le32_to_cpu(lvid->numOfPartitions);
8569 ++ impuselen = le32_to_cpu(lvid->lengthOfImpUse);
8570 ++ if (parts >= sb->s_blocksize || impuselen >= sb->s_blocksize ||
8571 ++ sizeof(struct logicalVolIntegrityDesc) + impuselen +
8572 ++ 2 * parts * sizeof(u32) > sb->s_blocksize) {
8573 ++ udf_warn(sb, "Corrupted LVID (parts=%u, impuselen=%u), "
8574 ++ "ignoring.\n", parts, impuselen);
8575 ++ goto out_err;
8576 ++ }
8577 + }
8578 +
8579 + /*
8580 +@@ -2139,21 +2151,6 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
8581 + if (!udf_parse_options((char *)options, &uopt, false))
8582 + goto parse_options_failure;
8583 +
8584 +- if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
8585 +- uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
8586 +- udf_err(sb, "utf8 cannot be combined with iocharset\n");
8587 +- goto parse_options_failure;
8588 +- }
8589 +- if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
8590 +- uopt.nls_map = load_nls_default();
8591 +- if (!uopt.nls_map)
8592 +- uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
8593 +- else
8594 +- udf_debug("Using default NLS map\n");
8595 +- }
8596 +- if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
8597 +- uopt.flags |= (1 << UDF_FLAG_UTF8);
8598 +-
8599 + fileset.logicalBlockNum = 0xFFFFFFFF;
8600 + fileset.partitionReferenceNum = 0xFFFF;
8601 +
8602 +@@ -2308,8 +2305,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
8603 + error_out:
8604 + iput(sbi->s_vat_inode);
8605 + parse_options_failure:
8606 +- if (uopt.nls_map)
8607 +- unload_nls(uopt.nls_map);
8608 ++ unload_nls(uopt.nls_map);
8609 + if (lvid_open)
8610 + udf_close_lvid(sb);
8611 + brelse(sbi->s_lvid_bh);
8612 +@@ -2359,8 +2355,7 @@ static void udf_put_super(struct super_block *sb)
8613 + sbi = UDF_SB(sb);
8614 +
8615 + iput(sbi->s_vat_inode);
8616 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
8617 +- unload_nls(sbi->s_nls_map);
8618 ++ unload_nls(sbi->s_nls_map);
8619 + if (!sb_rdonly(sb))
8620 + udf_close_lvid(sb);
8621 + brelse(sbi->s_lvid_bh);
8622 +diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
8623 +index 758efe557a199..4fa620543d302 100644
8624 +--- a/fs/udf/udf_sb.h
8625 ++++ b/fs/udf/udf_sb.h
8626 +@@ -20,8 +20,6 @@
8627 + #define UDF_FLAG_UNDELETE 6
8628 + #define UDF_FLAG_UNHIDE 7
8629 + #define UDF_FLAG_VARCONV 8
8630 +-#define UDF_FLAG_NLS_MAP 9
8631 +-#define UDF_FLAG_UTF8 10
8632 + #define UDF_FLAG_UID_FORGET 11 /* save -1 for uid to disk */
8633 + #define UDF_FLAG_GID_FORGET 12
8634 + #define UDF_FLAG_UID_SET 13
8635 +diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
8636 +index 5fcfa96463ebb..622569007b530 100644
8637 +--- a/fs/udf/unicode.c
8638 ++++ b/fs/udf/unicode.c
8639 +@@ -177,7 +177,7 @@ static int udf_name_from_CS0(struct super_block *sb,
8640 + return 0;
8641 + }
8642 +
8643 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
8644 ++ if (UDF_SB(sb)->s_nls_map)
8645 + conv_f = UDF_SB(sb)->s_nls_map->uni2char;
8646 + else
8647 + conv_f = NULL;
8648 +@@ -285,7 +285,7 @@ static int udf_name_to_CS0(struct super_block *sb,
8649 + if (ocu_max_len <= 0)
8650 + return 0;
8651 +
8652 +- if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
8653 ++ if (UDF_SB(sb)->s_nls_map)
8654 + conv_f = UDF_SB(sb)->s_nls_map->char2uni;
8655 + else
8656 + conv_f = NULL;
8657 +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
8658 +index f69c75bd6d276..9bfb2f65534b0 100644
8659 +--- a/include/linux/blkdev.h
8660 ++++ b/include/linux/blkdev.h
8661 +@@ -1531,6 +1531,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
8662 + return offset << SECTOR_SHIFT;
8663 + }
8664 +
8665 ++/*
8666 ++ * Two cases of handling DISCARD merge:
8667 ++ * If max_discard_segments > 1, the driver takes every bio
8668 ++ * as a range and sends them to the controller together. The ranges
8669 ++ * need not be contiguous.
8670 ++ * Otherwise, the bios/requests are handled the same as others,
8671 ++ * which must be contiguous.
8672 ++ */
8673 ++static inline bool blk_discard_mergable(struct request *req)
8674 ++{
8675 ++ if (req_op(req) == REQ_OP_DISCARD &&
8676 ++ queue_max_discard_segments(req->q) > 1)
8677 ++ return true;
8678 ++ return false;
8679 ++}
8680 ++
8681 + static inline int bdev_discard_alignment(struct block_device *bdev)
8682 + {
8683 + struct request_queue *q = bdev_get_queue(bdev);
8684 +diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
8685 +index 757fc60658fa6..d1e9823d99c82 100644
8686 +--- a/include/linux/energy_model.h
8687 ++++ b/include/linux/energy_model.h
8688 +@@ -53,6 +53,22 @@ struct em_perf_domain {
8689 + #ifdef CONFIG_ENERGY_MODEL
8690 + #define EM_MAX_POWER 0xFFFF
8691 +
8692 ++/*
8693 ++ * Increase resolution of energy estimation calculations for 64-bit
8694 ++ * architectures. The extra resolution improves the decisions made by EAS for
8695 ++ * task placement when two Performance Domains might provide similar energy
8696 ++ * estimation values (without the better resolution the values could be equal).
8697 ++ *
8698 ++ * We increase resolution only if we have enough bits to allow this increased
8699 ++ * resolution (i.e. 64-bit). The costs of increasing resolution on 32-bit
8700 ++ * are pretty high and the returns do not justify the increased costs.
8701 ++ */
8702 ++#ifdef CONFIG_64BIT
8703 ++#define em_scale_power(p) ((p) * 1000)
8704 ++#else
8705 ++#define em_scale_power(p) (p)
8706 ++#endif
8707 ++
8708 + struct em_data_callback {
8709 + /**
8710 + * active_power() - Provide power at the next performance state of
8711 +diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
8712 +index bb5e7b0a42746..77295af724264 100644
8713 +--- a/include/linux/hrtimer.h
8714 ++++ b/include/linux/hrtimer.h
8715 +@@ -318,16 +318,12 @@ struct clock_event_device;
8716 +
8717 + extern void hrtimer_interrupt(struct clock_event_device *dev);
8718 +
8719 +-extern void clock_was_set_delayed(void);
8720 +-
8721 + extern unsigned int hrtimer_resolution;
8722 +
8723 + #else
8724 +
8725 + #define hrtimer_resolution (unsigned int)LOW_RES_NSEC
8726 +
8727 +-static inline void clock_was_set_delayed(void) { }
8728 +-
8729 + #endif
8730 +
8731 + static inline ktime_t
8732 +@@ -351,7 +347,6 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
8733 + timer->base->get_time());
8734 + }
8735 +
8736 +-extern void clock_was_set(void);
8737 + #ifdef CONFIG_TIMERFD
8738 + extern void timerfd_clock_was_set(void);
8739 + #else
8740 +diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
8741 +index ded90b097e6e8..3f02b818625ef 100644
8742 +--- a/include/linux/local_lock_internal.h
8743 ++++ b/include/linux/local_lock_internal.h
8744 +@@ -14,29 +14,14 @@ typedef struct {
8745 + } local_lock_t;
8746 +
8747 + #ifdef CONFIG_DEBUG_LOCK_ALLOC
8748 +-# define LL_DEP_MAP_INIT(lockname) \
8749 ++# define LOCAL_LOCK_DEBUG_INIT(lockname) \
8750 + .dep_map = { \
8751 + .name = #lockname, \
8752 + .wait_type_inner = LD_WAIT_CONFIG, \
8753 +- .lock_type = LD_LOCK_PERCPU, \
8754 +- }
8755 +-#else
8756 +-# define LL_DEP_MAP_INIT(lockname)
8757 +-#endif
8758 +-
8759 +-#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) }
8760 +-
8761 +-#define __local_lock_init(lock) \
8762 +-do { \
8763 +- static struct lock_class_key __key; \
8764 +- \
8765 +- debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
8766 +- lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
8767 +- LD_WAIT_CONFIG, LD_WAIT_INV, \
8768 +- LD_LOCK_PERCPU); \
8769 +-} while (0)
8770 ++ .lock_type = LD_LOCK_PERCPU, \
8771 ++ }, \
8772 ++ .owner = NULL,
8773 +
8774 +-#ifdef CONFIG_DEBUG_LOCK_ALLOC
8775 + static inline void local_lock_acquire(local_lock_t *l)
8776 + {
8777 + lock_map_acquire(&l->dep_map);
8778 +@@ -51,11 +36,30 @@ static inline void local_lock_release(local_lock_t *l)
8779 + lock_map_release(&l->dep_map);
8780 + }
8781 +
8782 ++static inline void local_lock_debug_init(local_lock_t *l)
8783 ++{
8784 ++ l->owner = NULL;
8785 ++}
8786 + #else /* CONFIG_DEBUG_LOCK_ALLOC */
8787 ++# define LOCAL_LOCK_DEBUG_INIT(lockname)
8788 + static inline void local_lock_acquire(local_lock_t *l) { }
8789 + static inline void local_lock_release(local_lock_t *l) { }
8790 ++static inline void local_lock_debug_init(local_lock_t *l) { }
8791 + #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
8792 +
8793 ++#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
8794 ++
8795 ++#define __local_lock_init(lock) \
8796 ++do { \
8797 ++ static struct lock_class_key __key; \
8798 ++ \
8799 ++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
8800 ++ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
8801 ++ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
8802 ++ LD_LOCK_PERCPU); \
8803 ++ local_lock_debug_init(lock); \
8804 ++} while (0)
8805 ++
8806 + #define __local_lock(lock) \
8807 + do { \
8808 + preempt_disable(); \
8809 +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
8810 +index eb86e80e4643f..857529a5568d7 100644
8811 +--- a/include/linux/mlx5/mlx5_ifc.h
8812 ++++ b/include/linux/mlx5/mlx5_ifc.h
8813 +@@ -918,7 +918,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
8814 + u8 scatter_fcs[0x1];
8815 + u8 enhanced_multi_pkt_send_wqe[0x1];
8816 + u8 tunnel_lso_const_out_ip_id[0x1];
8817 +- u8 reserved_at_1c[0x2];
8818 ++ u8 tunnel_lro_gre[0x1];
8819 ++ u8 tunnel_lro_vxlan[0x1];
8820 + u8 tunnel_stateless_gre[0x1];
8821 + u8 tunnel_stateless_vxlan[0x1];
8822 +
8823 +diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
8824 +index d55c746ac56e2..e00ad1cfb1f1d 100644
8825 +--- a/include/linux/power/max17042_battery.h
8826 ++++ b/include/linux/power/max17042_battery.h
8827 +@@ -69,7 +69,7 @@ enum max17042_register {
8828 + MAX17042_RelaxCFG = 0x2A,
8829 + MAX17042_MiscCFG = 0x2B,
8830 + MAX17042_TGAIN = 0x2C,
8831 +- MAx17042_TOFF = 0x2D,
8832 ++ MAX17042_TOFF = 0x2D,
8833 + MAX17042_CGAIN = 0x2E,
8834 + MAX17042_COFF = 0x2F,
8835 +
8836 +diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
8837 +index e91d51ea028bb..65185d1e07ea6 100644
8838 +--- a/include/linux/sunrpc/svc.h
8839 ++++ b/include/linux/sunrpc/svc.h
8840 +@@ -523,6 +523,7 @@ void svc_wake_up(struct svc_serv *);
8841 + void svc_reserve(struct svc_rqst *rqstp, int space);
8842 + struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
8843 + char * svc_print_addr(struct svc_rqst *, char *, size_t);
8844 ++const char * svc_proc_name(const struct svc_rqst *rqstp);
8845 + int svc_encode_result_payload(struct svc_rqst *rqstp,
8846 + unsigned int offset,
8847 + unsigned int length);
8848 +diff --git a/include/linux/time64.h b/include/linux/time64.h
8849 +index 5117cb5b56561..81b9686a20799 100644
8850 +--- a/include/linux/time64.h
8851 ++++ b/include/linux/time64.h
8852 +@@ -25,7 +25,9 @@ struct itimerspec64 {
8853 + #define TIME64_MIN (-TIME64_MAX - 1)
8854 +
8855 + #define KTIME_MAX ((s64)~((u64)1 << 63))
8856 ++#define KTIME_MIN (-KTIME_MAX - 1)
8857 + #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
8858 ++#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC)
8859 +
8860 + /*
8861 + * Limits for settimeofday():
8862 +@@ -124,10 +126,13 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts)
8863 + */
8864 + static inline s64 timespec64_to_ns(const struct timespec64 *ts)
8865 + {
8866 +- /* Prevent multiplication overflow */
8867 +- if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
8868 ++ /* Prevent multiplication overflow / underflow */
8869 ++ if (ts->tv_sec >= KTIME_SEC_MAX)
8870 + return KTIME_MAX;
8871 +
8872 ++ if (ts->tv_sec <= KTIME_SEC_MIN)
8873 ++ return KTIME_MIN;
8874 ++
8875 + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
8876 + }
8877 +
8878 +diff --git a/include/net/dsa.h b/include/net/dsa.h
8879 +index e1a2610a0e06e..f91317d2df9d6 100644
8880 +--- a/include/net/dsa.h
8881 ++++ b/include/net/dsa.h
8882 +@@ -643,8 +643,6 @@ struct dsa_switch_ops {
8883 + int (*port_bridge_flags)(struct dsa_switch *ds, int port,
8884 + struct switchdev_brport_flags flags,
8885 + struct netlink_ext_ack *extack);
8886 +- int (*port_set_mrouter)(struct dsa_switch *ds, int port, bool mrouter,
8887 +- struct netlink_ext_ack *extack);
8888 +
8889 + /*
8890 + * VLAN support
8891 +diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
8892 +index ec7823921bd26..b420f905f4f6e 100644
8893 +--- a/include/net/pkt_cls.h
8894 ++++ b/include/net/pkt_cls.h
8895 +@@ -820,10 +820,9 @@ enum tc_htb_command {
8896 + struct tc_htb_qopt_offload {
8897 + struct netlink_ext_ack *extack;
8898 + enum tc_htb_command command;
8899 +- u16 classid;
8900 + u32 parent_classid;
8901 ++ u16 classid;
8902 + u16 qid;
8903 +- u16 moved_qid;
8904 + u64 rate;
8905 + u64 ceil;
8906 + };
8907 +diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
8908 +index abb8b24744fdb..08e65dcbd1e35 100644
8909 +--- a/include/trace/events/io_uring.h
8910 ++++ b/include/trace/events/io_uring.h
8911 +@@ -295,14 +295,14 @@ TRACE_EVENT(io_uring_fail_link,
8912 + */
8913 + TRACE_EVENT(io_uring_complete,
8914 +
8915 +- TP_PROTO(void *ctx, u64 user_data, long res, unsigned cflags),
8916 ++ TP_PROTO(void *ctx, u64 user_data, int res, unsigned cflags),
8917 +
8918 + TP_ARGS(ctx, user_data, res, cflags),
8919 +
8920 + TP_STRUCT__entry (
8921 + __field( void *, ctx )
8922 + __field( u64, user_data )
8923 +- __field( long, res )
8924 ++ __field( int, res )
8925 + __field( unsigned, cflags )
8926 + ),
8927 +
8928 +@@ -313,7 +313,7 @@ TRACE_EVENT(io_uring_complete,
8929 + __entry->cflags = cflags;
8930 + ),
8931 +
8932 +- TP_printk("ring %p, user_data 0x%llx, result %ld, cflags %x",
8933 ++ TP_printk("ring %p, user_data 0x%llx, result %d, cflags %x",
8934 + __entry->ctx, (unsigned long long)__entry->user_data,
8935 + __entry->res, __entry->cflags)
8936 + );
8937 +diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
8938 +index d02e01a27b690..87569dbf2fe78 100644
8939 +--- a/include/trace/events/sunrpc.h
8940 ++++ b/include/trace/events/sunrpc.h
8941 +@@ -1642,7 +1642,7 @@ TRACE_EVENT(svc_process,
8942 + __field(u32, vers)
8943 + __field(u32, proc)
8944 + __string(service, name)
8945 +- __string(procedure, rqst->rq_procinfo->pc_name)
8946 ++ __string(procedure, svc_proc_name(rqst))
8947 + __string(addr, rqst->rq_xprt ?
8948 + rqst->rq_xprt->xpt_remotebuf : "(null)")
8949 + ),
8950 +@@ -1652,7 +1652,7 @@ TRACE_EVENT(svc_process,
8951 + __entry->vers = rqst->rq_vers;
8952 + __entry->proc = rqst->rq_proc;
8953 + __assign_str(service, name);
8954 +- __assign_str(procedure, rqst->rq_procinfo->pc_name);
8955 ++ __assign_str(procedure, svc_proc_name(rqst));
8956 + __assign_str(addr, rqst->rq_xprt ?
8957 + rqst->rq_xprt->xpt_remotebuf : "(null)");
8958 + ),
8959 +@@ -1918,7 +1918,7 @@ TRACE_EVENT(svc_stats_latency,
8960 + TP_STRUCT__entry(
8961 + __field(u32, xid)
8962 + __field(unsigned long, execute)
8963 +- __string(procedure, rqst->rq_procinfo->pc_name)
8964 ++ __string(procedure, svc_proc_name(rqst))
8965 + __string(addr, rqst->rq_xprt->xpt_remotebuf)
8966 + ),
8967 +
8968 +@@ -1926,7 +1926,7 @@ TRACE_EVENT(svc_stats_latency,
8969 + __entry->xid = be32_to_cpu(rqst->rq_xid);
8970 + __entry->execute = ktime_to_us(ktime_sub(ktime_get(),
8971 + rqst->rq_stime));
8972 +- __assign_str(procedure, rqst->rq_procinfo->pc_name);
8973 ++ __assign_str(procedure, svc_proc_name(rqst));
8974 + __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
8975 + ),
8976 +
8977 +diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
8978 +index ec6d85a817449..353f06cf210e9 100644
8979 +--- a/include/uapi/linux/bpf.h
8980 ++++ b/include/uapi/linux/bpf.h
8981 +@@ -3222,7 +3222,7 @@ union bpf_attr {
8982 + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
8983 + * Description
8984 + * Select a **SO_REUSEPORT** socket from a
8985 +- * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
8986 ++ * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
8987 + * It checks that the selected socket matches the incoming
8988 + * request in the socket buffer.
8989 + * Return
8990 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
8991 +index 6b58fd978b703..d810f9e0ed9d5 100644
8992 +--- a/kernel/bpf/verifier.c
8993 ++++ b/kernel/bpf/verifier.c
8994 +@@ -11383,10 +11383,11 @@ static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env)
8995 + * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying
8996 + * [0, off) and [off, end) to new locations, so the patched range stays zero
8997 + */
8998 +-static int adjust_insn_aux_data(struct bpf_verifier_env *env,
8999 +- struct bpf_prog *new_prog, u32 off, u32 cnt)
9000 ++static void adjust_insn_aux_data(struct bpf_verifier_env *env,
9001 ++ struct bpf_insn_aux_data *new_data,
9002 ++ struct bpf_prog *new_prog, u32 off, u32 cnt)
9003 + {
9004 +- struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
9005 ++ struct bpf_insn_aux_data *old_data = env->insn_aux_data;
9006 + struct bpf_insn *insn = new_prog->insnsi;
9007 + u32 old_seen = old_data[off].seen;
9008 + u32 prog_len;
9009 +@@ -11399,12 +11400,9 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
9010 + old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1);
9011 +
9012 + if (cnt == 1)
9013 +- return 0;
9014 ++ return;
9015 + prog_len = new_prog->len;
9016 +- new_data = vzalloc(array_size(prog_len,
9017 +- sizeof(struct bpf_insn_aux_data)));
9018 +- if (!new_data)
9019 +- return -ENOMEM;
9020 ++
9021 + memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
9022 + memcpy(new_data + off + cnt - 1, old_data + off,
9023 + sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
9024 +@@ -11415,7 +11413,6 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
9025 + }
9026 + env->insn_aux_data = new_data;
9027 + vfree(old_data);
9028 +- return 0;
9029 + }
9030 +
9031 + static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
9032 +@@ -11450,6 +11447,14 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
9033 + const struct bpf_insn *patch, u32 len)
9034 + {
9035 + struct bpf_prog *new_prog;
9036 ++ struct bpf_insn_aux_data *new_data = NULL;
9037 ++
9038 ++ if (len > 1) {
9039 ++ new_data = vzalloc(array_size(env->prog->len + len - 1,
9040 ++ sizeof(struct bpf_insn_aux_data)));
9041 ++ if (!new_data)
9042 ++ return NULL;
9043 ++ }
9044 +
9045 + new_prog = bpf_patch_insn_single(env->prog, off, patch, len);
9046 + if (IS_ERR(new_prog)) {
9047 +@@ -11457,10 +11462,10 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
9048 + verbose(env,
9049 + "insn %d cannot be patched due to 16-bit range\n",
9050 + env->insn_aux_data[off].orig_idx);
9051 ++ vfree(new_data);
9052 + return NULL;
9053 + }
9054 +- if (adjust_insn_aux_data(env, new_prog, off, len))
9055 +- return NULL;
9056 ++ adjust_insn_aux_data(env, new_data, new_prog, off, len);
9057 + adjust_subprog_starts(env, off, len);
9058 + adjust_poke_descs(new_prog, off, len);
9059 + return new_prog;
9060 +@@ -11977,6 +11982,10 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
9061 + if (is_narrower_load && size < target_size) {
9062 + u8 shift = bpf_ctx_narrow_access_offset(
9063 + off, size, size_default) * 8;
9064 ++ if (shift && cnt + 1 >= ARRAY_SIZE(insn_buf)) {
9065 ++ verbose(env, "bpf verifier narrow ctx load misconfigured\n");
9066 ++ return -EINVAL;
9067 ++ }
9068 + if (ctx_field_size <= 4) {
9069 + if (shift)
9070 + insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH,
9071 +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
9072 +index adb5190c44296..13b5be6df4da2 100644
9073 +--- a/kernel/cgroup/cpuset.c
9074 ++++ b/kernel/cgroup/cpuset.c
9075 +@@ -1114,7 +1114,7 @@ enum subparts_cmd {
9076 + * cpus_allowed can be granted or an error code will be returned.
9077 + *
9078 + * For partcmd_disable, the cpuset is being transformed from a partition
9079 +- * root back to a non-partition root. any CPUs in cpus_allowed that are in
9080 ++ * root back to a non-partition root. Any CPUs in cpus_allowed that are in
9081 + * parent's subparts_cpus will be taken away from that cpumask and put back
9082 + * into parent's effective_cpus. 0 should always be returned.
9083 + *
9084 +@@ -1148,6 +1148,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
9085 + struct cpuset *parent = parent_cs(cpuset);
9086 + int adding; /* Moving cpus from effective_cpus to subparts_cpus */
9087 + int deleting; /* Moving cpus from subparts_cpus to effective_cpus */
9088 ++ int new_prs;
9089 + bool part_error = false; /* Partition error? */
9090 +
9091 + percpu_rwsem_assert_held(&cpuset_rwsem);
9092 +@@ -1183,6 +1184,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
9093 + * A cpumask update cannot make parent's effective_cpus become empty.
9094 + */
9095 + adding = deleting = false;
9096 ++ new_prs = cpuset->partition_root_state;
9097 + if (cmd == partcmd_enable) {
9098 + cpumask_copy(tmp->addmask, cpuset->cpus_allowed);
9099 + adding = true;
9100 +@@ -1225,7 +1227,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
9101 + /*
9102 + * partcmd_update w/o newmask:
9103 + *
9104 +- * addmask = cpus_allowed & parent->effectiveb_cpus
9105 ++ * addmask = cpus_allowed & parent->effective_cpus
9106 + *
9107 + * Note that parent's subparts_cpus may have been
9108 + * pre-shrunk in case there is a change in the cpu list.
9109 +@@ -1247,11 +1249,11 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
9110 + switch (cpuset->partition_root_state) {
9111 + case PRS_ENABLED:
9112 + if (part_error)
9113 +- cpuset->partition_root_state = PRS_ERROR;
9114 ++ new_prs = PRS_ERROR;
9115 + break;
9116 + case PRS_ERROR:
9117 + if (!part_error)
9118 +- cpuset->partition_root_state = PRS_ENABLED;
9119 ++ new_prs = PRS_ENABLED;
9120 + break;
9121 + }
9122 + /*
9123 +@@ -1260,10 +1262,10 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
9124 + part_error = (prev_prs == PRS_ERROR);
9125 + }
9126 +
9127 +- if (!part_error && (cpuset->partition_root_state == PRS_ERROR))
9128 ++ if (!part_error && (new_prs == PRS_ERROR))
9129 + return 0; /* Nothing needs to be done */
9130 +
9131 +- if (cpuset->partition_root_state == PRS_ERROR) {
9132 ++ if (new_prs == PRS_ERROR) {
9133 + /*
9134 + * Remove all its cpus from parent's subparts_cpus.
9135 + */
9136 +@@ -1272,7 +1274,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
9137 + parent->subparts_cpus);
9138 + }
9139 +
9140 +- if (!adding && !deleting)
9141 ++ if (!adding && !deleting && (new_prs == cpuset->partition_root_state))
9142 + return 0;
9143 +
9144 + /*
9145 +@@ -1299,6 +1301,9 @@ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd,
9146 + }
9147 +
9148 + parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
9149 ++
9150 ++ if (cpuset->partition_root_state != new_prs)
9151 ++ cpuset->partition_root_state = new_prs;
9152 + spin_unlock_irq(&callback_lock);
9153 +
9154 + return cmd == partcmd_update;
9155 +@@ -1321,6 +1326,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
9156 + struct cpuset *cp;
9157 + struct cgroup_subsys_state *pos_css;
9158 + bool need_rebuild_sched_domains = false;
9159 ++ int new_prs;
9160 +
9161 + rcu_read_lock();
9162 + cpuset_for_each_descendant_pre(cp, pos_css, cs) {
9163 +@@ -1360,17 +1366,18 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
9164 + * update_tasks_cpumask() again for tasks in the parent
9165 + * cpuset if the parent's subparts_cpus changes.
9166 + */
9167 +- if ((cp != cs) && cp->partition_root_state) {
9168 ++ new_prs = cp->partition_root_state;
9169 ++ if ((cp != cs) && new_prs) {
9170 + switch (parent->partition_root_state) {
9171 + case PRS_DISABLED:
9172 + /*
9173 + * If parent is not a partition root or an
9174 +- * invalid partition root, clear the state
9175 +- * state and the CS_CPU_EXCLUSIVE flag.
9176 ++ * invalid partition root, clear its state
9177 ++ * and its CS_CPU_EXCLUSIVE flag.
9178 + */
9179 + WARN_ON_ONCE(cp->partition_root_state
9180 + != PRS_ERROR);
9181 +- cp->partition_root_state = 0;
9182 ++ new_prs = PRS_DISABLED;
9183 +
9184 + /*
9185 + * clear_bit() is an atomic operation and
9186 +@@ -1391,11 +1398,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
9187 + /*
9188 + * When parent is invalid, it has to be too.
9189 + */
9190 +- cp->partition_root_state = PRS_ERROR;
9191 +- if (cp->nr_subparts_cpus) {
9192 +- cp->nr_subparts_cpus = 0;
9193 +- cpumask_clear(cp->subparts_cpus);
9194 +- }
9195 ++ new_prs = PRS_ERROR;
9196 + break;
9197 + }
9198 + }
9199 +@@ -1407,8 +1410,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
9200 + spin_lock_irq(&callback_lock);
9201 +
9202 + cpumask_copy(cp->effective_cpus, tmp->new_cpus);
9203 +- if (cp->nr_subparts_cpus &&
9204 +- (cp->partition_root_state != PRS_ENABLED)) {
9205 ++ if (cp->nr_subparts_cpus && (new_prs != PRS_ENABLED)) {
9206 + cp->nr_subparts_cpus = 0;
9207 + cpumask_clear(cp->subparts_cpus);
9208 + } else if (cp->nr_subparts_cpus) {
9209 +@@ -1435,6 +1437,10 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp)
9210 + = cpumask_weight(cp->subparts_cpus);
9211 + }
9212 + }
9213 ++
9214 ++ if (new_prs != cp->partition_root_state)
9215 ++ cp->partition_root_state = new_prs;
9216 ++
9217 + spin_unlock_irq(&callback_lock);
9218 +
9219 + WARN_ON(!is_in_v2_mode() &&
9220 +@@ -1937,34 +1943,32 @@ out:
9221 +
9222 + /*
9223 + * update_prstate - update partition_root_state
9224 +- * cs: the cpuset to update
9225 +- * val: 0 - disabled, 1 - enabled
9226 ++ * cs: the cpuset to update
9227 ++ * new_prs: new partition root state
9228 + *
9229 + * Call with cpuset_mutex held.
9230 + */
9231 +-static int update_prstate(struct cpuset *cs, int val)
9232 ++static int update_prstate(struct cpuset *cs, int new_prs)
9233 + {
9234 +- int err;
9235 ++ int err, old_prs = cs->partition_root_state;
9236 + struct cpuset *parent = parent_cs(cs);
9237 +- struct tmpmasks tmp;
9238 ++ struct tmpmasks tmpmask;
9239 +
9240 +- if ((val != 0) && (val != 1))
9241 +- return -EINVAL;
9242 +- if (val == cs->partition_root_state)
9243 ++ if (old_prs == new_prs)
9244 + return 0;
9245 +
9246 + /*
9247 + * Cannot force a partial or invalid partition root to a full
9248 + * partition root.
9249 + */
9250 +- if (val && cs->partition_root_state)
9251 ++ if (new_prs && (old_prs == PRS_ERROR))
9252 + return -EINVAL;
9253 +
9254 +- if (alloc_cpumasks(NULL, &tmp))
9255 ++ if (alloc_cpumasks(NULL, &tmpmask))
9256 + return -ENOMEM;
9257 +
9258 + err = -EINVAL;
9259 +- if (!cs->partition_root_state) {
9260 ++ if (!old_prs) {
9261 + /*
9262 + * Turning on partition root requires setting the
9263 + * CS_CPU_EXCLUSIVE bit implicitly as well and cpus_allowed
9264 +@@ -1978,31 +1982,27 @@ static int update_prstate(struct cpuset *cs, int val)
9265 + goto out;
9266 +
9267 + err = update_parent_subparts_cpumask(cs, partcmd_enable,
9268 +- NULL, &tmp);
9269 ++ NULL, &tmpmask);
9270 + if (err) {
9271 + update_flag(CS_CPU_EXCLUSIVE, cs, 0);
9272 + goto out;
9273 + }
9274 +- cs->partition_root_state = PRS_ENABLED;
9275 + } else {
9276 + /*
9277 + * Turning off partition root will clear the
9278 + * CS_CPU_EXCLUSIVE bit.
9279 + */
9280 +- if (cs->partition_root_state == PRS_ERROR) {
9281 +- cs->partition_root_state = 0;
9282 ++ if (old_prs == PRS_ERROR) {
9283 + update_flag(CS_CPU_EXCLUSIVE, cs, 0);
9284 + err = 0;
9285 + goto out;
9286 + }
9287 +
9288 + err = update_parent_subparts_cpumask(cs, partcmd_disable,
9289 +- NULL, &tmp);
9290 ++ NULL, &tmpmask);
9291 + if (err)
9292 + goto out;
9293 +
9294 +- cs->partition_root_state = 0;
9295 +-
9296 + /* Turning off CS_CPU_EXCLUSIVE will not return error */
9297 + update_flag(CS_CPU_EXCLUSIVE, cs, 0);
9298 + }
9299 +@@ -2015,11 +2015,17 @@ static int update_prstate(struct cpuset *cs, int val)
9300 + update_tasks_cpumask(parent);
9301 +
9302 + if (parent->child_ecpus_count)
9303 +- update_sibling_cpumasks(parent, cs, &tmp);
9304 ++ update_sibling_cpumasks(parent, cs, &tmpmask);
9305 +
9306 + rebuild_sched_domains_locked();
9307 + out:
9308 +- free_cpumasks(NULL, &tmp);
9309 ++ if (!err) {
9310 ++ spin_lock_irq(&callback_lock);
9311 ++ cs->partition_root_state = new_prs;
9312 ++ spin_unlock_irq(&callback_lock);
9313 ++ }
9314 ++
9315 ++ free_cpumasks(NULL, &tmpmask);
9316 + return err;
9317 + }
9318 +
9319 +@@ -3060,7 +3066,7 @@ retry:
9320 + goto retry;
9321 + }
9322 +
9323 +- parent = parent_cs(cs);
9324 ++ parent = parent_cs(cs);
9325 + compute_effective_cpumask(&new_cpus, cs, parent);
9326 + nodes_and(new_mems, cs->mems_allowed, parent->effective_mems);
9327 +
9328 +@@ -3082,8 +3088,10 @@ retry:
9329 + if (is_partition_root(cs) && (cpumask_empty(&new_cpus) ||
9330 + (parent->partition_root_state == PRS_ERROR))) {
9331 + if (cs->nr_subparts_cpus) {
9332 ++ spin_lock_irq(&callback_lock);
9333 + cs->nr_subparts_cpus = 0;
9334 + cpumask_clear(cs->subparts_cpus);
9335 ++ spin_unlock_irq(&callback_lock);
9336 + compute_effective_cpumask(&new_cpus, cs, parent);
9337 + }
9338 +
9339 +@@ -3097,7 +3105,9 @@ retry:
9340 + cpumask_empty(&new_cpus)) {
9341 + update_parent_subparts_cpumask(cs, partcmd_disable,
9342 + NULL, tmp);
9343 ++ spin_lock_irq(&callback_lock);
9344 + cs->partition_root_state = PRS_ERROR;
9345 ++ spin_unlock_irq(&callback_lock);
9346 + }
9347 + cpuset_force_rebuild();
9348 + }
9349 +@@ -3168,6 +3178,13 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
9350 + cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
9351 + mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
9352 +
9353 ++ /*
9354 ++ * In the rare case that hotplug removes all the cpus in subparts_cpus,
9355 + * we assume that cpus are updated.
9356 ++ */
9357 ++ if (!cpus_updated && top_cpuset.nr_subparts_cpus)
9358 ++ cpus_updated = true;
9359 ++
9360 + /* synchronize cpus_allowed to cpu_active_mask */
9361 + if (cpus_updated) {
9362 + spin_lock_irq(&callback_lock);
9363 +diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
9364 +index f7e1d0eccdbc6..246efc74e3f34 100644
9365 +--- a/kernel/cpu_pm.c
9366 ++++ b/kernel/cpu_pm.c
9367 +@@ -13,19 +13,32 @@
9368 + #include <linux/spinlock.h>
9369 + #include <linux/syscore_ops.h>
9370 +
9371 +-static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
9372 ++/*
9373 ++ * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
9374 ++ * Notifications for cpu_pm will be issued by the idle task itself, which can
9375 ++ * never block, IOW it requires using a raw_spinlock_t.
9376 ++ */
9377 ++static struct {
9378 ++ struct raw_notifier_head chain;
9379 ++ raw_spinlock_t lock;
9380 ++} cpu_pm_notifier = {
9381 ++ .chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
9382 ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
9383 ++};
9384 +
9385 + static int cpu_pm_notify(enum cpu_pm_event event)
9386 + {
9387 + int ret;
9388 +
9389 + /*
9390 +- * atomic_notifier_call_chain has a RCU read critical section, which
9391 +- * could be disfunctional in cpu idle. Copy RCU_NONIDLE code to let
9392 +- * RCU know this.
9393 ++ * This introduces a RCU read critical section, which could be
9394 ++ * dysfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
9395 ++ * this.
9396 + */
9397 + rcu_irq_enter_irqson();
9398 +- ret = atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL);
9399 ++ rcu_read_lock();
9400 ++ ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
9401 ++ rcu_read_unlock();
9402 + rcu_irq_exit_irqson();
9403 +
9404 + return notifier_to_errno(ret);
9405 +@@ -33,10 +46,13 @@ static int cpu_pm_notify(enum cpu_pm_event event)
9406 +
9407 + static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
9408 + {
9409 ++ unsigned long flags;
9410 + int ret;
9411 +
9412 + rcu_irq_enter_irqson();
9413 +- ret = atomic_notifier_call_chain_robust(&cpu_pm_notifier_chain, event_up, event_down, NULL);
9414 ++ raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
9415 ++ ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
9416 ++ raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
9417 + rcu_irq_exit_irqson();
9418 +
9419 + return notifier_to_errno(ret);
9420 +@@ -49,12 +65,17 @@ static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event ev
9421 + * Add a driver to a list of drivers that are notified about
9422 + * CPU and CPU cluster low power entry and exit.
9423 + *
9424 +- * This function may sleep, and has the same return conditions as
9425 +- * raw_notifier_chain_register.
9426 ++ * This function has the same return conditions as raw_notifier_chain_register.
9427 + */
9428 + int cpu_pm_register_notifier(struct notifier_block *nb)
9429 + {
9430 +- return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
9431 ++ unsigned long flags;
9432 ++ int ret;
9433 ++
9434 ++ raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
9435 ++ ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
9436 ++ raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
9437 ++ return ret;
9438 + }
9439 + EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
9440 +
9441 +@@ -64,12 +85,17 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
9442 + *
9443 + * Remove a driver from the CPU PM notifier list.
9444 + *
9445 +- * This function may sleep, and has the same return conditions as
9446 +- * raw_notifier_chain_unregister.
9447 ++ * This function has the same return conditions as raw_notifier_chain_unregister.
9448 + */
9449 + int cpu_pm_unregister_notifier(struct notifier_block *nb)
9450 + {
9451 +- return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
9452 ++ unsigned long flags;
9453 ++ int ret;
9454 ++
9455 ++ raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
9456 ++ ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
9457 ++ raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
9458 ++ return ret;
9459 + }
9460 + EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
9461 +
9462 +diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
9463 +index 4d2a702d7aa95..c43e2ac2f8def 100644
9464 +--- a/kernel/irq/timings.c
9465 ++++ b/kernel/irq/timings.c
9466 +@@ -799,12 +799,14 @@ static int __init irq_timings_test_irqs(struct timings_intervals *ti)
9467 +
9468 + __irq_timings_store(irq, irqs, ti->intervals[i]);
9469 + if (irqs->circ_timings[i & IRQ_TIMINGS_MASK] != index) {
9470 ++ ret = -EBADSLT;
9471 + pr_err("Failed to store in the circular buffer\n");
9472 + goto out;
9473 + }
9474 + }
9475 +
9476 + if (irqs->count != ti->count) {
9477 ++ ret = -ERANGE;
9478 + pr_err("Count differs\n");
9479 + goto out;
9480 + }
9481 +diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
9482 +index 013e1b08a1bfb..a03d3d3ff8866 100644
9483 +--- a/kernel/locking/mutex.c
9484 ++++ b/kernel/locking/mutex.c
9485 +@@ -928,7 +928,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
9486 + struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
9487 + {
9488 + struct mutex_waiter waiter;
9489 +- bool first = false;
9490 + struct ww_mutex *ww;
9491 + int ret;
9492 +
9493 +@@ -1007,6 +1006,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
9494 +
9495 + set_current_state(state);
9496 + for (;;) {
9497 ++ bool first;
9498 ++
9499 + /*
9500 + * Once we hold wait_lock, we're serialized against
9501 + * mutex_unlock() handing the lock off to us, do a trylock
9502 +@@ -1035,15 +1036,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
9503 + spin_unlock(&lock->wait_lock);
9504 + schedule_preempt_disabled();
9505 +
9506 +- /*
9507 +- * ww_mutex needs to always recheck its position since its waiter
9508 +- * list is not FIFO ordered.
9509 +- */
9510 +- if (ww_ctx || !first) {
9511 +- first = __mutex_waiter_is_first(lock, &waiter);
9512 +- if (first)
9513 +- __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
9514 +- }
9515 ++ first = __mutex_waiter_is_first(lock, &waiter);
9516 ++ if (first)
9517 ++ __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
9518 +
9519 + set_current_state(state);
9520 + /*
9521 +diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
9522 +index 0f4530b3a8cd9..a332ccd829e24 100644
9523 +--- a/kernel/power/energy_model.c
9524 ++++ b/kernel/power/energy_model.c
9525 +@@ -170,7 +170,9 @@ static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
9526 + /* Compute the cost of each performance state. */
9527 + fmax = (u64) table[nr_states - 1].frequency;
9528 + for (i = 0; i < nr_states; i++) {
9529 +- table[i].cost = div64_u64(fmax * table[i].power,
9530 ++ unsigned long power_res = em_scale_power(table[i].power);
9531 ++
9532 ++ table[i].cost = div64_u64(fmax * power_res,
9533 + table[i].frequency);
9534 + }
9535 +
9536 +diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
9537 +index 59b95cc5cbdf1..c615fd153cb29 100644
9538 +--- a/kernel/rcu/tree_stall.h
9539 ++++ b/kernel/rcu/tree_stall.h
9540 +@@ -7,6 +7,8 @@
9541 + * Author: Paul E. McKenney <paulmck@×××××××××.com>
9542 + */
9543 +
9544 ++#include <linux/kvm_para.h>
9545 ++
9546 + //////////////////////////////////////////////////////////////////////////////
9547 + //
9548 + // Controlling CPU stall warnings, including delay calculation.
9549 +@@ -267,8 +269,10 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
9550 + struct task_struct *ts[8];
9551 +
9552 + lockdep_assert_irqs_disabled();
9553 +- if (!rcu_preempt_blocked_readers_cgp(rnp))
9554 ++ if (!rcu_preempt_blocked_readers_cgp(rnp)) {
9555 ++ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
9556 + return 0;
9557 ++ }
9558 + pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
9559 + rnp->level, rnp->grplo, rnp->grphi);
9560 + t = list_entry(rnp->gp_tasks->prev,
9561 +@@ -280,8 +284,8 @@ static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
9562 + break;
9563 + }
9564 + raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
9565 +- for (i--; i; i--) {
9566 +- t = ts[i];
9567 ++ while (i) {
9568 ++ t = ts[--i];
9569 + if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
9570 + pr_cont(" P%d", t->pid);
9571 + else
9572 +@@ -695,6 +699,14 @@ static void check_cpu_stall(struct rcu_data *rdp)
9573 + (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
9574 + cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
9575 +
9576 ++ /*
9577 ++ * If a virtual machine is stopped by the host it can look to
9578 ++ * the watchdog like an RCU stall. Check to see if the host
9579 ++ * stopped the vm.
9580 ++ */
9581 ++ if (kvm_check_and_clear_guest_paused())
9582 ++ return;
9583 ++
9584 + /* We haven't checked in, so go dump stack. */
9585 + print_cpu_stall(gps);
9586 + if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
9587 +@@ -704,6 +716,14 @@ static void check_cpu_stall(struct rcu_data *rdp)
9588 + ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
9589 + cmpxchg(&rcu_state.jiffies_stall, js, jn) == js) {
9590 +
9591 ++ /*
9592 ++ * If a virtual machine is stopped by the host it can look to
9593 ++ * the watchdog like an RCU stall. Check to see if the host
9594 ++ * stopped the vm.
9595 ++ */
9596 ++ if (kvm_check_and_clear_guest_paused())
9597 ++ return;
9598 ++
9599 + /* They had a few time units to dump stack, so complain. */
9600 + print_other_cpu_stall(gs2, gps);
9601 + if (READ_ONCE(rcu_cpu_stall_ftrace_dump))
9602 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
9603 +index 15b4d2fb6be38..1e9672d609f7e 100644
9604 +--- a/kernel/sched/core.c
9605 ++++ b/kernel/sched/core.c
9606 +@@ -1281,6 +1281,23 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
9607 + uclamp_rq_dec_id(rq, p, clamp_id);
9608 + }
9609 +
9610 ++static inline void uclamp_rq_reinc_id(struct rq *rq, struct task_struct *p,
9611 ++ enum uclamp_id clamp_id)
9612 ++{
9613 ++ if (!p->uclamp[clamp_id].active)
9614 ++ return;
9615 ++
9616 ++ uclamp_rq_dec_id(rq, p, clamp_id);
9617 ++ uclamp_rq_inc_id(rq, p, clamp_id);
9618 ++
9619 ++ /*
9620 ++ * Make sure to clear the idle flag if we've transiently reached 0
9621 ++ * active tasks on rq.
9622 ++ */
9623 ++ if (clamp_id == UCLAMP_MAX && (rq->uclamp_flags & UCLAMP_FLAG_IDLE))
9624 ++ rq->uclamp_flags &= ~UCLAMP_FLAG_IDLE;
9625 ++}
9626 ++
9627 + static inline void
9628 + uclamp_update_active(struct task_struct *p)
9629 + {
9630 +@@ -1304,12 +1321,8 @@ uclamp_update_active(struct task_struct *p)
9631 + * affecting a valid clamp bucket, the next time it's enqueued,
9632 + * it will already see the updated clamp bucket value.
9633 + */
9634 +- for_each_clamp_id(clamp_id) {
9635 +- if (p->uclamp[clamp_id].active) {
9636 +- uclamp_rq_dec_id(rq, p, clamp_id);
9637 +- uclamp_rq_inc_id(rq, p, clamp_id);
9638 +- }
9639 +- }
9640 ++ for_each_clamp_id(clamp_id)
9641 ++ uclamp_rq_reinc_id(rq, p, clamp_id);
9642 +
9643 + task_rq_unlock(rq, p, &rf);
9644 + }
9645 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
9646 +index 2f9964b467e03..fa29a69e14c9f 100644
9647 +--- a/kernel/sched/deadline.c
9648 ++++ b/kernel/sched/deadline.c
9649 +@@ -1733,6 +1733,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
9650 + */
9651 + raw_spin_lock(&rq->lock);
9652 + if (p->dl.dl_non_contending) {
9653 ++ update_rq_clock(rq);
9654 + sub_running_bw(&p->dl, &rq->dl);
9655 + p->dl.dl_non_contending = 0;
9656 + /*
9657 +@@ -2729,7 +2730,7 @@ void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
9658 + dl_se->dl_runtime = attr->sched_runtime;
9659 + dl_se->dl_deadline = attr->sched_deadline;
9660 + dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
9661 +- dl_se->flags = attr->sched_flags;
9662 ++ dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
9663 + dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
9664 + dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
9665 + }
9666 +@@ -2742,7 +2743,8 @@ void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
9667 + attr->sched_runtime = dl_se->dl_runtime;
9668 + attr->sched_deadline = dl_se->dl_deadline;
9669 + attr->sched_period = dl_se->dl_period;
9670 +- attr->sched_flags = dl_se->flags;
9671 ++ attr->sched_flags &= ~SCHED_DL_FLAGS;
9672 ++ attr->sched_flags |= dl_se->flags;
9673 + }
9674 +
9675 + /*
9676 +@@ -2839,7 +2841,7 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
9677 + if (dl_se->dl_runtime != attr->sched_runtime ||
9678 + dl_se->dl_deadline != attr->sched_deadline ||
9679 + dl_se->dl_period != attr->sched_period ||
9680 +- dl_se->flags != attr->sched_flags)
9681 ++ dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
9682 + return true;
9683 +
9684 + return false;
9685 +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
9686 +index c5aacbd492a19..f8c94bebd17db 100644
9687 +--- a/kernel/sched/debug.c
9688 ++++ b/kernel/sched/debug.c
9689 +@@ -388,6 +388,13 @@ void update_sched_domain_debugfs(void)
9690 + {
9691 + int cpu, i;
9692 +
9693 ++ /*
9694 ++ * This can unfortunately be invoked before sched_debug_init() creates
9695 ++ * the debug directory. Don't touch sd_sysctl_cpus until then.
9696 ++ */
9697 ++ if (!debugfs_sched)
9698 ++ return;
9699 ++
9700 + if (!cpumask_available(sd_sysctl_cpus)) {
9701 + if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
9702 + return;
9703 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
9704 +index f60ef0b4ec330..3889fee98d11c 100644
9705 +--- a/kernel/sched/fair.c
9706 ++++ b/kernel/sched/fair.c
9707 +@@ -1529,7 +1529,7 @@ static inline bool is_core_idle(int cpu)
9708 + if (cpu == sibling)
9709 + continue;
9710 +
9711 +- if (!idle_cpu(cpu))
9712 ++ if (!idle_cpu(sibling))
9713 + return false;
9714 + }
9715 + #endif
9716 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
9717 +index f2bc99ca01e57..9aa157c20722a 100644
9718 +--- a/kernel/sched/sched.h
9719 ++++ b/kernel/sched/sched.h
9720 +@@ -227,6 +227,8 @@ static inline void update_avg(u64 *avg, u64 sample)
9721 + */
9722 + #define SCHED_FLAG_SUGOV 0x10000000
9723 +
9724 ++#define SCHED_DL_FLAGS (SCHED_FLAG_RECLAIM | SCHED_FLAG_DL_OVERRUN | SCHED_FLAG_SUGOV)
9725 ++
9726 + static inline bool dl_entity_is_special(struct sched_dl_entity *dl_se)
9727 + {
9728 + #ifdef CONFIG_CPU_FREQ_GOV_SCHEDUTIL
9729 +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
9730 +index 55a0a243e8718..41f778f3db057 100644
9731 +--- a/kernel/sched/topology.c
9732 ++++ b/kernel/sched/topology.c
9733 +@@ -1372,6 +1372,8 @@ int sched_max_numa_distance;
9734 + static int *sched_domains_numa_distance;
9735 + static struct cpumask ***sched_domains_numa_masks;
9736 + int __read_mostly node_reclaim_distance = RECLAIM_DISTANCE;
9737 ++
9738 ++static unsigned long __read_mostly *sched_numa_onlined_nodes;
9739 + #endif
9740 +
9741 + /*
9742 +@@ -1719,6 +1721,16 @@ void sched_init_numa(void)
9743 + sched_domains_numa_masks[i][j] = mask;
9744 +
9745 + for_each_node(k) {
9746 ++ /*
9747 ++ * Distance information can be unreliable for
9748 ++ * offline nodes, so defer building the node
9749 ++ * masks until their bringup.
9750 ++ * This relies on all unique distance values
9751 ++ * still being visible at init time.
9752 ++ */
9753 ++ if (!node_online(j))
9754 ++ continue;
9755 ++
9756 + if (sched_debug() && (node_distance(j, k) != node_distance(k, j)))
9757 + sched_numa_warn("Node-distance not symmetric");
9758 +
9759 +@@ -1772,6 +1784,53 @@ void sched_init_numa(void)
9760 + sched_max_numa_distance = sched_domains_numa_distance[nr_levels - 1];
9761 +
9762 + init_numa_topology_type();
9763 ++
9764 ++ sched_numa_onlined_nodes = bitmap_alloc(nr_node_ids, GFP_KERNEL);
9765 ++ if (!sched_numa_onlined_nodes)
9766 ++ return;
9767 ++
9768 ++ bitmap_zero(sched_numa_onlined_nodes, nr_node_ids);
9769 ++ for_each_online_node(i)
9770 ++ bitmap_set(sched_numa_onlined_nodes, i, 1);
9771 ++}
9772 ++
9773 ++static void __sched_domains_numa_masks_set(unsigned int node)
9774 ++{
9775 ++ int i, j;
9776 ++
9777 ++ /*
9778 ++ * NUMA masks are not built for offline nodes in sched_init_numa().
9779 ++ * Thus, when a CPU of a never-onlined-before node gets plugged in,
9780 ++ * adding that new CPU to the right NUMA masks is not sufficient: the
9781 ++ * masks of that CPU's node must also be updated.
9782 ++ */
9783 ++ if (test_bit(node, sched_numa_onlined_nodes))
9784 ++ return;
9785 ++
9786 ++ bitmap_set(sched_numa_onlined_nodes, node, 1);
9787 ++
9788 ++ for (i = 0; i < sched_domains_numa_levels; i++) {
9789 ++ for (j = 0; j < nr_node_ids; j++) {
9790 ++ if (!node_online(j) || node == j)
9791 ++ continue;
9792 ++
9793 ++ if (node_distance(j, node) > sched_domains_numa_distance[i])
9794 ++ continue;
9795 ++
9796 ++ /* Add remote nodes in our masks */
9797 ++ cpumask_or(sched_domains_numa_masks[i][node],
9798 ++ sched_domains_numa_masks[i][node],
9799 ++ sched_domains_numa_masks[0][j]);
9800 ++ }
9801 ++ }
9802 ++
9803 ++ /*
9804 ++ * A new node has been brought up, potentially changing the topology
9805 ++ * classification.
9806 ++ *
9807 ++ * Note that this is racy vs any use of sched_numa_topology_type :/
9808 ++ */
9809 ++ init_numa_topology_type();
9810 + }
9811 +
9812 + void sched_domains_numa_masks_set(unsigned int cpu)
9813 +@@ -1779,8 +1838,14 @@ void sched_domains_numa_masks_set(unsigned int cpu)
9814 + int node = cpu_to_node(cpu);
9815 + int i, j;
9816 +
9817 ++ __sched_domains_numa_masks_set(node);
9818 ++
9819 + for (i = 0; i < sched_domains_numa_levels; i++) {
9820 + for (j = 0; j < nr_node_ids; j++) {
9821 ++ if (!node_online(j))
9822 ++ continue;
9823 ++
9824 ++ /* Set ourselves in the remote node's masks */
9825 + if (node_distance(j, node) <= sched_domains_numa_distance[i])
9826 + cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]);
9827 + }
9828 +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
9829 +index 4a66725b1d4ac..5af7584734888 100644
9830 +--- a/kernel/time/hrtimer.c
9831 ++++ b/kernel/time/hrtimer.c
9832 +@@ -758,22 +758,6 @@ static void hrtimer_switch_to_hres(void)
9833 + retrigger_next_event(NULL);
9834 + }
9835 +
9836 +-static void clock_was_set_work(struct work_struct *work)
9837 +-{
9838 +- clock_was_set();
9839 +-}
9840 +-
9841 +-static DECLARE_WORK(hrtimer_work, clock_was_set_work);
9842 +-
9843 +-/*
9844 +- * Called from timekeeping and resume code to reprogram the hrtimer
9845 +- * interrupt device on all cpus.
9846 +- */
9847 +-void clock_was_set_delayed(void)
9848 +-{
9849 +- schedule_work(&hrtimer_work);
9850 +-}
9851 +-
9852 + #else
9853 +
9854 + static inline int hrtimer_is_hres_enabled(void) { return 0; }
9855 +@@ -891,6 +875,22 @@ void clock_was_set(void)
9856 + timerfd_clock_was_set();
9857 + }
9858 +
9859 ++static void clock_was_set_work(struct work_struct *work)
9860 ++{
9861 ++ clock_was_set();
9862 ++}
9863 ++
9864 ++static DECLARE_WORK(hrtimer_work, clock_was_set_work);
9865 ++
9866 ++/*
9867 ++ * Called from timekeeping and resume code to reprogram the hrtimer
9868 ++ * interrupt device on all cpus and to notify timerfd.
9869 ++ */
9870 ++void clock_was_set_delayed(void)
9871 ++{
9872 ++ schedule_work(&hrtimer_work);
9873 ++}
9874 ++
9875 + /*
9876 + * During resume we might have to reprogram the high resolution timer
9877 + * interrupt on all online CPUs. However, all other CPUs will be
9878 +@@ -1030,12 +1030,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
9879 + * remove hrtimer, called with base lock held
9880 + */
9881 + static inline int
9882 +-remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
9883 ++remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base,
9884 ++ bool restart, bool keep_local)
9885 + {
9886 + u8 state = timer->state;
9887 +
9888 + if (state & HRTIMER_STATE_ENQUEUED) {
9889 +- int reprogram;
9890 ++ bool reprogram;
9891 +
9892 + /*
9893 + * Remove the timer and force reprogramming when high
9894 +@@ -1048,8 +1049,16 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
9895 + debug_deactivate(timer);
9896 + reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
9897 +
9898 ++ /*
9899 ++ * If the timer is not restarted then reprogramming is
9900 ++ * required if the timer is local. If it is local and about
9901 ++ * to be restarted, avoid programming it twice (on removal
9902 ++ * and a moment later when it's requeued).
9903 ++ */
9904 + if (!restart)
9905 + state = HRTIMER_STATE_INACTIVE;
9906 ++ else
9907 ++ reprogram &= !keep_local;
9908 +
9909 + __remove_hrtimer(timer, base, state, reprogram);
9910 + return 1;
9911 +@@ -1103,9 +1112,31 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
9912 + struct hrtimer_clock_base *base)
9913 + {
9914 + struct hrtimer_clock_base *new_base;
9915 ++ bool force_local, first;
9916 ++
9917 ++ /*
9918 ++ * If the timer is on the local cpu base and is the first expiring
9919 ++ * timer then this might end up reprogramming the hardware twice
9920 ++ * (on removal and on enqueue). To avoid that, prevent the
9921 ++ * reprogram on removal, keep the timer local to the current CPU
9922 ++ * and enforce reprogramming after it is queued no matter whether
9923 ++ * it is the new first expiring timer again or not.
9924 ++ */
9925 ++ force_local = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
9926 ++ force_local &= base->cpu_base->next_timer == timer;
9927 +
9928 +- /* Remove an active timer from the queue: */
9929 +- remove_hrtimer(timer, base, true);
9930 ++ /*
9931 ++ * Remove an active timer from the queue. In case it is not queued
9932 ++ * on the current CPU, make sure that remove_hrtimer() updates the
9933 ++ * remote data correctly.
9934 ++ *
9935 ++ * If it's on the current CPU and the first expiring timer, then
9936 ++ * skip reprogramming, keep the timer local and enforce
9937 ++ * reprogramming after it is enqueued. This
9938 ++ * avoids programming the underlying clock event twice (once at
9939 ++ * removal and once after enqueue).
9940 ++ */
9941 ++ remove_hrtimer(timer, base, true, force_local);
9942 +
9943 + if (mode & HRTIMER_MODE_REL)
9944 + tim = ktime_add_safe(tim, base->get_time());
9945 +@@ -1115,9 +1146,24 @@ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
9946 + hrtimer_set_expires_range_ns(timer, tim, delta_ns);
9947 +
9948 + /* Switch the timer base, if necessary: */
9949 +- new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
9950 ++ if (!force_local) {
9951 ++ new_base = switch_hrtimer_base(timer, base,
9952 ++ mode & HRTIMER_MODE_PINNED);
9953 ++ } else {
9954 ++ new_base = base;
9955 ++ }
9956 ++
9957 ++ first = enqueue_hrtimer(timer, new_base, mode);
9958 ++ if (!force_local)
9959 ++ return first;
9960 +
9961 +- return enqueue_hrtimer(timer, new_base, mode);
9962 ++ /*
9963 ++ * Timer was forced to stay on the current CPU to avoid
9964 ++ * reprogramming on removal and enqueue. Force reprogram the
9965 ++ * hardware by evaluating the new first expiring timer.
9966 ++ */
9967 ++ hrtimer_force_reprogram(new_base->cpu_base, 1);
9968 ++ return 0;
9969 + }
9970 +
9971 + /**
9972 +@@ -1183,7 +1229,7 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
9973 + base = lock_hrtimer_base(timer, &flags);
9974 +
9975 + if (!hrtimer_callback_running(timer))
9976 +- ret = remove_hrtimer(timer, base, false);
9977 ++ ret = remove_hrtimer(timer, base, false, false);
9978 +
9979 + unlock_hrtimer_base(timer, &flags);
9980 +
9981 +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
9982 +index aa52fc85dbcbf..a9f8d25220b1a 100644
9983 +--- a/kernel/time/posix-cpu-timers.c
9984 ++++ b/kernel/time/posix-cpu-timers.c
9985 +@@ -1346,8 +1346,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clkid,
9986 + }
9987 + }
9988 +
9989 +- if (!*newval)
9990 +- return;
9991 + *newval += now;
9992 + }
9993 +
9994 +diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
9995 +index 7a981c9e87a4a..e61c1244e7d46 100644
9996 +--- a/kernel/time/tick-internal.h
9997 ++++ b/kernel/time/tick-internal.h
9998 +@@ -164,3 +164,6 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
9999 +
10000 + extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
10001 + void timer_clear_idle(void);
10002 ++
10003 ++void clock_was_set(void);
10004 ++void clock_was_set_delayed(void);
10005 +diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
10006 +index 3c63710c20c69..e6c4b3180ab1d 100644
10007 +--- a/lib/mpi/mpiutil.c
10008 ++++ b/lib/mpi/mpiutil.c
10009 +@@ -148,7 +148,7 @@ int mpi_resize(MPI a, unsigned nlimbs)
10010 + return 0; /* no need to do it */
10011 +
10012 + if (a->d) {
10013 +- p = kmalloc_array(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
10014 ++ p = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
10015 + if (!p)
10016 + return -ENOMEM;
10017 + memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t));
10018 +diff --git a/net/6lowpan/debugfs.c b/net/6lowpan/debugfs.c
10019 +index 1c140af06d527..600b9563bfc53 100644
10020 +--- a/net/6lowpan/debugfs.c
10021 ++++ b/net/6lowpan/debugfs.c
10022 +@@ -170,7 +170,8 @@ static void lowpan_dev_debugfs_ctx_init(struct net_device *dev,
10023 + struct dentry *root;
10024 + char buf[32];
10025 +
10026 +- WARN_ON_ONCE(id > LOWPAN_IPHC_CTX_TABLE_SIZE);
10027 ++ if (WARN_ON_ONCE(id >= LOWPAN_IPHC_CTX_TABLE_SIZE))
10028 ++ return;
10029 +
10030 + sprintf(buf, "%d", id);
10031 +
10032 +diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
10033 +index c32638dddbf94..f6b9dc4e408f2 100644
10034 +--- a/net/bluetooth/cmtp/cmtp.h
10035 ++++ b/net/bluetooth/cmtp/cmtp.h
10036 +@@ -26,7 +26,7 @@
10037 + #include <linux/types.h>
10038 + #include <net/bluetooth/bluetooth.h>
10039 +
10040 +-#define BTNAMSIZ 18
10041 ++#define BTNAMSIZ 21
10042 +
10043 + /* CMTP ioctl defines */
10044 + #define CMTPCONNADD _IOW('C', 200, int)
10045 +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
10046 +index ee59d1c7f1f6c..bf1bb08b94aad 100644
10047 +--- a/net/bluetooth/hci_core.c
10048 ++++ b/net/bluetooth/hci_core.c
10049 +@@ -1343,6 +1343,12 @@ int hci_inquiry(void __user *arg)
10050 + goto done;
10051 + }
10052 +
10053 ++ /* Restrict maximum inquiry length to 60 seconds */
10054 ++ if (ir.length > 60) {
10055 ++ err = -EINVAL;
10056 ++ goto done;
10057 ++ }
10058 ++
10059 + hci_dev_lock(hdev);
10060 + if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
10061 + inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
10062 +@@ -1734,6 +1740,14 @@ int hci_dev_do_close(struct hci_dev *hdev)
10063 + hci_request_cancel_all(hdev);
10064 + hci_req_sync_lock(hdev);
10065 +
10066 ++ if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
10067 ++ !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
10068 ++ test_bit(HCI_UP, &hdev->flags)) {
10069 ++ /* Execute vendor specific shutdown routine */
10070 ++ if (hdev->shutdown)
10071 ++ hdev->shutdown(hdev);
10072 ++ }
10073 ++
10074 + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
10075 + cancel_delayed_work_sync(&hdev->cmd_timer);
10076 + hci_req_sync_unlock(hdev);
10077 +diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
10078 +index 470eaabb021f9..6a9826164fd7f 100644
10079 +--- a/net/bluetooth/mgmt.c
10080 ++++ b/net/bluetooth/mgmt.c
10081 +@@ -7725,7 +7725,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
10082 + * advertising.
10083 + */
10084 + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
10085 +- return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
10086 ++ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
10087 + MGMT_STATUS_NOT_SUPPORTED);
10088 +
10089 + if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
10090 +diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
10091 +index 3bd41563f118a..9769a7ceb6898 100644
10092 +--- a/net/bluetooth/sco.c
10093 ++++ b/net/bluetooth/sco.c
10094 +@@ -85,7 +85,6 @@ static void sco_sock_timeout(struct timer_list *t)
10095 + sk->sk_state_change(sk);
10096 + bh_unlock_sock(sk);
10097 +
10098 +- sco_sock_kill(sk);
10099 + sock_put(sk);
10100 + }
10101 +
10102 +@@ -177,7 +176,6 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
10103 + sco_sock_clear_timer(sk);
10104 + sco_chan_del(sk, err);
10105 + bh_unlock_sock(sk);
10106 +- sco_sock_kill(sk);
10107 + sock_put(sk);
10108 + }
10109 +
10110 +@@ -394,8 +392,7 @@ static void sco_sock_cleanup_listen(struct sock *parent)
10111 + */
10112 + static void sco_sock_kill(struct sock *sk)
10113 + {
10114 +- if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket ||
10115 +- sock_flag(sk, SOCK_DEAD))
10116 ++ if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
10117 + return;
10118 +
10119 + BT_DBG("sk %p state %d", sk, sk->sk_state);
10120 +@@ -447,7 +444,6 @@ static void sco_sock_close(struct sock *sk)
10121 + lock_sock(sk);
10122 + __sco_sock_close(sk);
10123 + release_sock(sk);
10124 +- sco_sock_kill(sk);
10125 + }
10126 +
10127 + static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg,
10128 +@@ -773,6 +769,11 @@ static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
10129 + cp.max_latency = cpu_to_le16(0xffff);
10130 + cp.retrans_effort = 0xff;
10131 + break;
10132 ++ default:
10133 ++ /* use CVSD settings as fallback */
10134 ++ cp.max_latency = cpu_to_le16(0xffff);
10135 ++ cp.retrans_effort = 0xff;
10136 ++ break;
10137 + }
10138 +
10139 + hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
10140 +diff --git a/net/core/devlink.c b/net/core/devlink.c
10141 +index 051432ea4f69e..5d01bebffacab 100644
10142 +--- a/net/core/devlink.c
10143 ++++ b/net/core/devlink.c
10144 +@@ -3283,10 +3283,12 @@ static void devlink_param_notify(struct devlink *devlink,
10145 + struct devlink_param_item *param_item,
10146 + enum devlink_command cmd);
10147 +
10148 +-static void devlink_reload_netns_change(struct devlink *devlink,
10149 +- struct net *dest_net)
10150 ++static void devlink_ns_change_notify(struct devlink *devlink,
10151 ++ struct net *dest_net, struct net *curr_net,
10152 ++ bool new)
10153 + {
10154 + struct devlink_param_item *param_item;
10155 ++ enum devlink_command cmd;
10156 +
10157 + /* Userspace needs to be notified about devlink objects
10158 + * removed from the original and entering the new network namespace.
10159 +@@ -3294,17 +3296,18 @@ static void devlink_reload_netns_change(struct devlink *devlink,
10160 + * reload process so the notifications are generated separately.
10161 + */
10162 +
10163 +- list_for_each_entry(param_item, &devlink->param_list, list)
10164 +- devlink_param_notify(devlink, 0, param_item,
10165 +- DEVLINK_CMD_PARAM_DEL);
10166 +- devlink_notify(devlink, DEVLINK_CMD_DEL);
10167 ++ if (!dest_net || net_eq(dest_net, curr_net))
10168 ++ return;
10169 +
10170 +- __devlink_net_set(devlink, dest_net);
10171 ++ if (new)
10172 ++ devlink_notify(devlink, DEVLINK_CMD_NEW);
10173 +
10174 +- devlink_notify(devlink, DEVLINK_CMD_NEW);
10175 ++ cmd = new ? DEVLINK_CMD_PARAM_NEW : DEVLINK_CMD_PARAM_DEL;
10176 + list_for_each_entry(param_item, &devlink->param_list, list)
10177 +- devlink_param_notify(devlink, 0, param_item,
10178 +- DEVLINK_CMD_PARAM_NEW);
10179 ++ devlink_param_notify(devlink, 0, param_item, cmd);
10180 ++
10181 ++ if (!new)
10182 ++ devlink_notify(devlink, DEVLINK_CMD_DEL);
10183 + }
10184 +
10185 + static bool devlink_reload_supported(const struct devlink_ops *ops)
10186 +@@ -3384,6 +3387,7 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net,
10187 + u32 *actions_performed, struct netlink_ext_ack *extack)
10188 + {
10189 + u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
10190 ++ struct net *curr_net;
10191 + int err;
10192 +
10193 + if (!devlink->reload_enabled)
10194 +@@ -3391,18 +3395,22 @@ static int devlink_reload(struct devlink *devlink, struct net *dest_net,
10195 +
10196 + memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
10197 + sizeof(remote_reload_stats));
10198 ++
10199 ++ curr_net = devlink_net(devlink);
10200 ++ devlink_ns_change_notify(devlink, dest_net, curr_net, false);
10201 + err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
10202 + if (err)
10203 + return err;
10204 +
10205 +- if (dest_net && !net_eq(dest_net, devlink_net(devlink)))
10206 +- devlink_reload_netns_change(devlink, dest_net);
10207 ++ if (dest_net && !net_eq(dest_net, curr_net))
10208 ++ __devlink_net_set(devlink, dest_net);
10209 +
10210 + err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
10211 + devlink_reload_failed_set(devlink, !!err);
10212 + if (err)
10213 + return err;
10214 +
10215 ++ devlink_ns_change_notify(devlink, dest_net, curr_net, true);
10216 + WARN_ON(!(*actions_performed & BIT(action)));
10217 + /* Catch driver on updating the remote action within devlink reload */
10218 + WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
10219 +@@ -3599,7 +3607,7 @@ out_free_msg:
10220 +
10221 + static void devlink_flash_update_begin_notify(struct devlink *devlink)
10222 + {
10223 +- struct devlink_flash_notify params = { 0 };
10224 ++ struct devlink_flash_notify params = {};
10225 +
10226 + __devlink_flash_update_notify(devlink,
10227 + DEVLINK_CMD_FLASH_UPDATE,
10228 +@@ -3608,7 +3616,7 @@ static void devlink_flash_update_begin_notify(struct devlink *devlink)
10229 +
10230 + static void devlink_flash_update_end_notify(struct devlink *devlink)
10231 + {
10232 +- struct devlink_flash_notify params = { 0 };
10233 ++ struct devlink_flash_notify params = {};
10234 +
10235 + __devlink_flash_update_notify(devlink,
10236 + DEVLINK_CMD_FLASH_UPDATE_END,
10237 +diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
10238 +index 92282de54230f..1bf602f30ce4c 100644
10239 +--- a/net/dsa/dsa_priv.h
10240 ++++ b/net/dsa/dsa_priv.h
10241 +@@ -211,8 +211,6 @@ int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
10242 + int dsa_port_bridge_flags(const struct dsa_port *dp,
10243 + struct switchdev_brport_flags flags,
10244 + struct netlink_ext_ack *extack);
10245 +-int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
10246 +- struct netlink_ext_ack *extack);
10247 + int dsa_port_vlan_add(struct dsa_port *dp,
10248 + const struct switchdev_obj_port_vlan *vlan,
10249 + struct netlink_ext_ack *extack);
10250 +diff --git a/net/dsa/port.c b/net/dsa/port.c
10251 +index 6379d66a6bb32..c3ffbd41331a9 100644
10252 +--- a/net/dsa/port.c
10253 ++++ b/net/dsa/port.c
10254 +@@ -186,10 +186,6 @@ static int dsa_port_switchdev_sync(struct dsa_port *dp,
10255 + if (err && err != -EOPNOTSUPP)
10256 + return err;
10257 +
10258 +- err = dsa_port_mrouter(dp->cpu_dp, br_multicast_router(br), extack);
10259 +- if (err && err != -EOPNOTSUPP)
10260 +- return err;
10261 +-
10262 + err = dsa_port_ageing_time(dp, br_get_ageing_time(br));
10263 + if (err && err != -EOPNOTSUPP)
10264 + return err;
10265 +@@ -235,12 +231,6 @@ static void dsa_port_switchdev_unsync(struct dsa_port *dp)
10266 +
10267 + /* VLAN filtering is handled by dsa_switch_bridge_leave */
10268 +
10269 +- /* Some drivers treat the notification for having a local multicast
10270 +- * router by allowing multicast to be flooded to the CPU, so we should
10271 +- * allow this in standalone mode too.
10272 +- */
10273 +- dsa_port_mrouter(dp->cpu_dp, true, NULL);
10274 +-
10275 + /* Ageing time may be global to the switch chip, so don't change it
10276 + * here because we have no good reason (or value) to change it to.
10277 + */
10278 +@@ -555,17 +545,6 @@ int dsa_port_bridge_flags(const struct dsa_port *dp,
10279 + return ds->ops->port_bridge_flags(ds, dp->index, flags, extack);
10280 + }
10281 +
10282 +-int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
10283 +- struct netlink_ext_ack *extack)
10284 +-{
10285 +- struct dsa_switch *ds = dp->ds;
10286 +-
10287 +- if (!ds->ops->port_set_mrouter)
10288 +- return -EOPNOTSUPP;
10289 +-
10290 +- return ds->ops->port_set_mrouter(ds, dp->index, mrouter, extack);
10291 +-}
10292 +-
10293 + int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
10294 + bool propagate_upstream)
10295 + {
10296 +diff --git a/net/dsa/slave.c b/net/dsa/slave.c
10297 +index d4756b9201089..5882159137eaf 100644
10298 +--- a/net/dsa/slave.c
10299 ++++ b/net/dsa/slave.c
10300 +@@ -311,12 +311,6 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
10301 +
10302 + ret = dsa_port_bridge_flags(dp, attr->u.brport_flags, extack);
10303 + break;
10304 +- case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
10305 +- if (!dsa_port_offloads_bridge(dp, attr->orig_dev))
10306 +- return -EOPNOTSUPP;
10307 +-
10308 +- ret = dsa_port_mrouter(dp->cpu_dp, attr->u.mrouter, extack);
10309 +- break;
10310 + default:
10311 + ret = -EOPNOTSUPP;
10312 + break;
10313 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
10314 +index d8811e1fbd6c8..f495fad73be90 100644
10315 +--- a/net/ipv4/route.c
10316 ++++ b/net/ipv4/route.c
10317 +@@ -586,18 +586,25 @@ static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
10318 + }
10319 + }
10320 +
10321 +-static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
10322 ++static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
10323 + {
10324 +- struct fib_nh_exception *fnhe, *oldest;
10325 ++ struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
10326 ++ struct fib_nh_exception *fnhe, *oldest = NULL;
10327 +
10328 +- oldest = rcu_dereference(hash->chain);
10329 +- for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
10330 +- fnhe = rcu_dereference(fnhe->fnhe_next)) {
10331 +- if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
10332 ++ for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
10333 ++ fnhe = rcu_dereference_protected(*fnhe_p,
10334 ++ lockdep_is_held(&fnhe_lock));
10335 ++ if (!fnhe)
10336 ++ break;
10337 ++ if (!oldest ||
10338 ++ time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
10339 + oldest = fnhe;
10340 ++ oldest_p = fnhe_p;
10341 ++ }
10342 + }
10343 + fnhe_flush_routes(oldest);
10344 +- return oldest;
10345 ++ *oldest_p = oldest->fnhe_next;
10346 ++ kfree_rcu(oldest, rcu);
10347 + }
10348 +
10349 + static u32 fnhe_hashfun(__be32 daddr)
10350 +@@ -676,16 +683,21 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
10351 + if (rt)
10352 + fill_route_from_fnhe(rt, fnhe);
10353 + } else {
10354 +- if (depth > FNHE_RECLAIM_DEPTH)
10355 +- fnhe = fnhe_oldest(hash);
10356 +- else {
10357 +- fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
10358 +- if (!fnhe)
10359 +- goto out_unlock;
10360 +-
10361 +- fnhe->fnhe_next = hash->chain;
10362 +- rcu_assign_pointer(hash->chain, fnhe);
10363 ++ /* Randomize max depth to avoid some side-channel attacks. */
10364 ++ int max_depth = FNHE_RECLAIM_DEPTH +
10365 ++ prandom_u32_max(FNHE_RECLAIM_DEPTH);
10366 ++
10367 ++ while (depth > max_depth) {
10368 ++ fnhe_remove_oldest(hash);
10369 ++ depth--;
10370 + }
10371 ++
10372 ++ fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
10373 ++ if (!fnhe)
10374 ++ goto out_unlock;
10375 ++
10376 ++ fnhe->fnhe_next = hash->chain;
10377 ++
10378 + fnhe->fnhe_genid = genid;
10379 + fnhe->fnhe_daddr = daddr;
10380 + fnhe->fnhe_gw = gw;
10381 +@@ -693,6 +705,8 @@ static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
10382 + fnhe->fnhe_mtu_locked = lock;
10383 + fnhe->fnhe_expires = max(1UL, expires);
10384 +
10385 ++ rcu_assign_pointer(hash->chain, fnhe);
10386 ++
10387 + /* Exception created; mark the cached routes for the nexthop
10388 + * stale, so anyone caching it rechecks if this exception
10389 + * applies to them.
10390 +@@ -3047,7 +3061,7 @@ static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
10391 + udph = skb_put_zero(skb, sizeof(struct udphdr));
10392 + udph->source = sport;
10393 + udph->dest = dport;
10394 +- udph->len = sizeof(struct udphdr);
10395 ++ udph->len = htons(sizeof(struct udphdr));
10396 + udph->check = 0;
10397 + break;
10398 + }
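The route.c changes above do three things: the oldest exception is now unlinked and kfree_rcu()'d instead of being recycled in place, the chain is trimmed until it fits a randomized cap (FNHE_RECLAIM_DEPTH plus up to FNHE_RECLAIM_DEPTH extra via prandom_u32_max()), and the new entry is published with rcu_assign_pointer() only after all of its fields are initialized. Randomizing the cap keeps an attacker from probing the exact bucket depth as a side channel. (The final hunk is unrelated: udph->len is a wire field, so it must be stored in network byte order, hence the htons().) A rough userspace model of the eviction logic, with rand() standing in for prandom_u32_max() and no RCU or locking:

    #include <stdlib.h>

    #define RECLAIM_DEPTH 5

    struct entry {
            struct entry *next;
            unsigned long stamp;      /* smaller stamp = older entry */
    };

    /* Unlink and free the oldest entry on the chain (no-op when empty). */
    static void remove_oldest(struct entry **chain)
    {
            struct entry **p, **oldest_p = chain;
            struct entry *victim;

            if (!*chain)
                    return;
            for (p = chain; *p; p = &(*p)->next)
                    if ((*p)->stamp < (*oldest_p)->stamp)
                            oldest_p = p;
            victim = *oldest_p;
            *oldest_p = victim->next;
            free(victim);
    }

    /* Insert a new entry after trimming the chain to a randomized cap;
     * returns the new depth. */
    static int insert(struct entry **chain, unsigned long stamp, int depth)
    {
            int max_depth = RECLAIM_DEPTH + rand() % RECLAIM_DEPTH;
            struct entry *e;

            while (depth > max_depth) {
                    remove_oldest(chain);
                    depth--;
            }

            e = calloc(1, sizeof(*e));
            if (!e)
                    return depth;
            e->stamp = stamp;
            /* Fill in all fields before linking, mirroring the deferred
             * rcu_assign_pointer() in the kernel version. */
            e->next = *chain;
            *chain = e;
            return depth + 1;
    }

    int main(void)
    {
            struct entry *chain = NULL;
            int depth = 0;
            unsigned long i;

            for (i = 0; i < 100; i++)
                    depth = insert(&chain, i, depth);
            return 0;
    }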
10399 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
10400 +index 8bb5f7f51dae7..7f3c7d57a39d4 100644
10401 +--- a/net/ipv4/tcp_ipv4.c
10402 ++++ b/net/ipv4/tcp_ipv4.c
10403 +@@ -2440,6 +2440,7 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
10404 + static void *tcp_seek_last_pos(struct seq_file *seq)
10405 + {
10406 + struct tcp_iter_state *st = seq->private;
10407 ++ int bucket = st->bucket;
10408 + int offset = st->offset;
10409 + int orig_num = st->num;
10410 + void *rc = NULL;
10411 +@@ -2450,7 +2451,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
10412 + break;
10413 + st->state = TCP_SEQ_STATE_LISTENING;
10414 + rc = listening_get_next(seq, NULL);
10415 +- while (offset-- && rc)
10416 ++ while (offset-- && rc && bucket == st->bucket)
10417 + rc = listening_get_next(seq, rc);
10418 + if (rc)
10419 + break;
10420 +@@ -2461,7 +2462,7 @@ static void *tcp_seek_last_pos(struct seq_file *seq)
10421 + if (st->bucket > tcp_hashinfo.ehash_mask)
10422 + break;
10423 + rc = established_get_first(seq);
10424 +- while (offset-- && rc)
10425 ++ while (offset-- && rc && bucket == st->bucket)
10426 + rc = established_get_next(seq, rc);
10427 + }
10428 +
10429 +diff --git a/net/ipv6/route.c b/net/ipv6/route.c
10430 +index 67c74469503a5..cd99de5b6882c 100644
10431 +--- a/net/ipv6/route.c
10432 ++++ b/net/ipv6/route.c
10433 +@@ -1657,6 +1657,7 @@ static int rt6_insert_exception(struct rt6_info *nrt,
10434 + struct in6_addr *src_key = NULL;
10435 + struct rt6_exception *rt6_ex;
10436 + struct fib6_nh *nh = res->nh;
10437 ++ int max_depth;
10438 + int err = 0;
10439 +
10440 + spin_lock_bh(&rt6_exception_lock);
10441 +@@ -1711,7 +1712,9 @@ static int rt6_insert_exception(struct rt6_info *nrt,
10442 + bucket->depth++;
10443 + net->ipv6.rt6_stats->fib_rt_cache++;
10444 +
10445 +- if (bucket->depth > FIB6_MAX_DEPTH)
10446 ++ /* Randomize max depth to avoid some side-channel attacks. */
10447 ++ max_depth = FIB6_MAX_DEPTH + prandom_u32_max(FIB6_MAX_DEPTH);
10448 ++ while (bucket->depth > max_depth)
10449 + rt6_exception_remove_oldest(bucket);
10450 +
10451 + out:
10452 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
10453 +index 2651498d05e8e..e61c320974ba7 100644
10454 +--- a/net/mac80211/tx.c
10455 ++++ b/net/mac80211/tx.c
10456 +@@ -3210,7 +3210,9 @@ static bool ieee80211_amsdu_prepare_head(struct ieee80211_sub_if_data *sdata,
10457 + if (info->control.flags & IEEE80211_TX_CTRL_AMSDU)
10458 + return true;
10459 +
10460 +- if (!ieee80211_amsdu_realloc_pad(local, skb, sizeof(*amsdu_hdr)))
10461 ++ if (!ieee80211_amsdu_realloc_pad(local, skb,
10462 ++ sizeof(*amsdu_hdr) +
10463 ++ local->hw.extra_tx_headroom))
10464 + return false;
10465 +
10466 + data = skb_push(skb, sizeof(*amsdu_hdr));
10467 +diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
10468 +index 4f50a64315cf0..50f40943c8153 100644
10469 +--- a/net/netlabel/netlabel_cipso_v4.c
10470 ++++ b/net/netlabel/netlabel_cipso_v4.c
10471 +@@ -187,14 +187,14 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
10472 + }
10473 + doi_def->map.std->lvl.local = kcalloc(doi_def->map.std->lvl.local_size,
10474 + sizeof(u32),
10475 +- GFP_KERNEL);
10476 ++ GFP_KERNEL | __GFP_NOWARN);
10477 + if (doi_def->map.std->lvl.local == NULL) {
10478 + ret_val = -ENOMEM;
10479 + goto add_std_failure;
10480 + }
10481 + doi_def->map.std->lvl.cipso = kcalloc(doi_def->map.std->lvl.cipso_size,
10482 + sizeof(u32),
10483 +- GFP_KERNEL);
10484 ++ GFP_KERNEL | __GFP_NOWARN);
10485 + if (doi_def->map.std->lvl.cipso == NULL) {
10486 + ret_val = -ENOMEM;
10487 + goto add_std_failure;
10488 +@@ -263,7 +263,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
10489 + doi_def->map.std->cat.local = kcalloc(
10490 + doi_def->map.std->cat.local_size,
10491 + sizeof(u32),
10492 +- GFP_KERNEL);
10493 ++ GFP_KERNEL | __GFP_NOWARN);
10494 + if (doi_def->map.std->cat.local == NULL) {
10495 + ret_val = -ENOMEM;
10496 + goto add_std_failure;
10497 +@@ -271,7 +271,7 @@ static int netlbl_cipsov4_add_std(struct genl_info *info,
10498 + doi_def->map.std->cat.cipso = kcalloc(
10499 + doi_def->map.std->cat.cipso_size,
10500 + sizeof(u32),
10501 +- GFP_KERNEL);
10502 ++ GFP_KERNEL | __GFP_NOWARN);
10503 + if (doi_def->map.std->cat.cipso == NULL) {
10504 + ret_val = -ENOMEM;
10505 + goto add_std_failure;
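All four kcalloc() calls patched above take their element counts from a netlink request, so userspace can legitimately ask for more memory than the kernel will grant. __GFP_NOWARN suppresses the allocation-failure warning and stack trace in that case; the caller still sees the NULL return and maps it to -ENOMEM as before. A kernel-style sketch of the pattern (illustrative only, not a buildable module; alloc_user_table is a hypothetical name):

    /* user_count originates in a netlink attribute, so an allocation
     * failure is expected behaviour rather than a kernel bug; do not
     * spam dmesg with a warning when the request is unreasonably large. */
    static u32 *alloc_user_table(size_t user_count)
    {
            return kcalloc(user_count, sizeof(u32), GFP_KERNEL | __GFP_NOWARN);
    }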
10506 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
10507 +index 52b7f6490d248..2e732ea2b82fc 100644
10508 +--- a/net/qrtr/qrtr.c
10509 ++++ b/net/qrtr/qrtr.c
10510 +@@ -493,7 +493,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
10511 + goto err;
10512 + }
10513 +
10514 +- if (!size || len != ALIGN(size, 4) + hdrlen)
10515 ++ if (!size || size & 3 || len != size + hdrlen)
10516 + goto err;
10517 +
10518 + if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
10519 +@@ -506,8 +506,12 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
10520 +
10521 + if (cb->type == QRTR_TYPE_NEW_SERVER) {
10522 + /* Remote node endpoint can bridge other distant nodes */
10523 +- const struct qrtr_ctrl_pkt *pkt = data + hdrlen;
10524 ++ const struct qrtr_ctrl_pkt *pkt;
10525 +
10526 ++ if (size < sizeof(*pkt))
10527 ++ goto err;
10528 ++
10529 ++ pkt = data + hdrlen;
10530 + qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
10531 + }
10532 +
10533 +diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
10534 +index b79a7e27bb315..38a3a8394bbda 100644
10535 +--- a/net/sched/sch_cbq.c
10536 ++++ b/net/sched/sch_cbq.c
10537 +@@ -1614,7 +1614,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
10538 + err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
10539 + if (err) {
10540 + kfree(cl);
10541 +- return err;
10542 ++ goto failure;
10543 + }
10544 +
10545 + if (tca[TCA_RATE]) {
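The one-line cbq fix above is about error-path symmetry: when tcf_block_get() fails, the function previously freed cl and returned directly, skipping the shared teardown behind the failure label (which releases resources acquired earlier in the function, the rate tables); routing the error through goto failure keeps every failure on the same unwind path. A runnable sketch of the idiom:

    #include <stdlib.h>

    struct ctx {
            int *table_a;
            int *table_b;
    };

    /* Centralized unwind: every failure funnels through shared labels,
     * so a later change to the teardown automatically covers all paths. */
    static int ctx_init(struct ctx *c)
    {
            int err = -1;

            c->table_a = malloc(64 * sizeof(int));
            if (!c->table_a)
                    goto failure;

            c->table_b = malloc(64 * sizeof(int));
            if (!c->table_b)
                    goto free_a;

            return 0;

    free_a:
            free(c->table_a);
            c->table_a = NULL;
    failure:
            return err;
    }

    int main(void)
    {
            struct ctx c = { 0 };
            int err = ctx_init(&c);

            free(c.table_b);
            free(c.table_a);
            return err ? 1 : 0;
    }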
10546 +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
10547 +index 8827987ba9034..11bc6bf35f845 100644
10548 +--- a/net/sched/sch_htb.c
10549 ++++ b/net/sched/sch_htb.c
10550 +@@ -125,6 +125,7 @@ struct htb_class {
10551 + struct htb_class_leaf {
10552 + int deficit[TC_HTB_MAXDEPTH];
10553 + struct Qdisc *q;
10554 ++ struct netdev_queue *offload_queue;
10555 + } leaf;
10556 + struct htb_class_inner {
10557 + struct htb_prio clprio[TC_HTB_NUMPRIO];
10558 +@@ -1376,24 +1377,47 @@ htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
10559 + return old_q;
10560 + }
10561 +
10562 +-static void htb_offload_move_qdisc(struct Qdisc *sch, u16 qid_old, u16 qid_new)
10563 ++static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
10564 ++{
10565 ++ struct netdev_queue *queue;
10566 ++
10567 ++ queue = cl->leaf.offload_queue;
10568 ++ if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
10569 ++ WARN_ON(cl->leaf.q->dev_queue != queue);
10570 ++
10571 ++ return queue;
10572 ++}
10573 ++
10574 ++static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
10575 ++ struct htb_class *cl_new, bool destroying)
10576 + {
10577 + struct netdev_queue *queue_old, *queue_new;
10578 + struct net_device *dev = qdisc_dev(sch);
10579 +- struct Qdisc *qdisc;
10580 +
10581 +- queue_old = netdev_get_tx_queue(dev, qid_old);
10582 +- queue_new = netdev_get_tx_queue(dev, qid_new);
10583 ++ queue_old = htb_offload_get_queue(cl_old);
10584 ++ queue_new = htb_offload_get_queue(cl_new);
10585 +
10586 +- if (dev->flags & IFF_UP)
10587 +- dev_deactivate(dev);
10588 +- qdisc = dev_graft_qdisc(queue_old, NULL);
10589 +- qdisc->dev_queue = queue_new;
10590 +- qdisc = dev_graft_qdisc(queue_new, qdisc);
10591 +- if (dev->flags & IFF_UP)
10592 +- dev_activate(dev);
10593 ++ if (!destroying) {
10594 ++ struct Qdisc *qdisc;
10595 +
10596 +- WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
10597 ++ if (dev->flags & IFF_UP)
10598 ++ dev_deactivate(dev);
10599 ++ qdisc = dev_graft_qdisc(queue_old, NULL);
10600 ++ WARN_ON(qdisc != cl_old->leaf.q);
10601 ++ }
10602 ++
10603 ++ if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
10604 ++ cl_old->leaf.q->dev_queue = queue_new;
10605 ++ cl_old->leaf.offload_queue = queue_new;
10606 ++
10607 ++ if (!destroying) {
10608 ++ struct Qdisc *qdisc;
10609 ++
10610 ++ qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
10611 ++ if (dev->flags & IFF_UP)
10612 ++ dev_activate(dev);
10613 ++ WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
10614 ++ }
10615 + }
10616 +
10617 + static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
10618 +@@ -1407,10 +1431,8 @@ static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
10619 + if (cl->level)
10620 + return -EINVAL;
10621 +
10622 +- if (q->offload) {
10623 +- dev_queue = new->dev_queue;
10624 +- WARN_ON(dev_queue != cl->leaf.q->dev_queue);
10625 +- }
10626 ++ if (q->offload)
10627 ++ dev_queue = htb_offload_get_queue(cl);
10628 +
10629 + if (!new) {
10630 + new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
10631 +@@ -1479,6 +1501,8 @@ static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
10632 + parent->ctokens = parent->cbuffer;
10633 + parent->t_c = ktime_get_ns();
10634 + parent->cmode = HTB_CAN_SEND;
10635 ++ if (q->offload)
10636 ++ parent->leaf.offload_queue = cl->leaf.offload_queue;
10637 + }
10638 +
10639 + static void htb_parent_to_leaf_offload(struct Qdisc *sch,
10640 +@@ -1499,6 +1523,7 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
10641 + struct netlink_ext_ack *extack)
10642 + {
10643 + struct tc_htb_qopt_offload offload_opt;
10644 ++ struct netdev_queue *dev_queue;
10645 + struct Qdisc *q = cl->leaf.q;
10646 + struct Qdisc *old = NULL;
10647 + int err;
10648 +@@ -1507,16 +1532,15 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
10649 + return -EINVAL;
10650 +
10651 + WARN_ON(!q);
10652 +- if (!destroying) {
10653 +- /* On destroy of HTB, two cases are possible:
10654 +- * 1. q is a normal qdisc, but q->dev_queue has noop qdisc.
10655 +- * 2. q is a noop qdisc (for nodes that were inner),
10656 +- * q->dev_queue is noop_netdev_queue.
10657 ++ dev_queue = htb_offload_get_queue(cl);
10658 ++ old = htb_graft_helper(dev_queue, NULL);
10659 ++ if (destroying)
10660 ++ /* Before HTB is destroyed, the kernel grafts noop_qdisc to
10661 ++ * all queues.
10662 + */
10663 +- old = htb_graft_helper(q->dev_queue, NULL);
10664 +- WARN_ON(!old);
10665 ++ WARN_ON(!(old->flags & TCQ_F_BUILTIN));
10666 ++ else
10667 + WARN_ON(old != q);
10668 +- }
10669 +
10670 + if (cl->parent) {
10671 + cl->parent->bstats_bias.bytes += q->bstats.bytes;
10672 +@@ -1535,18 +1559,17 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
10673 + if (!err || destroying)
10674 + qdisc_put(old);
10675 + else
10676 +- htb_graft_helper(q->dev_queue, old);
10677 ++ htb_graft_helper(dev_queue, old);
10678 +
10679 + if (last_child)
10680 + return err;
10681 +
10682 +- if (!err && offload_opt.moved_qid != 0) {
10683 +- if (destroying)
10684 +- q->dev_queue = netdev_get_tx_queue(qdisc_dev(sch),
10685 +- offload_opt.qid);
10686 +- else
10687 +- htb_offload_move_qdisc(sch, offload_opt.moved_qid,
10688 +- offload_opt.qid);
10689 ++ if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
10690 ++ u32 classid = TC_H_MAJ(sch->handle) |
10691 ++ TC_H_MIN(offload_opt.classid);
10692 ++ struct htb_class *moved_cl = htb_find(classid, sch);
10693 ++
10694 ++ htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
10695 + }
10696 +
10697 + return err;
10698 +@@ -1669,9 +1692,11 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
10699 + }
10700 +
10701 + if (last_child) {
10702 +- struct netdev_queue *dev_queue;
10703 ++ struct netdev_queue *dev_queue = sch->dev_queue;
10704 ++
10705 ++ if (q->offload)
10706 ++ dev_queue = htb_offload_get_queue(cl);
10707 +
10708 +- dev_queue = q->offload ? cl->leaf.q->dev_queue : sch->dev_queue;
10709 + new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
10710 + cl->parent->common.classid,
10711 + NULL);
10712 +@@ -1843,7 +1868,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
10713 + }
10714 + dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
10715 + } else { /* First child. */
10716 +- dev_queue = parent->leaf.q->dev_queue;
10717 ++ dev_queue = htb_offload_get_queue(parent);
10718 + old_q = htb_graft_helper(dev_queue, NULL);
10719 + WARN_ON(old_q != parent->leaf.q);
10720 + offload_opt = (struct tc_htb_qopt_offload) {
10721 +@@ -1900,6 +1925,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
10722 +
10723 + /* leaf (we) needs elementary qdisc */
10724 + cl->leaf.q = new_q ? new_q : &noop_qdisc;
10725 ++ if (q->offload)
10726 ++ cl->leaf.offload_queue = dev_queue;
10727 +
10728 + cl->parent = parent;
10729 +
10730 +diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
10731 +index 0de918cb3d90d..a47e290b0668e 100644
10732 +--- a/net/sunrpc/svc.c
10733 ++++ b/net/sunrpc/svc.c
10734 +@@ -1629,6 +1629,21 @@ u32 svc_max_payload(const struct svc_rqst *rqstp)
10735 + }
10736 + EXPORT_SYMBOL_GPL(svc_max_payload);
10737 +
10738 ++/**
10739 ++ * svc_proc_name - Return RPC procedure name in string form
10740 ++ * @rqstp: svc_rqst to operate on
10741 ++ *
10742 ++ * Return value:
10743 ++ * Pointer to a NUL-terminated string
10744 ++ */
10745 ++const char *svc_proc_name(const struct svc_rqst *rqstp)
10746 ++{
10747 ++ if (rqstp && rqstp->rq_procinfo)
10748 ++ return rqstp->rq_procinfo->pc_name;
10749 ++ return "unknown";
10750 ++}
10751 ++
10752 ++
10753 + /**
10754 + * svc_encode_result_payload - mark a range of bytes as a result payload
10755 + * @rqstp: svc_rqst to operate on
10756 +diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
10757 +index 5764116125237..c7d7d35867302 100644
10758 +--- a/samples/bpf/xdp_redirect_cpu_user.c
10759 ++++ b/samples/bpf/xdp_redirect_cpu_user.c
10760 +@@ -831,7 +831,7 @@ int main(int argc, char **argv)
10761 + memset(cpu, 0, n_cpus * sizeof(int));
10762 +
10763 + /* Parse commands line args */
10764 +- while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:",
10765 ++ while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzFf:e:r:m:n",
10766 + long_options, &longindex)) != -1) {
10767 + switch (opt) {
10768 + case 'd':
10769 +diff --git a/samples/pktgen/pktgen_sample04_many_flows.sh b/samples/pktgen/pktgen_sample04_many_flows.sh
10770 +index ddce876635aa0..507c1143eb96b 100755
10771 +--- a/samples/pktgen/pktgen_sample04_many_flows.sh
10772 ++++ b/samples/pktgen/pktgen_sample04_many_flows.sh
10773 +@@ -13,13 +13,15 @@ root_check_run_with_sudo "$@"
10774 + # Parameter parsing via include
10775 + source ${basedir}/parameters.sh
10776 + # Set some default params, if they didn't get set
10777 +-[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
10778 ++if [ -z "$DEST_IP" ]; then
10779 ++ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
10780 ++fi
10781 + [ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
10782 + [ -z "$CLONE_SKB" ] && CLONE_SKB="0"
10783 + [ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
10784 + if [ -n "$DEST_IP" ]; then
10785 +- validate_addr $DEST_IP
10786 +- read -r DST_MIN DST_MAX <<< $(parse_addr $DEST_IP)
10787 ++ validate_addr${IP6} $DEST_IP
10788 ++ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
10789 + fi
10790 + if [ -n "$DST_PORT" ]; then
10791 + read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
10792 +@@ -62,8 +64,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
10793 +
10794 + # Single destination
10795 + pg_set $dev "dst_mac $DST_MAC"
10796 +- pg_set $dev "dst_min $DST_MIN"
10797 +- pg_set $dev "dst_max $DST_MAX"
10798 ++ pg_set $dev "dst${IP6}_min $DST_MIN"
10799 ++ pg_set $dev "dst${IP6}_max $DST_MAX"
10800 +
10801 + if [ -n "$DST_PORT" ]; then
10802 + # Single destination port or random port range
10803 +diff --git a/samples/pktgen/pktgen_sample05_flow_per_thread.sh b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
10804 +index 4a65fe2fcee92..160143ebcdd08 100755
10805 +--- a/samples/pktgen/pktgen_sample05_flow_per_thread.sh
10806 ++++ b/samples/pktgen/pktgen_sample05_flow_per_thread.sh
10807 +@@ -17,14 +17,16 @@ root_check_run_with_sudo "$@"
10808 + # Parameter parsing via include
10809 + source ${basedir}/parameters.sh
10810 + # Set some default params, if they didn't get set
10811 +-[ -z "$DEST_IP" ] && DEST_IP="198.18.0.42"
10812 ++if [ -z "$DEST_IP" ]; then
10813 ++ [ -z "$IP6" ] && DEST_IP="198.18.0.42" || DEST_IP="FD00::1"
10814 ++fi
10815 + [ -z "$DST_MAC" ] && DST_MAC="90:e2:ba:ff:ff:ff"
10816 + [ -z "$CLONE_SKB" ] && CLONE_SKB="0"
10817 + [ -z "$BURST" ] && BURST=32
10818 + [ -z "$COUNT" ] && COUNT="0" # Zero means indefinitely
10819 + if [ -n "$DEST_IP" ]; then
10820 +- validate_addr $DEST_IP
10821 +- read -r DST_MIN DST_MAX <<< $(parse_addr $DEST_IP)
10822 ++ validate_addr${IP6} $DEST_IP
10823 ++ read -r DST_MIN DST_MAX <<< $(parse_addr${IP6} $DEST_IP)
10824 + fi
10825 + if [ -n "$DST_PORT" ]; then
10826 + read -r UDP_DST_MIN UDP_DST_MAX <<< $(parse_ports $DST_PORT)
10827 +@@ -52,8 +54,8 @@ for ((thread = $F_THREAD; thread <= $L_THREAD; thread++)); do
10828 +
10829 + # Single destination
10830 + pg_set $dev "dst_mac $DST_MAC"
10831 +- pg_set $dev "dst_min $DST_MIN"
10832 +- pg_set $dev "dst_max $DST_MAX"
10833 ++ pg_set $dev "dst${IP6}_min $DST_MIN"
10834 ++ pg_set $dev "dst${IP6}_max $DST_MAX"
10835 +
10836 + if [ -n "$DST_PORT" ]; then
10837 + # Single destination port or random port range
10838 +diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
10839 +index 12e9250c1bec6..9e72edb8d31af 100644
10840 +--- a/security/integrity/ima/Kconfig
10841 ++++ b/security/integrity/ima/Kconfig
10842 +@@ -6,7 +6,6 @@ config IMA
10843 + select SECURITYFS
10844 + select CRYPTO
10845 + select CRYPTO_HMAC
10846 +- select CRYPTO_MD5
10847 + select CRYPTO_SHA1
10848 + select CRYPTO_HASH_INFO
10849 + select TCG_TPM if HAS_IOMEM && !UML
10850 +diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
10851 +index 1e5c019161738..95cc31525c573 100644
10852 +--- a/security/integrity/ima/ima_mok.c
10853 ++++ b/security/integrity/ima/ima_mok.c
10854 +@@ -21,7 +21,7 @@ struct key *ima_blacklist_keyring;
10855 + /*
10856 + * Allocate the IMA blacklist keyring
10857 + */
10858 +-__init int ima_mok_init(void)
10859 ++static __init int ima_mok_init(void)
10860 + {
10861 + struct key_restriction *restriction;
10862 +
10863 +diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c
10864 +index cd964e023d96e..b9d5d7a0975b3 100644
10865 +--- a/sound/soc/codecs/rt5682-i2c.c
10866 ++++ b/sound/soc/codecs/rt5682-i2c.c
10867 +@@ -117,6 +117,13 @@ static struct snd_soc_dai_driver rt5682_dai[] = {
10868 + },
10869 + };
10870 +
10871 ++static void rt5682_i2c_disable_regulators(void *data)
10872 ++{
10873 ++ struct rt5682_priv *rt5682 = data;
10874 ++
10875 ++ regulator_bulk_disable(ARRAY_SIZE(rt5682->supplies), rt5682->supplies);
10876 ++}
10877 ++
10878 + static int rt5682_i2c_probe(struct i2c_client *i2c,
10879 + const struct i2c_device_id *id)
10880 + {
10881 +@@ -157,6 +164,11 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
10882 + return ret;
10883 + }
10884 +
10885 ++ ret = devm_add_action_or_reset(&i2c->dev, rt5682_i2c_disable_regulators,
10886 ++ rt5682);
10887 ++ if (ret)
10888 ++ return ret;
10889 ++
10890 + ret = regulator_bulk_enable(ARRAY_SIZE(rt5682->supplies),
10891 + rt5682->supplies);
10892 + if (ret) {
10893 +@@ -280,6 +292,13 @@ static void rt5682_i2c_shutdown(struct i2c_client *client)
10894 + rt5682_reset(rt5682);
10895 + }
10896 +
10897 ++static int rt5682_i2c_remove(struct i2c_client *client)
10898 ++{
10899 ++ rt5682_i2c_shutdown(client);
10900 ++
10901 ++ return 0;
10902 ++}
10903 ++
10904 + static const struct of_device_id rt5682_of_match[] = {
10905 + {.compatible = "realtek,rt5682i"},
10906 + {},
10907 +@@ -306,6 +325,7 @@ static struct i2c_driver rt5682_i2c_driver = {
10908 + .probe_type = PROBE_PREFER_ASYNCHRONOUS,
10909 + },
10910 + .probe = rt5682_i2c_probe,
10911 ++ .remove = rt5682_i2c_remove,
10912 + .shutdown = rt5682_i2c_shutdown,
10913 + .id_table = rt5682_i2c_id,
10914 + };
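Two related fixes in the rt5682 hunks above: a devm action is registered right after the regulators are acquired, so regulator_bulk_disable() now runs automatically on any later probe failure and at unbind, and a .remove callback is added that reuses the shutdown sequence. A kernel-style sketch of the devm-action idiom (not a buildable driver; my_priv and the helper names are hypothetical):

    static void my_disable_regulators(void *data)
    {
            struct my_priv *priv = data;     /* hypothetical driver state */

            regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);
    }

    static int my_probe(struct device *dev, struct my_priv *priv)
    {
            int ret;

            ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
                                          priv->supplies);
            if (ret)
                    return ret;

            /* From here on the action fires on every error return and at
             * unbind; no manual 'goto disable' bookkeeping is needed. */
            ret = devm_add_action_or_reset(dev, my_disable_regulators, priv);
            if (ret)
                    return ret;

            return regulator_bulk_enable(ARRAY_SIZE(priv->supplies),
                                         priv->supplies);
    }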
10915 +diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
10916 +index 86c92e03ea5d4..d885ced34f606 100644
10917 +--- a/sound/soc/codecs/wcd9335.c
10918 ++++ b/sound/soc/codecs/wcd9335.c
10919 +@@ -4076,6 +4076,16 @@ static int wcd9335_setup_irqs(struct wcd9335_codec *wcd)
10920 + return ret;
10921 + }
10922 +
10923 ++static void wcd9335_teardown_irqs(struct wcd9335_codec *wcd)
10924 ++{
10925 ++ int i;
10926 ++
10927 ++ /* disable interrupts on all slave ports */
10928 ++ for (i = 0; i < WCD9335_SLIM_NUM_PORT_REG; i++)
10929 ++ regmap_write(wcd->if_regmap, WCD9335_SLIM_PGD_PORT_INT_EN0 + i,
10930 ++ 0x00);
10931 ++}
10932 ++
10933 + static void wcd9335_cdc_sido_ccl_enable(struct wcd9335_codec *wcd,
10934 + bool ccl_flag)
10935 + {
10936 +@@ -4844,6 +4854,7 @@ static void wcd9335_codec_init(struct snd_soc_component *component)
10937 + static int wcd9335_codec_probe(struct snd_soc_component *component)
10938 + {
10939 + struct wcd9335_codec *wcd = dev_get_drvdata(component->dev);
10940 ++ int ret;
10941 + int i;
10942 +
10943 + snd_soc_component_init_regmap(component, wcd->regmap);
10944 +@@ -4861,7 +4872,15 @@ static int wcd9335_codec_probe(struct snd_soc_component *component)
10945 + for (i = 0; i < NUM_CODEC_DAIS; i++)
10946 + INIT_LIST_HEAD(&wcd->dai[i].slim_ch_list);
10947 +
10948 +- return wcd9335_setup_irqs(wcd);
10949 ++ ret = wcd9335_setup_irqs(wcd);
10950 ++ if (ret)
10951 ++ goto free_clsh_ctrl;
10952 ++
10953 ++ return 0;
10954 ++
10955 ++free_clsh_ctrl:
10956 ++ wcd_clsh_ctrl_free(wcd->clsh_ctrl);
10957 ++ return ret;
10958 + }
10959 +
10960 + static void wcd9335_codec_remove(struct snd_soc_component *comp)
10961 +@@ -4869,7 +4888,7 @@ static void wcd9335_codec_remove(struct snd_soc_component *comp)
10962 + struct wcd9335_codec *wcd = dev_get_drvdata(comp->dev);
10963 +
10964 + wcd_clsh_ctrl_free(wcd->clsh_ctrl);
10965 +- free_irq(regmap_irq_get_virq(wcd->irq_data, WCD9335_IRQ_SLIMBUS), wcd);
10966 ++ wcd9335_teardown_irqs(wcd);
10967 + }
10968 +
10969 + static int wcd9335_codec_set_sysclk(struct snd_soc_component *comp,
10970 +diff --git a/sound/soc/fsl/fsl_rpmsg.c b/sound/soc/fsl/fsl_rpmsg.c
10971 +index ea5c973e2e846..d60f4dac6c1b3 100644
10972 +--- a/sound/soc/fsl/fsl_rpmsg.c
10973 ++++ b/sound/soc/fsl/fsl_rpmsg.c
10974 +@@ -165,25 +165,25 @@ static int fsl_rpmsg_probe(struct platform_device *pdev)
10975 + }
10976 +
10977 + /* Get the optional clocks */
10978 +- rpmsg->ipg = devm_clk_get(&pdev->dev, "ipg");
10979 ++ rpmsg->ipg = devm_clk_get_optional(&pdev->dev, "ipg");
10980 + if (IS_ERR(rpmsg->ipg))
10981 +- rpmsg->ipg = NULL;
10982 ++ return PTR_ERR(rpmsg->ipg);
10983 +
10984 +- rpmsg->mclk = devm_clk_get(&pdev->dev, "mclk");
10985 ++ rpmsg->mclk = devm_clk_get_optional(&pdev->dev, "mclk");
10986 + if (IS_ERR(rpmsg->mclk))
10987 +- rpmsg->mclk = NULL;
10988 ++ return PTR_ERR(rpmsg->mclk);
10989 +
10990 +- rpmsg->dma = devm_clk_get(&pdev->dev, "dma");
10991 ++ rpmsg->dma = devm_clk_get_optional(&pdev->dev, "dma");
10992 + if (IS_ERR(rpmsg->dma))
10993 +- rpmsg->dma = NULL;
10994 ++ return PTR_ERR(rpmsg->dma);
10995 +
10996 +- rpmsg->pll8k = devm_clk_get(&pdev->dev, "pll8k");
10997 ++ rpmsg->pll8k = devm_clk_get_optional(&pdev->dev, "pll8k");
10998 + if (IS_ERR(rpmsg->pll8k))
10999 +- rpmsg->pll8k = NULL;
11000 ++ return PTR_ERR(rpmsg->pll8k);
11001 +
11002 +- rpmsg->pll11k = devm_clk_get(&pdev->dev, "pll11k");
11003 ++ rpmsg->pll11k = devm_clk_get_optional(&pdev->dev, "pll11k");
11004 + if (IS_ERR(rpmsg->pll11k))
11005 +- rpmsg->pll11k = NULL;
11006 ++ return PTR_ERR(rpmsg->pll11k);
11007 +
11008 + platform_set_drvdata(pdev, rpmsg);
11009 + pm_runtime_enable(&pdev->dev);
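The switch to devm_clk_get_optional() above changes the failure semantics: when a clock simply is not described for the device, it returns NULL, which every clk_*() call accepts as a no-op, while a genuine error such as -EPROBE_DEFER still comes back as an ERR_PTR and is now propagated instead of being silently squashed to NULL. A minimal fragment of the resulting pattern (clk and dev as in any driver probe):

    /* Optional clock: absent => NULL (clk_prepare_enable(NULL) is a
     * no-op); genuine failure => ERR_PTR that must be returned upward. */
    clk = devm_clk_get_optional(dev, "mclk");
    if (IS_ERR(clk))
            return PTR_ERR(clk);             /* e.g. -EPROBE_DEFER */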
11010 +diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
11011 +index 4b7b4a044f81c..255f8df09d84c 100644
11012 +--- a/sound/soc/intel/boards/kbl_da7219_max98927.c
11013 ++++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
11014 +@@ -199,7 +199,7 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
11015 + }
11016 + if (!strcmp(codec_dai->component->name, MAX98373_DEV0_NAME)) {
11017 + ret = snd_soc_dai_set_tdm_slot(codec_dai,
11018 +- 0x03, 3, 8, 24);
11019 ++ 0x30, 3, 8, 16);
11020 + if (ret < 0) {
11021 + dev_err(runtime->dev,
11022 + "DEV0 TDM slot err:%d\n", ret);
11023 +@@ -208,10 +208,10 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
11024 + }
11025 + if (!strcmp(codec_dai->component->name, MAX98373_DEV1_NAME)) {
11026 + ret = snd_soc_dai_set_tdm_slot(codec_dai,
11027 +- 0x0C, 3, 8, 24);
11028 ++ 0xC0, 3, 8, 16);
11029 + if (ret < 0) {
11030 + dev_err(runtime->dev,
11031 +- "DEV0 TDM slot err:%d\n", ret);
11032 ++ "DEV1 TDM slot err:%d\n", ret);
11033 + return ret;
11034 + }
11035 + }
11036 +@@ -311,24 +311,6 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
11037 + * The above 2 loops are mutually exclusive based on the stream direction,
11038 + * thus rtd_dpcm variable will never be overwritten
11039 + */
11040 +- /*
11041 +- * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE,
11042 +- * where as kblda7219m98927 & kblmax98927 supports S16_LE by default.
11043 +- * Skipping the port wise FE and BE configuration for kblda7219m98373 &
11044 +- * kblmax98373 as the topology (FE & BE) supports S24_LE only.
11045 +- */
11046 +-
11047 +- if (!strcmp(rtd->card->name, "kblda7219m98373") ||
11048 +- !strcmp(rtd->card->name, "kblmax98373")) {
11049 +- /* The ADSP will convert the FE rate to 48k, stereo */
11050 +- rate->min = rate->max = 48000;
11051 +- chan->min = chan->max = DUAL_CHANNEL;
11052 +-
11053 +- /* set SSP to 24 bit */
11054 +- snd_mask_none(fmt);
11055 +- snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE);
11056 +- return 0;
11057 +- }
11058 +
11059 + /*
11060 + * The ADSP will convert the FE rate to 48k, stereo, 24 bit
11061 +@@ -479,31 +461,20 @@ static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
11062 + static int kbl_fe_startup(struct snd_pcm_substream *substream)
11063 + {
11064 + struct snd_pcm_runtime *runtime = substream->runtime;
11065 +- struct snd_soc_pcm_runtime *soc_rt = asoc_substream_to_rtd(substream);
11066 +
11067 + /*
11068 + * On this platform for PCM device we support,
11069 + * 48Khz
11070 + * stereo
11071 ++ * 16 bit audio
11072 + */
11073 +
11074 + runtime->hw.channels_max = DUAL_CHANNEL;
11075 + snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
11076 + &constraints_channels);
11077 +- /*
11078 +- * Setup S24_LE (32 bit container and 24 bit valid data) for
11079 +- * kblda7219m98373 & kblmax98373. For kblda7219m98927 &
11080 +- * kblmax98927 keeping it as 16/16 due to topology FW dependency.
11081 +- */
11082 +- if (!strcmp(soc_rt->card->name, "kblda7219m98373") ||
11083 +- !strcmp(soc_rt->card->name, "kblmax98373")) {
11084 +- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE;
11085 +- snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
11086 +-
11087 +- } else {
11088 +- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
11089 +- snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
11090 +- }
11091 ++
11092 ++ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
11093 ++ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
11094 +
11095 + snd_pcm_hw_constraint_list(runtime, 0,
11096 + SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
11097 +@@ -536,23 +507,11 @@ static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
11098 + static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
11099 + {
11100 + struct snd_pcm_runtime *runtime = substream->runtime;
11101 +- struct snd_soc_pcm_runtime *soc_rt = asoc_substream_to_rtd(substream);
11102 +
11103 + runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
11104 + snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
11105 + &constraints_channels_quad);
11106 +
11107 +- /*
11108 +- * Topology for kblda7219m98373 & kblmax98373 supports only S24_LE.
11109 +- * The DMIC also configured for S24_LE. Forcing the DMIC format to
11110 +- * S24_LE due to the topology FW dependency.
11111 +- */
11112 +- if (!strcmp(soc_rt->card->name, "kblda7219m98373") ||
11113 +- !strcmp(soc_rt->card->name, "kblmax98373")) {
11114 +- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_LE;
11115 +- snd_pcm_hw_constraint_msbits(runtime, 0, 32, 24);
11116 +- }
11117 +-
11118 + return snd_pcm_hw_constraint_list(substream->runtime, 0,
11119 + SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
11120 + }
11121 +diff --git a/sound/soc/intel/common/soc-acpi-intel-cml-match.c b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
11122 +index 7f6ef82299698..4d7a181fb8e6b 100644
11123 +--- a/sound/soc/intel/common/soc-acpi-intel-cml-match.c
11124 ++++ b/sound/soc/intel/common/soc-acpi-intel-cml-match.c
11125 +@@ -75,7 +75,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[] = {
11126 + },
11127 + {
11128 + .id = "DLGS7219",
11129 +- .drv_name = "cml_da7219_max98357a",
11130 ++ .drv_name = "cml_da7219_mx98357a",
11131 + .machine_quirk = snd_soc_acpi_codec_list,
11132 + .quirk_data = &max98390_spk_codecs,
11133 + .sof_fw_filename = "sof-cml.ri",
11134 +diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
11135 +index ba5ff468c265a..741bf2f9e081f 100644
11136 +--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
11137 ++++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
11138 +@@ -87,7 +87,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
11139 + },
11140 + {
11141 + .id = "DLGS7219",
11142 +- .drv_name = "kbl_da7219_max98357a",
11143 ++ .drv_name = "kbl_da7219_mx98357a",
11144 + .fw_filename = "intel/dsp_fw_kbl.bin",
11145 + .machine_quirk = snd_soc_acpi_codec_list,
11146 + .quirk_data = &kbl_7219_98357_codecs,
11147 +diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
11148 +index c0fdab39e7c28..09037d555ec49 100644
11149 +--- a/sound/soc/intel/skylake/skl-topology.c
11150 ++++ b/sound/soc/intel/skylake/skl-topology.c
11151 +@@ -113,7 +113,7 @@ static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w,
11152 +
11153 + static void skl_dump_mconfig(struct skl_dev *skl, struct skl_module_cfg *mcfg)
11154 + {
11155 +- struct skl_module_iface *iface = &mcfg->module->formats[0];
11156 ++ struct skl_module_iface *iface = &mcfg->module->formats[mcfg->fmt_idx];
11157 +
11158 + dev_dbg(skl->dev, "Dumping config\n");
11159 + dev_dbg(skl->dev, "Input Format:\n");
11160 +@@ -195,8 +195,8 @@ static void skl_tplg_update_params_fixup(struct skl_module_cfg *m_cfg,
11161 + struct skl_module_fmt *in_fmt, *out_fmt;
11162 +
11163 + /* Fixups will be applied to pin 0 only */
11164 +- in_fmt = &m_cfg->module->formats[0].inputs[0].fmt;
11165 +- out_fmt = &m_cfg->module->formats[0].outputs[0].fmt;
11166 ++ in_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].inputs[0].fmt;
11167 ++ out_fmt = &m_cfg->module->formats[m_cfg->fmt_idx].outputs[0].fmt;
11168 +
11169 + if (params->stream == SNDRV_PCM_STREAM_PLAYBACK) {
11170 + if (is_fe) {
11171 +@@ -239,9 +239,9 @@ static void skl_tplg_update_buffer_size(struct skl_dev *skl,
11172 + /* Since fixups is applied to pin 0 only, ibs, obs needs
11173 + * change for pin 0 only
11174 + */
11175 +- res = &mcfg->module->resources[0];
11176 +- in_fmt = &mcfg->module->formats[0].inputs[0].fmt;
11177 +- out_fmt = &mcfg->module->formats[0].outputs[0].fmt;
11178 ++ res = &mcfg->module->resources[mcfg->res_idx];
11179 ++ in_fmt = &mcfg->module->formats[mcfg->fmt_idx].inputs[0].fmt;
11180 ++ out_fmt = &mcfg->module->formats[mcfg->fmt_idx].outputs[0].fmt;
11181 +
11182 + if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
11183 + multiplier = 5;
11184 +@@ -1463,12 +1463,6 @@ static int skl_tplg_tlv_control_set(struct snd_kcontrol *kcontrol,
11185 + struct skl_dev *skl = get_skl_ctx(w->dapm->dev);
11186 +
11187 + if (ac->params) {
11188 +- /*
11189 +- * Widget data is expected to be stripped of T and L
11190 +- */
11191 +- size -= 2 * sizeof(unsigned int);
11192 +- data += 2;
11193 +-
11194 + if (size > ac->max)
11195 + return -EINVAL;
11196 + ac->size = size;
11197 +@@ -1637,11 +1631,12 @@ int skl_tplg_update_pipe_params(struct device *dev,
11198 + struct skl_module_cfg *mconfig,
11199 + struct skl_pipe_params *params)
11200 + {
11201 +- struct skl_module_res *res = &mconfig->module->resources[0];
11202 ++ struct skl_module_res *res;
11203 + struct skl_dev *skl = get_skl_ctx(dev);
11204 + struct skl_module_fmt *format = NULL;
11205 + u8 cfg_idx = mconfig->pipe->cur_config_idx;
11206 +
11207 ++ res = &mconfig->module->resources[mconfig->res_idx];
11208 + skl_tplg_fill_dma_id(mconfig, params);
11209 + mconfig->fmt_idx = mconfig->mod_cfg[cfg_idx].fmt_idx;
11210 + mconfig->res_idx = mconfig->mod_cfg[cfg_idx].res_idx;
11211 +@@ -1650,9 +1645,9 @@ int skl_tplg_update_pipe_params(struct device *dev,
11212 + return 0;
11213 +
11214 + if (params->stream == SNDRV_PCM_STREAM_PLAYBACK)
11215 +- format = &mconfig->module->formats[0].inputs[0].fmt;
11216 ++ format = &mconfig->module->formats[mconfig->fmt_idx].inputs[0].fmt;
11217 + else
11218 +- format = &mconfig->module->formats[0].outputs[0].fmt;
11219 ++ format = &mconfig->module->formats[mconfig->fmt_idx].outputs[0].fmt;
11220 +
11221 + /* set the hw_params */
11222 + format->s_freq = params->s_freq;
11223 +diff --git a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
11224 +index c4a598cbbdaa1..14e77df06b011 100644
11225 +--- a/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
11226 ++++ b/sound/soc/mediatek/mt8183/mt8183-afe-pcm.c
11227 +@@ -1119,25 +1119,26 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
11228 + afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
11229 + if (IS_ERR(afe->regmap)) {
11230 + dev_err(dev, "could not get regmap from parent\n");
11231 +- return PTR_ERR(afe->regmap);
11232 ++ ret = PTR_ERR(afe->regmap);
11233 ++ goto err_pm_disable;
11234 + }
11235 + ret = regmap_attach_dev(dev, afe->regmap, &mt8183_afe_regmap_config);
11236 + if (ret) {
11237 + dev_warn(dev, "regmap_attach_dev fail, ret %d\n", ret);
11238 +- return ret;
11239 ++ goto err_pm_disable;
11240 + }
11241 +
11242 + rstc = devm_reset_control_get(dev, "audiosys");
11243 + if (IS_ERR(rstc)) {
11244 + ret = PTR_ERR(rstc);
11245 + dev_err(dev, "could not get audiosys reset:%d\n", ret);
11246 +- return ret;
11247 ++ goto err_pm_disable;
11248 + }
11249 +
11250 + ret = reset_control_reset(rstc);
11251 + if (ret) {
11252 + dev_err(dev, "failed to trigger audio reset:%d\n", ret);
11253 +- return ret;
11254 ++ goto err_pm_disable;
11255 + }
11256 +
11257 + /* enable clock for regcache get default value from hw */
11258 +@@ -1147,7 +1148,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
11259 + ret = regmap_reinit_cache(afe->regmap, &mt8183_afe_regmap_config);
11260 + if (ret) {
11261 + dev_err(dev, "regmap_reinit_cache fail, ret %d\n", ret);
11262 +- return ret;
11263 ++ goto err_pm_disable;
11264 + }
11265 +
11266 + pm_runtime_put_sync(&pdev->dev);
11267 +@@ -1160,8 +1161,10 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
11268 + afe->memif_size = MT8183_MEMIF_NUM;
11269 + afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
11270 + GFP_KERNEL);
11271 +- if (!afe->memif)
11272 +- return -ENOMEM;
11273 ++ if (!afe->memif) {
11274 ++ ret = -ENOMEM;
11275 ++ goto err_pm_disable;
11276 ++ }
11277 +
11278 + for (i = 0; i < afe->memif_size; i++) {
11279 + afe->memif[i].data = &memif_data[i];
11280 +@@ -1178,22 +1181,26 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
11281 + afe->irqs_size = MT8183_IRQ_NUM;
11282 + afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
11283 + GFP_KERNEL);
11284 +- if (!afe->irqs)
11285 +- return -ENOMEM;
11286 ++ if (!afe->irqs) {
11287 ++ ret = -ENOMEM;
11288 ++ goto err_pm_disable;
11289 ++ }
11290 +
11291 + for (i = 0; i < afe->irqs_size; i++)
11292 + afe->irqs[i].irq_data = &irq_data[i];
11293 +
11294 + /* request irq */
11295 + irq_id = platform_get_irq(pdev, 0);
11296 +- if (irq_id < 0)
11297 +- return irq_id;
11298 ++ if (irq_id < 0) {
11299 ++ ret = irq_id;
11300 ++ goto err_pm_disable;
11301 ++ }
11302 +
11303 + ret = devm_request_irq(dev, irq_id, mt8183_afe_irq_handler,
11304 + IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
11305 + if (ret) {
11306 + dev_err(dev, "could not request_irq for asys-isr\n");
11307 +- return ret;
11308 ++ goto err_pm_disable;
11309 + }
11310 +
11311 + /* init sub_dais */
11312 +@@ -1204,7 +1211,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
11313 + if (ret) {
11314 + dev_warn(afe->dev, "dai register i %d fail, ret %d\n",
11315 + i, ret);
11316 +- return ret;
11317 ++ goto err_pm_disable;
11318 + }
11319 + }
11320 +
11321 +@@ -1213,7 +1220,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
11322 + if (ret) {
11323 + dev_warn(afe->dev, "mtk_afe_combine_sub_dai fail, ret %d\n",
11324 + ret);
11325 +- return ret;
11326 ++ goto err_pm_disable;
11327 + }
11328 +
11329 + afe->mtk_afe_hardware = &mt8183_afe_hardware;
11330 +@@ -1229,7 +1236,7 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
11331 + NULL, 0);
11332 + if (ret) {
11333 + dev_warn(dev, "err_platform\n");
11334 +- return ret;
11335 ++ goto err_pm_disable;
11336 + }
11337 +
11338 + ret = devm_snd_soc_register_component(afe->dev,
11339 +@@ -1238,10 +1245,14 @@ static int mt8183_afe_pcm_dev_probe(struct platform_device *pdev)
11340 + afe->num_dai_drivers);
11341 + if (ret) {
11342 + dev_warn(dev, "err_dai_component\n");
11343 +- return ret;
11344 ++ goto err_pm_disable;
11345 + }
11346 +
11347 + return ret;
11348 ++
11349 ++err_pm_disable:
11350 ++ pm_runtime_disable(&pdev->dev);
11351 ++ return ret;
11352 + }
11353 +
11354 + static int mt8183_afe_pcm_dev_remove(struct platform_device *pdev)
11355 +diff --git a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
11356 +index 7a1724f5ff4c6..31c280339c503 100644
11357 +--- a/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
11358 ++++ b/sound/soc/mediatek/mt8192/mt8192-afe-pcm.c
11359 +@@ -2229,12 +2229,13 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
11360 + afe->regmap = syscon_node_to_regmap(dev->parent->of_node);
11361 + if (IS_ERR(afe->regmap)) {
11362 + dev_err(dev, "could not get regmap from parent\n");
11363 +- return PTR_ERR(afe->regmap);
11364 ++ ret = PTR_ERR(afe->regmap);
11365 ++ goto err_pm_disable;
11366 + }
11367 + ret = regmap_attach_dev(dev, afe->regmap, &mt8192_afe_regmap_config);
11368 + if (ret) {
11369 + dev_warn(dev, "regmap_attach_dev fail, ret %d\n", ret);
11370 +- return ret;
11371 ++ goto err_pm_disable;
11372 + }
11373 +
11374 + /* enable clock for regcache get default value from hw */
11375 +@@ -2244,7 +2245,7 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
11376 + ret = regmap_reinit_cache(afe->regmap, &mt8192_afe_regmap_config);
11377 + if (ret) {
11378 + dev_err(dev, "regmap_reinit_cache fail, ret %d\n", ret);
11379 +- return ret;
11380 ++ goto err_pm_disable;
11381 + }
11382 +
11383 + pm_runtime_put_sync(&pdev->dev);
11384 +@@ -2257,8 +2258,10 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
11385 + afe->memif_size = MT8192_MEMIF_NUM;
11386 + afe->memif = devm_kcalloc(dev, afe->memif_size, sizeof(*afe->memif),
11387 + GFP_KERNEL);
11388 +- if (!afe->memif)
11389 +- return -ENOMEM;
11390 ++ if (!afe->memif) {
11391 ++ ret = -ENOMEM;
11392 ++ goto err_pm_disable;
11393 ++ }
11394 +
11395 + for (i = 0; i < afe->memif_size; i++) {
11396 + afe->memif[i].data = &memif_data[i];
11397 +@@ -2272,22 +2275,26 @@ static int mt8192_afe_pcm_dev_probe(struct platform_device *pdev)
11398 + afe->irqs_size = MT8192_IRQ_NUM;
11399 + afe->irqs = devm_kcalloc(dev, afe->irqs_size, sizeof(*afe->irqs),
11400 + GFP_KERNEL);
11401 +- if (!afe->irqs)
11402 +- return -ENOMEM;
11403 ++ if (!afe->irqs) {
11404 ++ ret = -ENOMEM;
11405 ++ goto err_pm_disable;
11406 ++ }
11407 +
11408 + for (i = 0; i < afe->irqs_size; i++)
11409 + afe->irqs[i].irq_data = &irq_data[i];
11410 +
11411 + /* request irq */
11412 + irq_id = platform_get_irq(pdev, 0);
11413 +- if (irq_id < 0)
11414 +- return irq_id;
11415 ++ if (irq_id < 0) {
11416 ++ ret = irq_id;
11417 ++ goto err_pm_disable;
11418 ++ }
11419 +
11420 + ret = devm_request_irq(dev, irq_id, mt8192_afe_irq_handler,
11421 + IRQF_TRIGGER_NONE, "asys-isr", (void *)afe);
11422 + if (ret) {
11423 + dev_err(dev, "could not request_irq for Afe_ISR_Handle\n");
11424 +- return ret;
11425 ++ goto err_pm_disable;
11426 + }
11427 +
11428 + /* init sub_dais */
11429 +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
11430 +index da4846c9856af..a51bf4c698799 100644
11431 +--- a/tools/bpf/bpftool/prog.c
11432 ++++ b/tools/bpf/bpftool/prog.c
11433 +@@ -778,6 +778,8 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
11434 + kernel_syms_destroy(&dd);
11435 + }
11436 +
11437 ++ btf__free(btf);
11438 ++
11439 + return 0;
11440 + }
11441 +
11442 +@@ -1897,8 +1899,8 @@ static char *profile_target_name(int tgt_fd)
11443 + struct bpf_prog_info_linear *info_linear;
11444 + struct bpf_func_info *func_info;
11445 + const struct btf_type *t;
11446 ++ struct btf *btf = NULL;
11447 + char *name = NULL;
11448 +- struct btf *btf;
11449 +
11450 + info_linear = bpf_program__get_prog_info_linear(
11451 + tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
11452 +@@ -1922,6 +1924,7 @@ static char *profile_target_name(int tgt_fd)
11453 + }
11454 + name = strdup(btf__name_by_offset(btf, t->name_off));
11455 + out:
11456 ++ btf__free(btf);
11457 + free(info_linear);
11458 + return name;
11459 + }
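Both bpftool hunks close the same leak: the BTF handle obtained during the dump was never released. The fix initializes the pointer to NULL and calls btf__free() unconditionally on the way out; like free(), btf__free(NULL) is a safe no-op, so the early-exit paths need no special casing. A runnable sketch of the NULL-init/unconditional-free idiom:

    #include <ctype.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return an upper-cased copy of s, or NULL on allocation failure. */
    static char *dup_upper(const char *s)
    {
            char *tmp = NULL;        /* NULL-init so the cleanup below is safe */
            char *out = NULL;
            char *p;

            tmp = strdup(s);
            if (!tmp)
                    goto out;
            for (p = tmp; *p; p++)
                    *p = (char)toupper((unsigned char)*p);
            out = strdup(tmp);
    out:
            free(tmp);               /* free(NULL) is a no-op, like btf__free(NULL) */
            return out;
    }

    int main(void)
    {
            char *s = dup_upper("btf");

            free(s);
            return 0;
    }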
11460 +diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
11461 +index ec6d85a817449..353f06cf210e9 100644
11462 +--- a/tools/include/uapi/linux/bpf.h
11463 ++++ b/tools/include/uapi/linux/bpf.h
11464 +@@ -3222,7 +3222,7 @@ union bpf_attr {
11465 + * long bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
11466 + * Description
11467 + * Select a **SO_REUSEPORT** socket from a
11468 +- * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
11469 ++ * **BPF_MAP_TYPE_REUSEPORT_SOCKARRAY** *map*.
11470 + * It checks the selected socket is matching the incoming
11471 + * request in the socket buffer.
11472 + * Return
11473 +diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
11474 +index e43e1896cb4be..0d9d8ed6512b0 100644
11475 +--- a/tools/lib/bpf/Makefile
11476 ++++ b/tools/lib/bpf/Makefile
11477 +@@ -4,8 +4,9 @@
11478 + RM ?= rm
11479 + srctree = $(abs_srctree)
11480 +
11481 ++VERSION_SCRIPT := libbpf.map
11482 + LIBBPF_VERSION := $(shell \
11483 +- grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | \
11484 ++ grep -oE '^LIBBPF_([0-9.]+)' $(VERSION_SCRIPT) | \
11485 + sort -rV | head -n1 | cut -d'_' -f2)
11486 + LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
11487 +
11488 +@@ -110,7 +111,6 @@ SHARED_OBJDIR := $(OUTPUT)sharedobjs/
11489 + STATIC_OBJDIR := $(OUTPUT)staticobjs/
11490 + BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o
11491 + BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o
11492 +-VERSION_SCRIPT := libbpf.map
11493 + BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h
11494 +
11495 + LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET))
11496 +@@ -163,10 +163,10 @@ $(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h
11497 +
11498 + $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
11499 +
11500 +-$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
11501 ++$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED) $(VERSION_SCRIPT)
11502 + $(QUIET_LINK)$(CC) $(LDFLAGS) \
11503 + --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
11504 +- -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -lz -o $@
11505 ++ -Wl,--version-script=$(VERSION_SCRIPT) $< -lelf -lz -o $@
11506 + @ln -sf $(@F) $(OUTPUT)libbpf.so
11507 + @ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
11508 +
11509 +@@ -181,7 +181,7 @@ $(OUTPUT)libbpf.pc:
11510 +
11511 + check: check_abi
11512 +
11513 +-check_abi: $(OUTPUT)libbpf.so
11514 ++check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT)
11515 + @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \
11516 + echo "Warning: Num of global symbols in $(BPF_IN_SHARED)" \
11517 + "($(GLOBAL_SYM_COUNT)) does NOT match with num of" \
11518 +diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
11519 +index c41d9b2b59ace..f6ebda75b0306 100644
11520 +--- a/tools/lib/bpf/libbpf.c
11521 ++++ b/tools/lib/bpf/libbpf.c
11522 +@@ -4409,6 +4409,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
11523 + {
11524 + struct bpf_create_map_attr create_attr;
11525 + struct bpf_map_def *def = &map->def;
11526 ++ int err = 0;
11527 +
11528 + memset(&create_attr, 0, sizeof(create_attr));
11529 +
11530 +@@ -4451,8 +4452,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
11531 +
11532 + if (bpf_map_type__is_map_in_map(def->type)) {
11533 + if (map->inner_map) {
11534 +- int err;
11535 +-
11536 + err = bpf_object__create_map(obj, map->inner_map);
11537 + if (err) {
11538 + pr_warn("map '%s': failed to create inner map: %d\n",
11539 +@@ -4469,8 +4468,8 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
11540 + if (map->fd < 0 && (create_attr.btf_key_type_id ||
11541 + create_attr.btf_value_type_id)) {
11542 + char *cp, errmsg[STRERR_BUFSIZE];
11543 +- int err = -errno;
11544 +
11545 ++ err = -errno;
11546 + cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
11547 + pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
11548 + map->name, cp, err);
11549 +@@ -4482,15 +4481,14 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
11550 + map->fd = bpf_create_map_xattr(&create_attr);
11551 + }
11552 +
11553 +- if (map->fd < 0)
11554 +- return -errno;
11555 ++ err = map->fd < 0 ? -errno : 0;
11556 +
11557 + if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
11558 + bpf_map__destroy(map->inner_map);
11559 + zfree(&map->inner_map);
11560 + }
11561 +
11562 +- return 0;
11563 ++ return err;
11564 + }
11565 +
11566 + static int init_map_slots(struct bpf_map *map)
11567 +@@ -7365,8 +7363,10 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
11568 + kconfig = OPTS_GET(opts, kconfig, NULL);
11569 + if (kconfig) {
11570 + obj->kconfig = strdup(kconfig);
11571 +- if (!obj->kconfig)
11572 +- return ERR_PTR(-ENOMEM);
11573 ++ if (!obj->kconfig) {
11574 ++ err = -ENOMEM;
11575 ++ goto out;
11576 ++ }
11577 + }
11578 +
11579 + err = bpf_object__elf_init(obj);
11580 +diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
11581 +index cdecda1ddd36e..17a9844e4fbf8 100644
11582 +--- a/tools/perf/util/bpf-event.c
11583 ++++ b/tools/perf/util/bpf-event.c
11584 +@@ -296,7 +296,7 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
11585 +
11586 + out:
11587 + free(info_linear);
11588 +- free(btf);
11589 ++ btf__free(btf);
11590 + return err ? -1 : 0;
11591 + }
11592 +
11593 +@@ -486,7 +486,7 @@ static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
11594 + perf_env__fetch_btf(env, btf_id, btf);
11595 +
11596 + out:
11597 +- free(btf);
11598 ++ btf__free(btf);
11599 + close(fd);
11600 + }
11601 +
11602 +diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
11603 +index 5ed674a2f55e8..a14f0098b343d 100644
11604 +--- a/tools/perf/util/bpf_counter.c
11605 ++++ b/tools/perf/util/bpf_counter.c
11606 +@@ -74,8 +74,8 @@ static char *bpf_target_prog_name(int tgt_fd)
11607 + struct bpf_prog_info_linear *info_linear;
11608 + struct bpf_func_info *func_info;
11609 + const struct btf_type *t;
11610 ++ struct btf *btf = NULL;
11611 + char *name = NULL;
11612 +- struct btf *btf;
11613 +
11614 + info_linear = bpf_program__get_prog_info_linear(
11615 + tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
11616 +@@ -99,6 +99,7 @@ static char *bpf_target_prog_name(int tgt_fd)
11617 + }
11618 + name = strdup(btf__name_by_offset(btf, t->name_off));
11619 + out:
11620 ++ btf__free(btf);
11621 + free(info_linear);
11622 + return name;
11623 + }
11624 +diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
11625 +index 0457ae32b2702..5b7dd3227b785 100644
11626 +--- a/tools/testing/selftests/bpf/prog_tests/btf.c
11627 ++++ b/tools/testing/selftests/bpf/prog_tests/btf.c
11628 +@@ -4384,6 +4384,7 @@ skip:
11629 + fprintf(stderr, "OK");
11630 +
11631 + done:
11632 ++ btf__free(btf);
11633 + free(func_info);
11634 + bpf_object__close(obj);
11635 + }
11636 +diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
11637 +index 54380c5e10692..aa96b604b2b31 100644
11638 +--- a/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
11639 ++++ b/tools/testing/selftests/bpf/progs/bpf_iter_tcp4.c
11640 +@@ -122,7 +122,7 @@ static int dump_tcp_sock(struct seq_file *seq, struct tcp_sock *tp,
11641 + }
11642 +
11643 + BPF_SEQ_PRINTF(seq, "%4d: %08X:%04X %08X:%04X ",
11644 +- seq_num, src, srcp, destp, destp);
11645 ++ seq_num, src, srcp, dest, destp);
11646 + BPF_SEQ_PRINTF(seq, "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d ",
11647 + state,
11648 + tp->write_seq - tp->snd_una, rx_queue,
11649 +diff --git a/tools/testing/selftests/bpf/progs/test_core_autosize.c b/tools/testing/selftests/bpf/progs/test_core_autosize.c
11650 +index 44f5aa2e8956f..9a7829c5e4a72 100644
11651 +--- a/tools/testing/selftests/bpf/progs/test_core_autosize.c
11652 ++++ b/tools/testing/selftests/bpf/progs/test_core_autosize.c
11653 +@@ -125,6 +125,16 @@ int handle_downsize(void *ctx)
11654 + return 0;
11655 + }
11656 +
11657 ++#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
11658 ++#define bpf_core_read_int bpf_core_read
11659 ++#else
11660 ++#define bpf_core_read_int(dst, sz, src) ({ \
11661 ++ /* Prevent "subtraction from stack pointer prohibited" */ \
11662 ++ volatile long __off = sizeof(*dst) - (sz); \
11663 ++ bpf_core_read((char *)(dst) + __off, sz, src); \
11664 ++})
11665 ++#endif
11666 ++
11667 + SEC("raw_tp/sys_enter")
11668 + int handle_probed(void *ctx)
11669 + {
11670 +@@ -132,23 +142,23 @@ int handle_probed(void *ctx)
11671 + __u64 tmp;
11672 +
11673 + tmp = 0;
11674 +- bpf_core_read(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
11675 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
11676 + ptr_probed = tmp;
11677 +
11678 + tmp = 0;
11679 +- bpf_core_read(&tmp, bpf_core_field_size(in->val1), &in->val1);
11680 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->val1), &in->val1);
11681 + val1_probed = tmp;
11682 +
11683 + tmp = 0;
11684 +- bpf_core_read(&tmp, bpf_core_field_size(in->val2), &in->val2);
11685 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->val2), &in->val2);
11686 + val2_probed = tmp;
11687 +
11688 + tmp = 0;
11689 +- bpf_core_read(&tmp, bpf_core_field_size(in->val3), &in->val3);
11690 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->val3), &in->val3);
11691 + val3_probed = tmp;
11692 +
11693 + tmp = 0;
11694 +- bpf_core_read(&tmp, bpf_core_field_size(in->val4), &in->val4);
11695 ++ bpf_core_read_int(&tmp, bpf_core_field_size(in->val4), &in->val4);
11696 + val4_probed = tmp;
11697 +
11698 + return 0;
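The bpf_core_read_int() wrapper added above handles a big-endian pitfall: when a field narrower than the destination is read into a wider zeroed buffer, its bytes must land at the high end of that buffer on big-endian machines for the numeric value to come out right, hence the sizeof(*dst) - sz offset. A runnable userspace illustration of the same offset arithmetic:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Copy a sz-byte integer into a wider zeroed destination so the
     * numeric value is preserved on either byte order. */
    static void read_int(uint64_t *dst, size_t sz, const void *src)
    {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            memcpy(dst, src, sz);    /* low-order bytes already at offset 0 */
    #else
            memcpy((char *)dst + sizeof(*dst) - sz, src, sz);
    #endif
    }

    int main(void)
    {
            uint32_t v = 0x12345678;
            uint64_t out = 0;

            read_int(&out, sizeof(v), &v);
            printf("0x%llx\n", (unsigned long long)out);  /* 0x12345678 */
            return 0;
    }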
11699 +diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
11700 +index 51adc42b2b40e..aa38dc4a5e85f 100644
11701 +--- a/tools/testing/selftests/bpf/test_maps.c
11702 ++++ b/tools/testing/selftests/bpf/test_maps.c
11703 +@@ -747,8 +747,8 @@ static void test_sockmap(unsigned int tasks, void *data)
11704 + udp = socket(AF_INET, SOCK_DGRAM, 0);
11705 + i = 0;
11706 + err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY);
11707 +- if (!err) {
11708 +- printf("Failed socket SOCK_DGRAM allowed '%i:%i'\n",
11709 ++ if (err) {
11710 ++ printf("Failed socket update SOCK_DGRAM '%i:%i'\n",
11711 + i, udp);
11712 + goto out_sockmap;
11713 + }