From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Wed, 30 Dec 2020 12:53:39
Message-Id: 1609332799.bdd615062cf3d0452d81dffd70baac4ddcebbf5b.mpagano@gentoo
1 commit: bdd615062cf3d0452d81dffd70baac4ddcebbf5b
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Dec 30 12:53:19 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Dec 30 12:53:19 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=bdd61506
7
8 Linux patch 5.4.86
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1085_linux-5.4.86.patch | 12955 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 12959 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 377ed61..06423b4 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -383,6 +383,10 @@ Patch: 1084_linux-5.4.85.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.85
23
24 +Patch: 1085_linux-5.4.86.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.86
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1085_linux-5.4.86.patch b/1085_linux-5.4.86.patch
33 new file mode 100644
34 index 0000000..2b46e9a
35 --- /dev/null
36 +++ b/1085_linux-5.4.86.patch
37 @@ -0,0 +1,12955 @@
38 +diff --git a/Documentation/x86/topology.rst b/Documentation/x86/topology.rst
39 +index e29739904e37e..7f58010ea86af 100644
40 +--- a/Documentation/x86/topology.rst
41 ++++ b/Documentation/x86/topology.rst
42 +@@ -41,6 +41,8 @@ Package
43 + Packages contain a number of cores plus shared resources, e.g. DRAM
44 + controller, shared caches etc.
45 +
46 ++Modern systems may also use the term 'Die' for package.
47 ++
48 + AMD nomenclature for package is 'Node'.
49 +
50 + Package-related topology information in the kernel:
51 +@@ -53,11 +55,18 @@ Package-related topology information in the kernel:
52 +
53 + The number of dies in a package. This information is retrieved via CPUID.
54 +
55 ++ - cpuinfo_x86.cpu_die_id:
56 ++
57 ++ The physical ID of the die. This information is retrieved via CPUID.
58 ++
59 + - cpuinfo_x86.phys_proc_id:
60 +
61 + The physical ID of the package. This information is retrieved via CPUID
62 + and deduced from the APIC IDs of the cores in the package.
63 +
64 ++ Modern systems use this value for the socket. There may be multiple
65 ++ packages within a socket. This value may differ from cpu_die_id.
66 ++
67 + - cpuinfo_x86.logical_proc_id:
68 +
69 + The logical ID of the package. As we do not trust BIOSes to enumerate the
70 +diff --git a/Makefile b/Makefile
71 +index a2a2546fcda80..e1a94c8d278e6 100644
72 +--- a/Makefile
73 ++++ b/Makefile
74 +@@ -1,7 +1,7 @@
75 + # SPDX-License-Identifier: GPL-2.0
76 + VERSION = 5
77 + PATCHLEVEL = 4
78 +-SUBLEVEL = 85
79 ++SUBLEVEL = 86
80 + EXTRAVERSION =
81 + NAME = Kleptomaniac Octopus
82 +
83 +diff --git a/arch/Kconfig b/arch/Kconfig
84 +index 84653a823d3b0..a8df66e645442 100644
85 +--- a/arch/Kconfig
86 ++++ b/arch/Kconfig
87 +@@ -131,6 +131,22 @@ config UPROBES
88 + managed by the kernel and kept transparent to the probed
89 + application. )
90 +
91 ++config HAVE_64BIT_ALIGNED_ACCESS
92 ++ def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
93 ++ help
94 ++ Some architectures require 64 bit accesses to be 64 bit
95 ++ aligned, which also requires structs containing 64 bit values
96 ++ to be 64 bit aligned too. This includes some 32 bit
97 ++ architectures which can do 64 bit accesses, as well as 64 bit
98 ++ architectures without unaligned access.
99 ++
100 ++ This symbol should be selected by an architecture if 64 bit
101 ++ accesses are required to be 64 bit aligned in this way even
102 ++ though it is not a 64 bit architecture.
103 ++
104 ++ See Documentation/unaligned-memory-access.txt for more
105 ++ information on the topic of unaligned memory accesses.
106 ++
107 + config HAVE_EFFICIENT_UNALIGNED_ACCESS
108 + bool
109 + help
110 +diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
111 +index 267d0c178e55c..30abb4b64a1b6 100644
112 +--- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
113 ++++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
114 +@@ -266,11 +266,6 @@
115 + reg = <0x11000 0x100>;
116 + };
117 +
118 +-&i2c1 {
119 +- compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
120 +- reg = <0x11100 0x100>;
121 +-};
122 +-
123 + &mpic {
124 + reg = <0x20a00 0x2d0>, <0x21070 0x58>;
125 + };
126 +diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
127 +index 682f729ea25e1..c58230fea45f8 100644
128 +--- a/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
129 ++++ b/arch/arm/boot/dts/aspeed-bmc-facebook-tiogapass.dts
130 +@@ -81,11 +81,6 @@
131 + status = "okay";
132 + };
133 +
134 +-&vuart {
135 +- // VUART Host Console
136 +- status = "okay";
137 +-};
138 +-
139 + &uart1 {
140 + // Host Console
141 + status = "okay";
142 +diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
143 +index 22dade6393d06..d1dbe3b6ad5a7 100644
144 +--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
145 ++++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
146 +@@ -22,9 +22,9 @@
147 + #size-cells = <1>;
148 + ranges;
149 +
150 +- vga_memory: framebuffer@7f000000 {
151 ++ vga_memory: framebuffer@9f000000 {
152 + no-map;
153 +- reg = <0x7f000000 0x01000000>;
154 ++ reg = <0x9f000000 0x01000000>; /* 16M */
155 + };
156 + };
157 +
158 +diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
159 +index 61f068a7b362a..400eaf640fe42 100644
160 +--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
161 ++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
162 +@@ -242,6 +242,11 @@
163 + atmel,pins =
164 + <AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */
165 + };
166 ++ pinctrl_usb_default: usb_default {
167 ++ atmel,pins =
168 ++ <AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
169 ++ AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
170 ++ };
171 + };
172 + };
173 + };
174 +@@ -259,6 +264,8 @@
175 + &pioE 3 GPIO_ACTIVE_LOW
176 + &pioE 4 GPIO_ACTIVE_LOW
177 + >;
178 ++ pinctrl-names = "default";
179 ++ pinctrl-0 = <&pinctrl_usb_default>;
180 + status = "okay";
181 + };
182 +
183 +diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
184 +index fdfc37d716e01..1d101067371b4 100644
185 +--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
186 ++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
187 +@@ -133,6 +133,11 @@
188 + atmel,pins =
189 + <AT91_PIOE 31 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;
190 + };
191 ++ pinctrl_usb_default: usb_default {
192 ++ atmel,pins =
193 ++ <AT91_PIOE 11 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
194 ++ AT91_PIOE 14 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
195 ++ };
196 + pinctrl_key_gpio: key_gpio_0 {
197 + atmel,pins =
198 + <AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
199 +@@ -158,6 +163,8 @@
200 + &pioE 11 GPIO_ACTIVE_HIGH
201 + &pioE 14 GPIO_ACTIVE_HIGH
202 + >;
203 ++ pinctrl-names = "default";
204 ++ pinctrl-0 = <&pinctrl_usb_default>;
205 + status = "okay";
206 + };
207 +
208 +diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
209 +index ea024e4b6e095..0121bb0ecde16 100644
210 +--- a/arch/arm/boot/dts/at91sam9rl.dtsi
211 ++++ b/arch/arm/boot/dts/at91sam9rl.dtsi
212 +@@ -278,23 +278,26 @@
213 + atmel,adc-use-res = "highres";
214 +
215 + trigger0 {
216 +- trigger-name = "timer-counter-0";
217 ++ trigger-name = "external-rising";
218 + trigger-value = <0x1>;
219 ++ trigger-external;
220 + };
221 ++
222 + trigger1 {
223 +- trigger-name = "timer-counter-1";
224 +- trigger-value = <0x3>;
225 ++ trigger-name = "external-falling";
226 ++ trigger-value = <0x2>;
227 ++ trigger-external;
228 + };
229 +
230 + trigger2 {
231 +- trigger-name = "timer-counter-2";
232 +- trigger-value = <0x5>;
233 ++ trigger-name = "external-any";
234 ++ trigger-value = <0x3>;
235 ++ trigger-external;
236 + };
237 +
238 + trigger3 {
239 +- trigger-name = "external";
240 +- trigger-value = <0x13>;
241 +- trigger-external;
242 ++ trigger-name = "continuous";
243 ++ trigger-value = <0x6>;
244 + };
245 + };
246 +
247 +diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
248 +index e0db251e253f0..f68baaf58f9e3 100644
249 +--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
250 ++++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
251 +@@ -327,6 +327,8 @@
252 + regulator-name = "vddq_lcd";
253 + regulator-min-microvolt = <1800000>;
254 + regulator-max-microvolt = <1800000>;
255 ++ /* Supplies also GPK and GPJ */
256 ++ regulator-always-on;
257 + };
258 +
259 + ldo8_reg: LDO8 {
260 +@@ -637,11 +639,11 @@
261 + };
262 +
263 + &usbdrd_dwc3_0 {
264 +- dr_mode = "host";
265 ++ dr_mode = "peripheral";
266 + };
267 +
268 + &usbdrd_dwc3_1 {
269 +- dr_mode = "peripheral";
270 ++ dr_mode = "host";
271 + };
272 +
273 + &usbdrd3_0 {
274 +diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
275 +index 369a8a7f21050..481ee99aa9c97 100644
276 +--- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
277 ++++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
278 +@@ -560,6 +560,34 @@
279 + interrupt-controller;
280 + #interrupt-cells = <2>;
281 + };
282 ++
283 ++ usb3_1_oc: usb3-1-oc {
284 ++ samsung,pins = "gpk2-4", "gpk2-5";
285 ++ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
286 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
287 ++ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
288 ++ };
289 ++
290 ++ usb3_1_vbusctrl: usb3-1-vbusctrl {
291 ++ samsung,pins = "gpk2-6", "gpk2-7";
292 ++ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
293 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
294 ++ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
295 ++ };
296 ++
297 ++ usb3_0_oc: usb3-0-oc {
298 ++ samsung,pins = "gpk3-0", "gpk3-1";
299 ++ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
300 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
301 ++ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
302 ++ };
303 ++
304 ++ usb3_0_vbusctrl: usb3-0-vbusctrl {
305 ++ samsung,pins = "gpk3-2", "gpk3-3";
306 ++ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
307 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
308 ++ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
309 ++ };
310 + };
311 +
312 + &pinctrl_2 {
313 +diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
314 +index e6f78b1cee7c8..d077373cf872d 100644
315 +--- a/arch/arm/boot/dts/exynos5410.dtsi
316 ++++ b/arch/arm/boot/dts/exynos5410.dtsi
317 +@@ -398,6 +398,8 @@
318 + &usbdrd3_0 {
319 + clocks = <&clock CLK_USBD300>;
320 + clock-names = "usbdrd30";
321 ++ pinctrl-names = "default";
322 ++ pinctrl-0 = <&usb3_0_oc>, <&usb3_0_vbusctrl>;
323 + };
324 +
325 + &usbdrd_phy0 {
326 +@@ -409,6 +411,8 @@
327 + &usbdrd3_1 {
328 + clocks = <&clock CLK_USBD301>;
329 + clock-names = "usbdrd30";
330 ++ pinctrl-names = "default";
331 ++ pinctrl-0 = <&usb3_1_oc>, <&usb3_1_vbusctrl>;
332 + };
333 +
334 + &usbdrd_dwc3_1 {
335 +diff --git a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
336 +index 81c7ebb4b3fbe..6acc8591219a7 100644
337 +--- a/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
338 ++++ b/arch/arm/boot/dts/imx6qdl-kontron-samx6i.dtsi
339 +@@ -551,7 +551,7 @@
340 +
341 + pinctrl_i2c3: i2c3grp {
342 + fsl,pins = <
343 +- MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1
344 ++ MX6QDL_PAD_GPIO_5__I2C3_SCL 0x4001b8b1
345 + MX6QDL_PAD_GPIO_16__I2C3_SDA 0x4001b8b1
346 + >;
347 + };
348 +diff --git a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
349 +index 93909796885a0..b9b698f72b261 100644
350 +--- a/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
351 ++++ b/arch/arm/boot/dts/imx6qdl-wandboard-revd1.dtsi
352 +@@ -166,7 +166,6 @@
353 + MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030
354 + MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030
355 + MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030
356 +- MX6QDL_PAD_GPIO_6__ENET_IRQ 0x000b1
357 + >;
358 + };
359 +
360 +diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
361 +index a24eccc354b95..0f9c71137bed5 100644
362 +--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
363 ++++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
364 +@@ -219,7 +219,7 @@
365 + reg = <0>;
366 +
367 + reset-assert-us = <10000>;
368 +- reset-deassert-us = <30000>;
369 ++ reset-deassert-us = <80000>;
370 + reset-gpios = <&gpio GPIOH_4 GPIO_ACTIVE_LOW>;
371 +
372 + interrupt-parent = <&gpio_intc>;
373 +diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
374 +index d54477b1001ca..84b6ed51099db 100644
375 +--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
376 ++++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
377 +@@ -83,7 +83,7 @@
378 + reg = <0>;
379 +
380 + reset-assert-us = <10000>;
381 +- reset-deassert-us = <30000>;
382 ++ reset-deassert-us = <80000>;
383 + reset-gpios = <&gpio GPIOH_4 GPIO_ACTIVE_LOW>;
384 + };
385 + };
386 +diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts
387 +index 9dd307b526048..468ad1b641380 100644
388 +--- a/arch/arm/boot/dts/omap4-panda-es.dts
389 ++++ b/arch/arm/boot/dts/omap4-panda-es.dts
390 +@@ -46,7 +46,7 @@
391 +
392 + button_pins: pinmux_button_pins {
393 + pinctrl-single,pins = <
394 +- OMAP4_IOPAD(0x11b, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
395 ++ OMAP4_IOPAD(0x0fc, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
396 + >;
397 + };
398 + };
399 +diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
400 +index 2e2c1a7b1d1dc..b05bab57f90a3 100644
401 +--- a/arch/arm/boot/dts/sama5d2.dtsi
402 ++++ b/arch/arm/boot/dts/sama5d2.dtsi
403 +@@ -648,6 +648,7 @@
404 + clocks = <&pmc PMC_TYPE_PERIPHERAL 51>;
405 + #address-cells = <1>;
406 + #size-cells = <1>;
407 ++ no-memory-wc;
408 + ranges = <0 0xf8044000 0x1420>;
409 + };
410 +
411 +@@ -716,7 +717,7 @@
412 +
413 + can0: can@f8054000 {
414 + compatible = "bosch,m_can";
415 +- reg = <0xf8054000 0x4000>, <0x210000 0x4000>;
416 ++ reg = <0xf8054000 0x4000>, <0x210000 0x1c00>;
417 + reg-names = "m_can", "message_ram";
418 + interrupts = <56 IRQ_TYPE_LEVEL_HIGH 7>,
419 + <64 IRQ_TYPE_LEVEL_HIGH 7>;
420 +@@ -938,7 +939,7 @@
421 +
422 + can1: can@fc050000 {
423 + compatible = "bosch,m_can";
424 +- reg = <0xfc050000 0x4000>, <0x210000 0x4000>;
425 ++ reg = <0xfc050000 0x4000>, <0x210000 0x3800>;
426 + reg-names = "m_can", "message_ram";
427 + interrupts = <57 IRQ_TYPE_LEVEL_HIGH 7>,
428 + <65 IRQ_TYPE_LEVEL_HIGH 7>;
429 +@@ -948,7 +949,7 @@
430 + assigned-clocks = <&pmc PMC_TYPE_GCK 57>;
431 + assigned-clock-parents = <&pmc PMC_TYPE_CORE PMC_UTMI>;
432 + assigned-clock-rates = <40000000>;
433 +- bosch,mram-cfg = <0x1100 0 0 64 0 0 32 32>;
434 ++ bosch,mram-cfg = <0x1c00 0 0 64 0 0 32 32>;
435 + status = "disabled";
436 + };
437 +
438 +diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
439 +index bb3987e101c29..0b3d9ae756503 100644
440 +--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
441 ++++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
442 +@@ -132,7 +132,7 @@
443 + pinctrl-names = "default";
444 + pinctrl-0 = <&gmac_rgmii_pins>;
445 + phy-handle = <&phy1>;
446 +- phy-mode = "rgmii";
447 ++ phy-mode = "rgmii-id";
448 + phy-supply = <&reg_gmac_3v3>;
449 + status = "okay";
450 + };
451 +diff --git a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
452 +index fce2f7fcd084a..bf38c66c1815b 100644
453 +--- a/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
454 ++++ b/arch/arm/boot/dts/sun7i-a20-pcduino3-nano.dts
455 +@@ -1,5 +1,5 @@
456 + /*
457 +- * Copyright 2015 Adam Sampson <ats@×××××.org>
458 ++ * Copyright 2015-2020 Adam Sampson <ats@×××××.org>
459 + *
460 + * This file is dual-licensed: you can use it either under the terms
461 + * of the GPL or the X11 license, at your option. Note that this dual
462 +@@ -115,7 +115,7 @@
463 + pinctrl-names = "default";
464 + pinctrl-0 = <&gmac_rgmii_pins>;
465 + phy-handle = <&phy1>;
466 +- phy-mode = "rgmii";
467 ++ phy-mode = "rgmii-id";
468 + status = "okay";
469 + };
470 +
471 +diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
472 +index 2abcba35d27e6..50c32cf72c65c 100644
473 +--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
474 ++++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
475 +@@ -423,7 +423,7 @@
476 + gic: interrupt-controller@1c81000 {
477 + compatible = "arm,gic-400";
478 + reg = <0x01c81000 0x1000>,
479 +- <0x01c82000 0x1000>,
480 ++ <0x01c82000 0x2000>,
481 + <0x01c84000 0x2000>,
482 + <0x01c86000 0x2000>;
483 + interrupt-controller;
484 +diff --git a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
485 +index 15c22b06fc4b6..47954551f5735 100644
486 +--- a/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
487 ++++ b/arch/arm/boot/dts/sun8i-v40-bananapi-m2-berry.dts
488 +@@ -120,7 +120,7 @@
489 + pinctrl-names = "default";
490 + pinctrl-0 = <&gmac_rgmii_pins>;
491 + phy-handle = <&phy1>;
492 +- phy-mode = "rgmii";
493 ++ phy-mode = "rgmii-id";
494 + phy-supply = <&reg_dc1sw>;
495 + status = "okay";
496 + };
497 +@@ -198,16 +198,16 @@
498 + };
499 +
500 + &reg_dc1sw {
501 +- regulator-min-microvolt = <3000000>;
502 +- regulator-max-microvolt = <3000000>;
503 ++ regulator-min-microvolt = <3300000>;
504 ++ regulator-max-microvolt = <3300000>;
505 + regulator-name = "vcc-gmac-phy";
506 + };
507 +
508 + &reg_dcdc1 {
509 + regulator-always-on;
510 +- regulator-min-microvolt = <3000000>;
511 +- regulator-max-microvolt = <3000000>;
512 +- regulator-name = "vcc-3v0";
513 ++ regulator-min-microvolt = <3300000>;
514 ++ regulator-max-microvolt = <3300000>;
515 ++ regulator-name = "vcc-3v3";
516 + };
517 +
518 + &reg_dcdc2 {
519 +diff --git a/arch/arm/crypto/aes-ce-core.S b/arch/arm/crypto/aes-ce-core.S
520 +index 4d1707388d941..312428d83eedb 100644
521 +--- a/arch/arm/crypto/aes-ce-core.S
522 ++++ b/arch/arm/crypto/aes-ce-core.S
523 +@@ -386,20 +386,32 @@ ENTRY(ce_aes_ctr_encrypt)
524 + .Lctrloop4x:
525 + subs r4, r4, #4
526 + bmi .Lctr1x
527 +- add r6, r6, #1
528 ++
529 ++ /*
530 ++ * NOTE: the sequence below has been carefully tweaked to avoid
531 ++ * a silicon erratum that exists in Cortex-A57 (#1742098) and
532 ++ * Cortex-A72 (#1655431) cores, where AESE/AESMC instruction pairs
533 ++ * may produce an incorrect result if they take their input from a
534 ++ * register of which a single 32-bit lane has been updated the last
535 ++ * time it was modified. To work around this, the lanes of registers
536 ++ * q0-q3 below are not manipulated individually, and the different
537 ++ * counter values are prepared by successive manipulations of q7.
538 ++ */
539 ++ add ip, r6, #1
540 + vmov q0, q7
541 ++ rev ip, ip
542 ++ add lr, r6, #2
543 ++ vmov s31, ip @ set lane 3 of q1 via q7
544 ++ add ip, r6, #3
545 ++ rev lr, lr
546 + vmov q1, q7
547 +- rev ip, r6
548 +- add r6, r6, #1
549 ++ vmov s31, lr @ set lane 3 of q2 via q7
550 ++ rev ip, ip
551 + vmov q2, q7
552 +- vmov s7, ip
553 +- rev ip, r6
554 +- add r6, r6, #1
555 ++ vmov s31, ip @ set lane 3 of q3 via q7
556 ++ add r6, r6, #4
557 + vmov q3, q7
558 +- vmov s11, ip
559 +- rev ip, r6
560 +- add r6, r6, #1
561 +- vmov s15, ip
562 ++
563 + vld1.8 {q4-q5}, [r1]!
564 + vld1.8 {q6}, [r1]!
565 + vld1.8 {q15}, [r1]!
566 +diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
567 +index c49b39340ddbd..f1cdc1f369575 100644
568 +--- a/arch/arm/kernel/head.S
569 ++++ b/arch/arm/kernel/head.S
570 +@@ -671,12 +671,8 @@ ARM_BE8(rev16 ip, ip)
571 + ldrcc r7, [r4], #4 @ use branch for delay slot
572 + bcc 1b
573 + bx lr
574 +-#else
575 +-#ifdef CONFIG_CPU_ENDIAN_BE8
576 +- moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction
577 + #else
578 + moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
579 +-#endif
580 + b 2f
581 + 1: ldr ip, [r7, r3]
582 + #ifdef CONFIG_CPU_ENDIAN_BE8
583 +@@ -685,7 +681,7 @@ ARM_BE8(rev16 ip, ip)
584 + tst ip, #0x000f0000 @ check the rotation field
585 + orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
586 + biceq ip, ip, #0x00004000 @ clear bit 22
587 +- orreq ip, ip, r0 @ mask in offset bits 7-0
588 ++ orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0
589 + #else
590 + bic ip, ip, #0x000000ff
591 + tst ip, #0xf00 @ check the rotation field
592 +diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
593 +index 933b6930f024f..a0ca5e7a68de2 100644
594 +--- a/arch/arm/mach-sunxi/sunxi.c
595 ++++ b/arch/arm/mach-sunxi/sunxi.c
596 +@@ -66,6 +66,7 @@ static const char * const sun8i_board_dt_compat[] = {
597 + "allwinner,sun8i-h2-plus",
598 + "allwinner,sun8i-h3",
599 + "allwinner,sun8i-r40",
600 ++ "allwinner,sun8i-v3",
601 + "allwinner,sun8i-v3s",
602 + NULL,
603 + };
604 +diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
605 +index 17155fb73fce9..c48125bf9d1e3 100644
606 +--- a/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
607 ++++ b/arch/arm64/boot/dts/amlogic/meson-g12a-x96-max.dts
608 +@@ -340,7 +340,7 @@
609 + eee-broken-1000t;
610 +
611 + reset-assert-us = <10000>;
612 +- reset-deassert-us = <30000>;
613 ++ reset-deassert-us = <80000>;
614 + reset-gpios = <&gpio GPIOZ_15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>;
615 +
616 + interrupt-parent = <&gpio_intc>;
617 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
618 +index 233eb1cd79671..d94b695916a35 100644
619 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
620 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
621 +@@ -165,7 +165,7 @@
622 + reg = <0>;
623 +
624 + reset-assert-us = <10000>;
625 +- reset-deassert-us = <30000>;
626 ++ reset-deassert-us = <80000>;
627 + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
628 +
629 + interrupt-parent = <&gpio_intc>;
630 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
631 +index b0b12e3898350..8828acb3fd4c5 100644
632 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
633 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
634 +@@ -138,7 +138,7 @@
635 + reg = <0>;
636 +
637 + reset-assert-us = <10000>;
638 +- reset-deassert-us = <30000>;
639 ++ reset-deassert-us = <80000>;
640 + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
641 +
642 + interrupt-parent = <&gpio_intc>;
643 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
644 +index 43b11e3dfe119..29976215e1446 100644
645 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
646 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
647 +@@ -126,7 +126,7 @@
648 + reg = <0>;
649 +
650 + reset-assert-us = <10000>;
651 +- reset-deassert-us = <30000>;
652 ++ reset-deassert-us = <80000>;
653 + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
654 +
655 + interrupt-parent = <&gpio_intc>;
656 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
657 +index 4c539881fbb73..e3d17569d98ad 100644
658 +--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
659 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi
660 +@@ -147,7 +147,7 @@
661 + reg = <0>;
662 +
663 + reset-assert-us = <10000>;
664 +- reset-deassert-us = <30000>;
665 ++ reset-deassert-us = <80000>;
666 + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
667 + };
668 + };
669 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
670 +index b08c4537f260d..b2ab05c220903 100644
671 +--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
672 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905d-p230.dts
673 +@@ -82,7 +82,7 @@
674 +
675 + /* External PHY reset is shared with internal PHY Led signal */
676 + reset-assert-us = <10000>;
677 +- reset-deassert-us = <30000>;
678 ++ reset-deassert-us = <80000>;
679 + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
680 +
681 + interrupt-parent = <&gpio_intc>;
682 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
683 +index 3f43716d5c453..c8a4205117f15 100644
684 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
685 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
686 +@@ -251,7 +251,7 @@
687 + reg = <0>;
688 +
689 + reset-assert-us = <10000>;
690 +- reset-deassert-us = <30000>;
691 ++ reset-deassert-us = <80000>;
692 + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
693 +
694 + interrupt-parent = <&gpio_intc>;
695 +@@ -395,7 +395,7 @@
696 + #size-cells = <1>;
697 + compatible = "winbond,w25q16", "jedec,spi-nor";
698 + reg = <0>;
699 +- spi-max-frequency = <3000000>;
700 ++ spi-max-frequency = <104000000>;
701 + };
702 + };
703 +
704 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
705 +index c2bd4dbbf38c5..8dccf91d68da7 100644
706 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
707 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts
708 +@@ -112,7 +112,7 @@
709 + max-speed = <1000>;
710 +
711 + reset-assert-us = <10000>;
712 +- reset-deassert-us = <30000>;
713 ++ reset-deassert-us = <80000>;
714 + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
715 + };
716 + };
717 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts
718 +index ea45ae0c71b7f..8edbfe040805c 100644
719 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts
720 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-q200.dts
721 +@@ -64,7 +64,7 @@
722 +
723 + /* External PHY reset is shared with internal PHY Led signal */
724 + reset-assert-us = <10000>;
725 +- reset-deassert-us = <30000>;
726 ++ reset-deassert-us = <80000>;
727 + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
728 +
729 + interrupt-parent = <&gpio_intc>;
730 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
731 +index 5cd4d35006d09..f72d29e33a9e4 100644
732 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
733 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts
734 +@@ -114,7 +114,7 @@
735 + max-speed = <1000>;
736 +
737 + reset-assert-us = <10000>;
738 +- reset-deassert-us = <30000>;
739 ++ reset-deassert-us = <80000>;
740 + reset-gpios = <&gpio GPIOZ_14 GPIO_ACTIVE_LOW>;
741 + };
742 + };
743 +diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
744 +index 521573f3a5bab..8ba3555ca3693 100644
745 +--- a/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
746 ++++ b/arch/arm64/boot/dts/amlogic/meson-sm1.dtsi
747 +@@ -90,7 +90,7 @@
748 + opp-microvolt = <790000>;
749 + };
750 +
751 +- opp-1512000000 {
752 ++ opp-1500000000 {
753 + opp-hz = /bits/ 64 <1500000000>;
754 + opp-microvolt = <800000>;
755 + };
756 +diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
757 +index 0821489a874de..25549d9552ae2 100644
758 +--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
759 ++++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
760 +@@ -90,8 +90,10 @@
761 + };
762 +
763 + psci {
764 +- compatible = "arm,psci-0.2";
765 ++ compatible = "arm,psci";
766 + method = "smc";
767 ++ cpu_off = <0x84000002>;
768 ++ cpu_on = <0xC4000003>;
769 + };
770 +
771 + soc: soc {
772 +@@ -494,13 +496,6 @@
773 + pmu_system_controller: system-controller@105c0000 {
774 + compatible = "samsung,exynos7-pmu", "syscon";
775 + reg = <0x105c0000 0x5000>;
776 +-
777 +- reboot: syscon-reboot {
778 +- compatible = "syscon-reboot";
779 +- regmap = <&pmu_system_controller>;
780 +- offset = <0x0400>;
781 +- mask = <0x1>;
782 +- };
783 + };
784 +
785 + rtc: rtc@10590000 {
786 +@@ -650,3 +645,4 @@
787 + };
788 +
789 + #include "exynos7-pinctrl.dtsi"
790 ++#include "arm/exynos-syscon-restart.dtsi"
791 +diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
792 +index 9589b15693d6e..795d6ca4bbd1f 100644
793 +--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
794 ++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
795 +@@ -673,7 +673,7 @@
796 + ethernet@0,4 {
797 + compatible = "fsl,enetc-ptp";
798 + reg = <0x000400 0 0 0 0>;
799 +- clocks = <&clockgen 4 0>;
800 ++ clocks = <&clockgen 2 3>;
801 + little-endian;
802 + };
803 + };
804 +diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
805 +index c3668187b8446..aa52927e2e9c2 100644
806 +--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
807 ++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
808 +@@ -144,7 +144,7 @@
809 + pinctrl-names = "default";
810 + pinctrl-0 = <&rgmii_pins>;
811 + phy-mode = "rgmii-id";
812 +- phy = <&phy1>;
813 ++ phy-handle = <&phy1>;
814 + status = "okay";
815 + };
816 +
817 +diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
818 +index 78f7e6e50beb0..0821754f0fd6d 100644
819 +--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
820 ++++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
821 +@@ -144,7 +144,7 @@
822 + nvidia,schmitt = <TEGRA_PIN_DISABLE>;
823 + nvidia,lpdr = <TEGRA_PIN_ENABLE>;
824 + nvidia,enable-input = <TEGRA_PIN_DISABLE>;
825 +- nvidia,io-high-voltage = <TEGRA_PIN_ENABLE>;
826 ++ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
827 + nvidia,tristate = <TEGRA_PIN_DISABLE>;
828 + nvidia,pull = <TEGRA_PIN_PULL_NONE>;
829 + };
830 +@@ -156,7 +156,7 @@
831 + nvidia,schmitt = <TEGRA_PIN_DISABLE>;
832 + nvidia,lpdr = <TEGRA_PIN_ENABLE>;
833 + nvidia,enable-input = <TEGRA_PIN_ENABLE>;
834 +- nvidia,io-high-voltage = <TEGRA_PIN_ENABLE>;
835 ++ nvidia,io-hv = <TEGRA_PIN_ENABLE>;
836 + nvidia,tristate = <TEGRA_PIN_DISABLE>;
837 + nvidia,pull = <TEGRA_PIN_PULL_NONE>;
838 + };
839 +diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
840 +index ded120d3aef58..f539b3655f6b9 100644
841 +--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
842 ++++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
843 +@@ -244,23 +244,28 @@
844 + status = "okay";
845 + clock-frequency = <400000>;
846 +
847 +- hid@15 {
848 ++ tsel: hid@15 {
849 + compatible = "hid-over-i2c";
850 + reg = <0x15>;
851 + hid-descr-addr = <0x1>;
852 +
853 +- interrupts-extended = <&tlmm 37 IRQ_TYPE_EDGE_RISING>;
854 ++ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
855 ++
856 ++ pinctrl-names = "default";
857 ++ pinctrl-0 = <&i2c3_hid_active>;
858 + };
859 +
860 +- hid@2c {
861 ++ tsc2: hid@2c {
862 + compatible = "hid-over-i2c";
863 + reg = <0x2c>;
864 + hid-descr-addr = <0x20>;
865 +
866 +- interrupts-extended = <&tlmm 37 IRQ_TYPE_EDGE_RISING>;
867 ++ interrupts-extended = <&tlmm 37 IRQ_TYPE_LEVEL_HIGH>;
868 +
869 + pinctrl-names = "default";
870 +- pinctrl-0 = <&i2c2_hid_active>;
871 ++ pinctrl-0 = <&i2c3_hid_active>;
872 ++
873 ++ status = "disabled";
874 + };
875 + };
876 +
877 +@@ -268,15 +273,15 @@
878 + status = "okay";
879 + clock-frequency = <400000>;
880 +
881 +- hid@10 {
882 ++ tsc1: hid@10 {
883 + compatible = "hid-over-i2c";
884 + reg = <0x10>;
885 + hid-descr-addr = <0x1>;
886 +
887 +- interrupts-extended = <&tlmm 125 IRQ_TYPE_EDGE_FALLING>;
888 ++ interrupts-extended = <&tlmm 125 IRQ_TYPE_LEVEL_LOW>;
889 +
890 + pinctrl-names = "default";
891 +- pinctrl-0 = <&i2c6_hid_active>;
892 ++ pinctrl-0 = <&i2c5_hid_active>;
893 + };
894 + };
895 +
896 +@@ -284,7 +289,7 @@
897 + status = "okay";
898 + clock-frequency = <400000>;
899 +
900 +- hid@5c {
901 ++ ecsh: hid@5c {
902 + compatible = "hid-over-i2c";
903 + reg = <0x5c>;
904 + hid-descr-addr = <0x1>;
905 +@@ -292,7 +297,7 @@
906 + interrupts-extended = <&tlmm 92 IRQ_TYPE_LEVEL_LOW>;
907 +
908 + pinctrl-names = "default";
909 +- pinctrl-0 = <&i2c12_hid_active>;
910 ++ pinctrl-0 = <&i2c11_hid_active>;
911 + };
912 + };
913 +
914 +@@ -335,7 +340,7 @@
915 + &tlmm {
916 + gpio-reserved-ranges = <0 4>, <81 4>;
917 +
918 +- i2c2_hid_active: i2c2-hid-active {
919 ++ i2c3_hid_active: i2c2-hid-active {
920 + pins = <37>;
921 + function = "gpio";
922 +
923 +@@ -344,7 +349,7 @@
924 + drive-strength = <2>;
925 + };
926 +
927 +- i2c6_hid_active: i2c6-hid-active {
928 ++ i2c5_hid_active: i2c5-hid-active {
929 + pins = <125>;
930 + function = "gpio";
931 +
932 +@@ -353,7 +358,7 @@
933 + drive-strength = <2>;
934 + };
935 +
936 +- i2c12_hid_active: i2c12-hid-active {
937 ++ i2c11_hid_active: i2c11-hid-active {
938 + pins = <92>;
939 + function = "gpio";
940 +
941 +diff --git a/arch/arm64/boot/dts/renesas/cat875.dtsi b/arch/arm64/boot/dts/renesas/cat875.dtsi
942 +index aaefc3ae56d50..dbdb8b093e733 100644
943 +--- a/arch/arm64/boot/dts/renesas/cat875.dtsi
944 ++++ b/arch/arm64/boot/dts/renesas/cat875.dtsi
945 +@@ -22,7 +22,6 @@
946 + status = "okay";
947 +
948 + phy0: ethernet-phy@0 {
949 +- rxc-skew-ps = <1500>;
950 + reg = <0>;
951 + interrupt-parent = <&gpio2>;
952 + interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
953 +diff --git a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
954 +index 4280b190dc682..6a001cdfd38e2 100644
955 +--- a/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
956 ++++ b/arch/arm64/boot/dts/renesas/hihope-rzg2-ex.dtsi
957 +@@ -23,7 +23,6 @@
958 + status = "okay";
959 +
960 + phy0: ethernet-phy@0 {
961 +- rxc-skew-ps = <1500>;
962 + reg = <0>;
963 + interrupt-parent = <&gpio2>;
964 + interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
965 +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
966 +index bb40c163b05dc..6c3368f795ca3 100644
967 +--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
968 ++++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
969 +@@ -333,6 +333,7 @@
970 + };
971 +
972 + &usb20_otg {
973 ++ dr_mode = "host";
974 + status = "okay";
975 + };
976 +
977 +diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
978 +index 31cc1541f1f59..e0ed323935a4d 100644
979 +--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
980 ++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
981 +@@ -1190,8 +1190,8 @@
982 +
983 + uart0 {
984 + uart0_xfer: uart0-xfer {
985 +- rockchip,pins = <1 RK_PB1 1 &pcfg_pull_up>,
986 +- <1 RK_PB0 1 &pcfg_pull_none>;
987 ++ rockchip,pins = <1 RK_PB1 1 &pcfg_pull_none>,
988 ++ <1 RK_PB0 1 &pcfg_pull_up>;
989 + };
990 +
991 + uart0_cts: uart0-cts {
992 +@@ -1209,8 +1209,8 @@
993 +
994 + uart1 {
995 + uart1_xfer: uart1-xfer {
996 +- rockchip,pins = <3 RK_PA4 4 &pcfg_pull_up>,
997 +- <3 RK_PA6 4 &pcfg_pull_none>;
998 ++ rockchip,pins = <3 RK_PA4 4 &pcfg_pull_none>,
999 ++ <3 RK_PA6 4 &pcfg_pull_up>;
1000 + };
1001 +
1002 + uart1_cts: uart1-cts {
1003 +@@ -1228,15 +1228,15 @@
1004 +
1005 + uart2-0 {
1006 + uart2m0_xfer: uart2m0-xfer {
1007 +- rockchip,pins = <1 RK_PA0 2 &pcfg_pull_up>,
1008 +- <1 RK_PA1 2 &pcfg_pull_none>;
1009 ++ rockchip,pins = <1 RK_PA0 2 &pcfg_pull_none>,
1010 ++ <1 RK_PA1 2 &pcfg_pull_up>;
1011 + };
1012 + };
1013 +
1014 + uart2-1 {
1015 + uart2m1_xfer: uart2m1-xfer {
1016 +- rockchip,pins = <2 RK_PA0 1 &pcfg_pull_up>,
1017 +- <2 RK_PA1 1 &pcfg_pull_none>;
1018 ++ rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
1019 ++ <2 RK_PA1 1 &pcfg_pull_up>;
1020 + };
1021 + };
1022 +
1023 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
1024 +index 7140701f65f91..dfa6dc4575bec 100644
1025 +--- a/arch/arm64/include/asm/kvm_host.h
1026 ++++ b/arch/arm64/include/asm/kvm_host.h
1027 +@@ -182,6 +182,7 @@ enum vcpu_sysreg {
1028 + #define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
1029 + #define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
1030 + #define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
1031 ++#define c2_TTBCR2 (c2_TTBCR + 1) /* Translation Table Base Control R. 2 */
1032 + #define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
1033 + #define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
1034 + #define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
1035 +diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
1036 +index 1457a0ba83dbc..f2d2dbbbfca20 100644
1037 +--- a/arch/arm64/kernel/syscall.c
1038 ++++ b/arch/arm64/kernel/syscall.c
1039 +@@ -102,8 +102,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
1040 + regs->syscallno = scno;
1041 +
1042 + cortex_a76_erratum_1463225_svc_handler();
1043 ++ user_exit_irqoff();
1044 + local_daif_restore(DAIF_PROCCTX);
1045 +- user_exit();
1046 +
1047 + if (has_syscall_work(flags)) {
1048 + /* set default errno for user-issued syscall(-1) */
1049 +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
1050 +index f1f4f42e8ef46..6478635ff2142 100644
1051 +--- a/arch/arm64/kvm/sys_regs.c
1052 ++++ b/arch/arm64/kvm/sys_regs.c
1053 +@@ -1837,6 +1837,7 @@ static const struct sys_reg_desc cp15_regs[] = {
1054 + { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
1055 + { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
1056 + { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
1057 ++ { Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 },
1058 + { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
1059 + { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
1060 + { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
1061 +diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
1062 +index 6889f74e06f54..490bb6da74b7e 100644
1063 +--- a/arch/mips/bcm47xx/Kconfig
1064 ++++ b/arch/mips/bcm47xx/Kconfig
1065 +@@ -27,6 +27,7 @@ config BCM47XX_BCMA
1066 + select BCMA
1067 + select BCMA_HOST_SOC
1068 + select BCMA_DRIVER_MIPS
1069 ++ select BCMA_DRIVER_PCI if PCI
1070 + select BCMA_DRIVER_PCI_HOSTMODE if PCI
1071 + select BCMA_DRIVER_GPIO
1072 + default y
1073 +diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
1074 +index b8884de89c81e..82e44b31aad59 100644
1075 +--- a/arch/mips/kernel/setup.c
1076 ++++ b/arch/mips/kernel/setup.c
1077 +@@ -529,8 +529,8 @@ static void __init request_crashkernel(struct resource *res)
1078 +
1079 + static void __init check_kernel_sections_mem(void)
1080 + {
1081 +- phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text)));
1082 +- phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start;
1083 ++ phys_addr_t start = __pa_symbol(&_text);
1084 ++ phys_addr_t size = __pa_symbol(&_end) - start;
1085 +
1086 + if (!memblock_is_region_memory(start, size)) {
1087 + pr_info("Kernel sections are not in the memory maps\n");
1088 +diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
1089 +index 7b6349be621a3..5325bd9c9b47b 100644
1090 +--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
1091 ++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
1092 +@@ -557,9 +557,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
1093 + if (pte_val(*ptep) & _PAGE_HASHPTE)
1094 + flush_hash_entry(mm, ptep, addr);
1095 + __asm__ __volatile__("\
1096 +- stw%U0%X0 %2,%0\n\
1097 ++ stw%X0 %2,%0\n\
1098 + eieio\n\
1099 +- stw%U0%X0 %L2,%1"
1100 ++ stw%X1 %L2,%1"
1101 + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
1102 + : "r" (pte) : "memory");
1103 +
1104 +diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h
1105 +index a116fe9317892..3bdd74739cb88 100644
1106 +--- a/arch/powerpc/include/asm/cpm1.h
1107 ++++ b/arch/powerpc/include/asm/cpm1.h
1108 +@@ -68,6 +68,7 @@ extern void cpm_reset(void);
1109 + #define PROFF_SPI ((uint)0x0180)
1110 + #define PROFF_SCC3 ((uint)0x0200)
1111 + #define PROFF_SMC1 ((uint)0x0280)
1112 ++#define PROFF_DSP1 ((uint)0x02c0)
1113 + #define PROFF_SCC4 ((uint)0x0300)
1114 + #define PROFF_SMC2 ((uint)0x0380)
1115 +
1116 +diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
1117 +index cf00ff0d121de..235911fb0e24f 100644
1118 +--- a/arch/powerpc/include/asm/cputable.h
1119 ++++ b/arch/powerpc/include/asm/cputable.h
1120 +@@ -367,7 +367,7 @@ static inline void cpu_feature_keys_init(void) { }
1121 + CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
1122 + #define CPU_FTRS_82XX (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_NOEXECUTE)
1123 + #define CPU_FTRS_G2_LE (CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
1124 +- CPU_FTR_MAYBE_CAN_NAP)
1125 ++ CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NOEXECUTE)
1126 + #define CPU_FTRS_E300 (CPU_FTR_MAYBE_CAN_DOZE | \
1127 + CPU_FTR_MAYBE_CAN_NAP | \
1128 + CPU_FTR_COMMON | CPU_FTR_NOEXECUTE)
1129 +@@ -407,7 +407,6 @@ static inline void cpu_feature_keys_init(void) { }
1130 + CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
1131 + CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
1132 + CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
1133 +-#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
1134 +
1135 + /* 64-bit CPUs */
1136 + #define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \
1137 +@@ -507,8 +506,6 @@ enum {
1138 + CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
1139 + CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
1140 + CPU_FTRS_CLASSIC32 |
1141 +-#else
1142 +- CPU_FTRS_GENERIC_32 |
1143 + #endif
1144 + #ifdef CONFIG_PPC_8xx
1145 + CPU_FTRS_8XX |
1146 +@@ -585,8 +582,6 @@ enum {
1147 + CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
1148 + CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
1149 + CPU_FTRS_CLASSIC32 &
1150 +-#else
1151 +- CPU_FTRS_GENERIC_32 &
1152 + #endif
1153 + #ifdef CONFIG_PPC_8xx
1154 + CPU_FTRS_8XX &
1155 +diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
1156 +index 7fed9dc0f147a..3d2a78ab051a7 100644
1157 +--- a/arch/powerpc/include/asm/nohash/pgtable.h
1158 ++++ b/arch/powerpc/include/asm/nohash/pgtable.h
1159 +@@ -199,9 +199,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
1160 + */
1161 + if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
1162 + __asm__ __volatile__("\
1163 +- stw%U0%X0 %2,%0\n\
1164 ++ stw%X0 %2,%0\n\
1165 + eieio\n\
1166 +- stw%U0%X0 %L2,%1"
1167 ++ stw%X1 %L2,%1"
1168 + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
1169 + : "r" (pte) : "memory");
1170 + return;
1171 +diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
1172 +index 59260eb962916..afbd47b0a75cc 100644
1173 +--- a/arch/powerpc/kernel/Makefile
1174 ++++ b/arch/powerpc/kernel/Makefile
1175 +@@ -181,6 +181,9 @@ KCOV_INSTRUMENT_cputable.o := n
1176 + KCOV_INSTRUMENT_setup_64.o := n
1177 + KCOV_INSTRUMENT_paca.o := n
1178 +
1179 ++CFLAGS_setup_64.o += -fno-stack-protector
1180 ++CFLAGS_paca.o += -fno-stack-protector
1181 ++
1182 + extra-$(CONFIG_PPC_FPU) += fpu.o
1183 + extra-$(CONFIG_ALTIVEC) += vector.o
1184 + extra-$(CONFIG_PPC64) += entry_64.o
1185 +diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
1186 +index 780f527eabd2c..9019f1395d39a 100644
1187 +--- a/arch/powerpc/kernel/head_64.S
1188 ++++ b/arch/powerpc/kernel/head_64.S
1189 +@@ -420,6 +420,10 @@ generic_secondary_common_init:
1190 + /* From now on, r24 is expected to be logical cpuid */
1191 + mr r24,r5
1192 +
1193 ++ /* Create a temp kernel stack for use before relocation is on. */
1194 ++ ld r1,PACAEMERGSP(r13)
1195 ++ subi r1,r1,STACK_FRAME_OVERHEAD
1196 ++
1197 + /* See if we need to call a cpu state restore handler */
1198 + LOAD_REG_ADDR(r23, cur_cpu_spec)
1199 + ld r23,0(r23)
1200 +@@ -448,10 +452,6 @@ generic_secondary_common_init:
1201 + sync /* order paca.run and cur_cpu_spec */
1202 + isync /* In case code patching happened */
1203 +
1204 +- /* Create a temp kernel stack for use before relocation is on. */
1205 +- ld r1,PACAEMERGSP(r13)
1206 +- subi r1,r1,STACK_FRAME_OVERHEAD
1207 +-
1208 + b __secondary_start
1209 + #endif /* SMP */
1210 +
1211 +@@ -992,7 +992,7 @@ start_here_common:
1212 + bl start_kernel
1213 +
1214 + /* Not reached */
1215 +- trap
1216 ++0: trap
1217 + EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
1218 +
1219 + /*
1220 +diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
1221 +index 4ea0cca52e162..c786adfb9413f 100644
1222 +--- a/arch/powerpc/kernel/paca.c
1223 ++++ b/arch/powerpc/kernel/paca.c
1224 +@@ -176,7 +176,7 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
1225 + struct paca_struct **paca_ptrs __read_mostly;
1226 + EXPORT_SYMBOL(paca_ptrs);
1227 +
1228 +-void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int cpu)
1229 ++void __init initialise_paca(struct paca_struct *new_paca, int cpu)
1230 + {
1231 + #ifdef CONFIG_PPC_PSERIES
1232 + new_paca->lppaca_ptr = NULL;
1233 +@@ -205,7 +205,7 @@ void __init __nostackprotector initialise_paca(struct paca_struct *new_paca, int
1234 + }
1235 +
1236 + /* Put the paca pointer into r13 and SPRG_PACA */
1237 +-void __nostackprotector setup_paca(struct paca_struct *new_paca)
1238 ++void setup_paca(struct paca_struct *new_paca)
1239 + {
1240 + /* Setup r13 */
1241 + local_paca = new_paca;
1242 +diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
1243 +index c62ff66d44ad9..c1e2e351ebff8 100644
1244 +--- a/arch/powerpc/kernel/rtas.c
1245 ++++ b/arch/powerpc/kernel/rtas.c
1246 +@@ -978,7 +978,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = {
1247 + { "ibm,display-message", -1, 0, -1, -1, -1 },
1248 + { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 },
1249 + { "ibm,close-errinjct", -1, -1, -1, -1, -1 },
1250 +- { "ibm,open-errinct", -1, -1, -1, -1, -1 },
1251 ++ { "ibm,open-errinjct", -1, -1, -1, -1, -1 },
1252 + { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 },
1253 + { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 },
1254 + { "ibm,get-indices", -1, 2, 3, -1, -1 },
1255 +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
1256 +index 25aaa39030009..f281d011f4b97 100644
1257 +--- a/arch/powerpc/kernel/setup-common.c
1258 ++++ b/arch/powerpc/kernel/setup-common.c
1259 +@@ -903,8 +903,6 @@ void __init setup_arch(char **cmdline_p)
1260 +
1261 + /* On BookE, setup per-core TLB data structures. */
1262 + setup_tlb_core_data();
1263 +-
1264 +- smp_release_cpus();
1265 + #endif
1266 +
1267 + /* Print various info about the machine that has been gathered so far. */
1268 +@@ -925,6 +923,8 @@ void __init setup_arch(char **cmdline_p)
1269 + exc_lvl_early_init();
1270 + emergency_stack_init();
1271 +
1272 ++ smp_release_cpus();
1273 ++
1274 + initmem_init();
1275 +
1276 + early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
1277 +diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
1278 +index 1b02d338a5f55..c82577c4b15d3 100644
1279 +--- a/arch/powerpc/kernel/setup.h
1280 ++++ b/arch/powerpc/kernel/setup.h
1281 +@@ -8,12 +8,6 @@
1282 + #ifndef __ARCH_POWERPC_KERNEL_SETUP_H
1283 + #define __ARCH_POWERPC_KERNEL_SETUP_H
1284 +
1285 +-#ifdef CONFIG_CC_IS_CLANG
1286 +-#define __nostackprotector
1287 +-#else
1288 +-#define __nostackprotector __attribute__((__optimize__("no-stack-protector")))
1289 +-#endif
1290 +-
1291 + void initialize_cache_info(void);
1292 + void irqstack_early_init(void);
1293 +
1294 +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
1295 +index 480c236724da2..5bc7e753df4d0 100644
1296 +--- a/arch/powerpc/kernel/setup_64.c
1297 ++++ b/arch/powerpc/kernel/setup_64.c
1298 +@@ -284,7 +284,7 @@ void __init record_spr_defaults(void)
1299 + * device-tree is not accessible via normal means at this point.
1300 + */
1301 +
1302 +-void __init __nostackprotector early_setup(unsigned long dt_ptr)
1303 ++void __init early_setup(unsigned long dt_ptr)
1304 + {
1305 + static __initdata struct paca_struct boot_paca;
1306 +
1307 +diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
1308 +index 187047592d53c..bb01a862aaf8d 100644
1309 +--- a/arch/powerpc/mm/fault.c
1310 ++++ b/arch/powerpc/mm/fault.c
1311 +@@ -349,7 +349,6 @@ static inline void cmo_account_page_fault(void)
1312 + static inline void cmo_account_page_fault(void) { }
1313 + #endif /* CONFIG_PPC_SMLPAR */
1314 +
1315 +-#ifdef CONFIG_PPC_BOOK3S
1316 + static void sanity_check_fault(bool is_write, bool is_user,
1317 + unsigned long error_code, unsigned long address)
1318 + {
1319 +@@ -366,6 +365,9 @@ static void sanity_check_fault(bool is_write, bool is_user,
1320 + return;
1321 + }
1322 +
1323 ++ if (!IS_ENABLED(CONFIG_PPC_BOOK3S))
1324 ++ return;
1325 ++
1326 + /*
1327 + * For hash translation mode, we should never get a
1328 + * PROTFAULT. Any update to pte to reduce access will result in us
1329 +@@ -400,10 +402,6 @@ static void sanity_check_fault(bool is_write, bool is_user,
1330 +
1331 + WARN_ON_ONCE(error_code & DSISR_PROTFAULT);
1332 + }
1333 +-#else
1334 +-static void sanity_check_fault(bool is_write, bool is_user,
1335 +- unsigned long error_code, unsigned long address) { }
1336 +-#endif /* CONFIG_PPC_BOOK3S */
1337 +
1338 + /*
1339 + * Define the correct "is_write" bit in error_code based
1340 +diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
1341 +index 96ca90ce0264a..c48705c726ac6 100644
1342 +--- a/arch/powerpc/mm/mem.c
1343 ++++ b/arch/powerpc/mm/mem.c
1344 +@@ -530,7 +530,7 @@ void __flush_dcache_icache(void *p)
1345 + * space occurs, before returning to user space.
1346 + */
1347 +
1348 +- if (cpu_has_feature(MMU_FTR_TYPE_44x))
1349 ++ if (mmu_has_feature(MMU_FTR_TYPE_44x))
1350 + return;
1351 +
1352 + invalidate_icache_range(addr, addr + PAGE_SIZE);
1353 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
1354 +index f582aa2d98078..02fc75ddcbb36 100644
1355 +--- a/arch/powerpc/perf/core-book3s.c
1356 ++++ b/arch/powerpc/perf/core-book3s.c
1357 +@@ -133,6 +133,9 @@ static void pmao_restore_workaround(bool ebb) { }
1358 +
1359 + bool is_sier_available(void)
1360 + {
1361 ++ if (!ppmu)
1362 ++ return false;
1363 ++
1364 + if (ppmu->flags & PPMU_HAS_SIER)
1365 + return true;
1366 +
1367 +@@ -2086,6 +2089,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
1368 + local64_set(&event->hw.period_left, left);
1369 + perf_event_update_userpage(event);
1370 +
1371 ++ /*
1372 ++ * Due to hardware limitation, sometimes SIAR could sample a kernel
1373 ++ * address even when freeze on supervisor state (kernel) is set in
1374 ++ * MMCR2. Check attr.exclude_kernel and address to drop the sample in
1375 ++ * these cases.
1376 ++ */
1377 ++ if (event->attr.exclude_kernel && record)
1378 ++ if (is_kernel_addr(mfspr(SPRN_SIAR)))
1379 ++ record = 0;
1380 ++
1381 + /*
1382 + * Finally record data if requested.
1383 + */
1384 +diff --git a/arch/powerpc/platforms/8xx/micropatch.c b/arch/powerpc/platforms/8xx/micropatch.c
1385 +index c80bd7afd6c5e..b06c6d26dd722 100644
1386 +--- a/arch/powerpc/platforms/8xx/micropatch.c
1387 ++++ b/arch/powerpc/platforms/8xx/micropatch.c
1388 +@@ -361,6 +361,17 @@ void __init cpm_load_patch(cpm8xx_t *cp)
1389 + if (IS_ENABLED(CONFIG_SMC_UCODE_PATCH)) {
1390 + smc_uart_t *smp;
1391 +
1392 ++ if (IS_ENABLED(CONFIG_PPC_EARLY_DEBUG_CPM)) {
1393 ++ int i;
1394 ++
1395 ++ for (i = 0; i < sizeof(*smp); i += 4) {
1396 ++ u32 __iomem *src = (u32 __iomem *)&cp->cp_dparam[PROFF_SMC1 + i];
1397 ++ u32 __iomem *dst = (u32 __iomem *)&cp->cp_dparam[PROFF_DSP1 + i];
1398 ++
1399 ++ out_be32(dst, in_be32(src));
1400 ++ }
1401 ++ }
1402 ++
1403 + smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC1];
1404 + out_be16(&smp->smc_rpbase, 0x1ec0);
1405 + smp = (smc_uart_t *)&cp->cp_dparam[PROFF_SMC2];
1406 +diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
1407 +index eb2e75dac369a..b45ab455a18e8 100644
1408 +--- a/arch/powerpc/platforms/powernv/memtrace.c
1409 ++++ b/arch/powerpc/platforms/powernv/memtrace.c
1410 +@@ -30,6 +30,7 @@ struct memtrace_entry {
1411 + char name[16];
1412 + };
1413 +
1414 ++static DEFINE_MUTEX(memtrace_mutex);
1415 + static u64 memtrace_size;
1416 +
1417 + static struct memtrace_entry *memtrace_array;
1418 +@@ -67,6 +68,23 @@ static int change_memblock_state(struct memory_block *mem, void *arg)
1419 + return 0;
1420 + }
1421 +
1422 ++static void memtrace_clear_range(unsigned long start_pfn,
1423 ++ unsigned long nr_pages)
1424 ++{
1425 ++ unsigned long pfn;
1426 ++
1427 ++ /*
1428 ++ * As pages are offline, we cannot trust the memmap anymore. As HIGHMEM
1429 ++ * does not apply, avoid passing around "struct page" and use
1430 ++ * clear_page() instead directly.
1431 ++ */
1432 ++ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
1433 ++ if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
1434 ++ cond_resched();
1435 ++ clear_page(__va(PFN_PHYS(pfn)));
1436 ++ }
1437 ++}
1438 ++
1439 + /* called with device_hotplug_lock held */
1440 + static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
1441 + {
1442 +@@ -111,6 +129,11 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
1443 + lock_device_hotplug();
1444 + for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
1445 + if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
1446 ++ /*
1447 ++ * Clear the range while we still have a linear
1448 ++ * mapping.
1449 ++ */
1450 ++ memtrace_clear_range(base_pfn, nr_pages);
1451 + /*
1452 + * Remove memory in memory block size chunks so that
1453 + * iomem resources are always split to the same size and
1454 +@@ -268,6 +291,7 @@ static int memtrace_online(void)
1455 +
1456 + static int memtrace_enable_set(void *data, u64 val)
1457 + {
1458 ++ int rc = -EAGAIN;
1459 + u64 bytes;
1460 +
1461 + /*
1462 +@@ -280,25 +304,31 @@ static int memtrace_enable_set(void *data, u64 val)
1463 + return -EINVAL;
1464 + }
1465 +
1466 ++ mutex_lock(&memtrace_mutex);
1467 ++
1468 + /* Re-add/online previously removed/offlined memory */
1469 + if (memtrace_size) {
1470 + if (memtrace_online())
1471 +- return -EAGAIN;
1472 ++ goto out_unlock;
1473 + }
1474 +
1475 +- if (!val)
1476 +- return 0;
1477 ++ if (!val) {
1478 ++ rc = 0;
1479 ++ goto out_unlock;
1480 ++ }
1481 +
1482 + /* Offline and remove memory */
1483 + if (memtrace_init_regions_runtime(val))
1484 +- return -EINVAL;
1485 ++ goto out_unlock;
1486 +
1487 + if (memtrace_init_debugfs())
1488 +- return -EINVAL;
1489 ++ goto out_unlock;
1490 +
1491 + memtrace_size = val;
1492 +-
1493 +- return 0;
1494 ++ rc = 0;
1495 ++out_unlock:
1496 ++ mutex_unlock(&memtrace_mutex);
1497 ++ return rc;
1498 + }
1499 +
1500 + static int memtrace_enable_get(void *data, u64 *val)
1501 +diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
1502 +index b95b9e3c4c98e..c640cb993209a 100644
1503 +--- a/arch/powerpc/platforms/powernv/npu-dma.c
1504 ++++ b/arch/powerpc/platforms/powernv/npu-dma.c
1505 +@@ -384,7 +384,8 @@ static void pnv_npu_peers_take_ownership(struct iommu_table_group *table_group)
1506 + for (i = 0; i < npucomp->pe_num; ++i) {
1507 + struct pnv_ioda_pe *pe = npucomp->pe[i];
1508 +
1509 +- if (!pe->table_group.ops->take_ownership)
1510 ++ if (!pe->table_group.ops ||
1511 ++ !pe->table_group.ops->take_ownership)
1512 + continue;
1513 + pe->table_group.ops->take_ownership(&pe->table_group);
1514 + }
1515 +@@ -400,7 +401,8 @@ static void pnv_npu_peers_release_ownership(
1516 + for (i = 0; i < npucomp->pe_num; ++i) {
1517 + struct pnv_ioda_pe *pe = npucomp->pe[i];
1518 +
1519 +- if (!pe->table_group.ops->release_ownership)
1520 ++ if (!pe->table_group.ops ||
1521 ++ !pe->table_group.ops->release_ownership)
1522 + continue;
1523 + pe->table_group.ops->release_ownership(&pe->table_group);
1524 + }
1525 +@@ -560,6 +562,11 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
1526 + return -ENODEV;
1527 +
1528 + hose = pci_bus_to_host(npdev->bus);
1529 ++ if (hose->npu == NULL) {
1530 ++ dev_info_once(&npdev->dev, "Nvlink1 does not support contexts");
1531 ++ return 0;
1532 ++ }
1533 ++
1534 + nphb = hose->private_data;
1535 +
1536 + dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=%u\n",
1537 +@@ -607,6 +614,11 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
1538 + return -ENODEV;
1539 +
1540 + hose = pci_bus_to_host(npdev->bus);
1541 ++ if (hose->npu == NULL) {
1542 ++ dev_info_once(&npdev->dev, "Nvlink1 does not support contexts");
1543 ++ return 0;
1544 ++ }
1545 ++
1546 + nphb = hose->private_data;
1547 +
1548 + dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
1549 +diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
1550 +index f789693f61f40..e5ecadfb5dea2 100644
1551 +--- a/arch/powerpc/platforms/pseries/suspend.c
1552 ++++ b/arch/powerpc/platforms/pseries/suspend.c
1553 +@@ -13,7 +13,6 @@
1554 + #include <asm/mmu.h>
1555 + #include <asm/rtas.h>
1556 + #include <asm/topology.h>
1557 +-#include "../../kernel/cacheinfo.h"
1558 +
1559 + static u64 stream_id;
1560 + static struct device suspend_dev;
1561 +@@ -78,9 +77,7 @@ static void pseries_suspend_enable_irqs(void)
1562 + * Update configuration which can be modified based on device tree
1563 + * changes during resume.
1564 + */
1565 +- cacheinfo_cpu_offline(smp_processor_id());
1566 + post_mobility_fixup();
1567 +- cacheinfo_cpu_online(smp_processor_id());
1568 + }
1569 +
1570 + /**
1571 +@@ -190,7 +187,6 @@ static struct bus_type suspend_subsys = {
1572 +
1573 + static const struct platform_suspend_ops pseries_suspend_ops = {
1574 + .valid = suspend_valid_only_mem,
1575 +- .begin = pseries_suspend_begin,
1576 + .prepare_late = pseries_prepare_late,
1577 + .enter = pseries_suspend_enter,
1578 + };
1579 +diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
1580 +index 5c1a50912229a..9b0d85bff021e 100644
1581 +--- a/arch/powerpc/xmon/nonstdio.c
1582 ++++ b/arch/powerpc/xmon/nonstdio.c
1583 +@@ -178,7 +178,7 @@ void xmon_printf(const char *format, ...)
1584 +
1585 + if (n && rc == 0) {
1586 + /* No udbg hooks, fallback to printk() - dangerous */
1587 +- printk("%s", xmon_outbuf);
1588 ++ pr_cont("%s", xmon_outbuf);
1589 + }
1590 + }
1591 +
1592 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1593 +index 66d7ba61803c8..659d99af91566 100644
1594 +--- a/arch/s390/kernel/smp.c
1595 ++++ b/arch/s390/kernel/smp.c
1596 +@@ -885,24 +885,12 @@ static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
1597 + /* Upping and downing of CPUs */
1598 + int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1599 + {
1600 +- struct pcpu *pcpu;
1601 +- int base, i, rc;
1602 ++ struct pcpu *pcpu = pcpu_devices + cpu;
1603 ++ int rc;
1604 +
1605 +- pcpu = pcpu_devices + cpu;
1606 + if (pcpu->state != CPU_STATE_CONFIGURED)
1607 + return -EIO;
1608 +- base = smp_get_base_cpu(cpu);
1609 +- for (i = 0; i <= smp_cpu_mtid; i++) {
1610 +- if (base + i < nr_cpu_ids)
1611 +- if (cpu_online(base + i))
1612 +- break;
1613 +- }
1614 +- /*
1615 +- * If this is the first CPU of the core to get online
1616 +- * do an initial CPU reset.
1617 +- */
1618 +- if (i > smp_cpu_mtid &&
1619 +- pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
1620 ++ if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
1621 + SIGP_CC_ORDER_CODE_ACCEPTED)
1622 + return -EIO;
1623 +
1624 +diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
1625 +index 5a10ce34b95d1..3d1c31e0cf3dd 100644
1626 +--- a/arch/s390/purgatory/head.S
1627 ++++ b/arch/s390/purgatory/head.S
1628 +@@ -62,14 +62,15 @@
1629 + jh 10b
1630 + .endm
1631 +
1632 +-.macro START_NEXT_KERNEL base
1633 ++.macro START_NEXT_KERNEL base subcode
1634 + lg %r4,kernel_entry-\base(%r13)
1635 + lg %r5,load_psw_mask-\base(%r13)
1636 + ogr %r4,%r5
1637 + stg %r4,0(%r0)
1638 +
1639 + xgr %r0,%r0
1640 +- diag %r0,%r0,0x308
1641 ++ lghi %r1,\subcode
1642 ++ diag %r0,%r1,0x308
1643 + .endm
1644 +
1645 + .text
1646 +@@ -123,7 +124,7 @@ ENTRY(purgatory_start)
1647 + je .start_crash_kernel
1648 +
1649 + /* start normal kernel */
1650 +- START_NEXT_KERNEL .base_crash
1651 ++ START_NEXT_KERNEL .base_crash 0
1652 +
1653 + .return_old_kernel:
1654 + lmg %r6,%r15,gprregs-.base_crash(%r13)
1655 +@@ -227,7 +228,7 @@ ENTRY(purgatory_start)
1656 + MEMCPY %r9,%r10,%r11
1657 +
1658 + /* start crash kernel */
1659 +- START_NEXT_KERNEL .base_dst
1660 ++ START_NEXT_KERNEL .base_dst 1
1661 +
1662 +
1663 + load_psw_mask:
1664 +diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
1665 +index e6d91819da921..28b9ffd85db0b 100644
1666 +--- a/arch/sparc/mm/init_64.c
1667 ++++ b/arch/sparc/mm/init_64.c
1668 +@@ -2904,7 +2904,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm)
1669 + if (!page)
1670 + return NULL;
1671 + if (!pgtable_pte_page_ctor(page)) {
1672 +- free_unref_page(page);
1673 ++ __free_page(page);
1674 + return NULL;
1675 + }
1676 + return (pte_t *) page_address(page);
1677 +diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
1678 +index 4d80526a4236e..d8845d4aac6a7 100644
1679 +--- a/arch/um/drivers/chan_user.c
1680 ++++ b/arch/um/drivers/chan_user.c
1681 +@@ -26,10 +26,10 @@ int generic_read(int fd, char *c_out, void *unused)
1682 + n = read(fd, c_out, sizeof(*c_out));
1683 + if (n > 0)
1684 + return n;
1685 +- else if (errno == EAGAIN)
1686 +- return 0;
1687 + else if (n == 0)
1688 + return -EIO;
1689 ++ else if (errno == EAGAIN)
1690 ++ return 0;
1691 + return -errno;
1692 + }
1693 +
1694 +diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
1695 +index fc7f1e7467032..87ca4a47cd66e 100644
1696 +--- a/arch/um/drivers/xterm.c
1697 ++++ b/arch/um/drivers/xterm.c
1698 +@@ -18,6 +18,7 @@
1699 + struct xterm_chan {
1700 + int pid;
1701 + int helper_pid;
1702 ++ int chan_fd;
1703 + char *title;
1704 + int device;
1705 + int raw;
1706 +@@ -33,6 +34,7 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts)
1707 + return NULL;
1708 + *data = ((struct xterm_chan) { .pid = -1,
1709 + .helper_pid = -1,
1710 ++ .chan_fd = -1,
1711 + .device = device,
1712 + .title = opts->xterm_title,
1713 + .raw = opts->raw } );
1714 +@@ -149,6 +151,7 @@ static int xterm_open(int input, int output, int primary, void *d,
1715 + goto out_kill;
1716 + }
1717 +
1718 ++ data->chan_fd = fd;
1719 + new = xterm_fd(fd, &data->helper_pid);
1720 + if (new < 0) {
1721 + err = new;
1722 +@@ -206,6 +209,8 @@ static void xterm_close(int fd, void *d)
1723 + os_kill_process(data->helper_pid, 0);
1724 + data->helper_pid = -1;
1725 +
1726 ++ if (data->chan_fd != -1)
1727 ++ os_close_file(data->chan_fd);
1728 + os_close_file(fd);
1729 + }
1730 +
1731 +diff --git a/arch/um/os-Linux/irq.c b/arch/um/os-Linux/irq.c
1732 +index d508310ee5e1e..f1732c308c615 100644
1733 +--- a/arch/um/os-Linux/irq.c
1734 ++++ b/arch/um/os-Linux/irq.c
1735 +@@ -48,7 +48,7 @@ int os_epoll_triggered(int index, int events)
1736 + int os_event_mask(int irq_type)
1737 + {
1738 + if (irq_type == IRQ_READ)
1739 +- return EPOLLIN | EPOLLPRI;
1740 ++ return EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP;
1741 + if (irq_type == IRQ_WRITE)
1742 + return EPOLLOUT;
1743 + return 0;
1744 +diff --git a/arch/um/os-Linux/umid.c b/arch/um/os-Linux/umid.c
1745 +index 44def53a11cd6..ea5c60f4393e9 100644
1746 +--- a/arch/um/os-Linux/umid.c
1747 ++++ b/arch/um/os-Linux/umid.c
1748 +@@ -137,20 +137,13 @@ static inline int is_umdir_used(char *dir)
1749 + {
1750 + char pid[sizeof("nnnnn\0")], *end, *file;
1751 + int dead, fd, p, n, err;
1752 +- size_t filelen;
1753 ++ size_t filelen = strlen(dir) + sizeof("/pid") + 1;
1754 +
1755 +- err = asprintf(&file, "%s/pid", dir);
1756 +- if (err < 0)
1757 +- return 0;
1758 +-
1759 +- filelen = strlen(file);
1760 ++ file = malloc(filelen);
1761 ++ if (!file)
1762 ++ return -ENOMEM;
1763 +
1764 +- n = snprintf(file, filelen, "%s/pid", dir);
1765 +- if (n >= filelen) {
1766 +- printk(UM_KERN_ERR "is_umdir_used - pid filename too long\n");
1767 +- err = -E2BIG;
1768 +- goto out;
1769 +- }
1770 ++ snprintf(file, filelen, "%s/pid", dir);
1771 +
1772 + dead = 0;
1773 + fd = open(file, O_RDONLY);
1774 +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
1775 +index c4def90777475..b24c38090dd99 100644
1776 +--- a/arch/x86/events/intel/core.c
1777 ++++ b/arch/x86/events/intel/core.c
1778 +@@ -253,7 +253,8 @@ static struct event_constraint intel_icl_event_constraints[] = {
1779 + INTEL_EVENT_CONSTRAINT_RANGE(0x48, 0x54, 0xf),
1780 + INTEL_EVENT_CONSTRAINT_RANGE(0x60, 0x8b, 0xf),
1781 + INTEL_UEVENT_CONSTRAINT(0x04a3, 0xff), /* CYCLE_ACTIVITY.STALLS_TOTAL */
1782 +- INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
1783 ++ INTEL_UEVENT_CONSTRAINT(0x10a3, 0xff), /* CYCLE_ACTIVITY.CYCLES_MEM_ANY */
1784 ++ INTEL_UEVENT_CONSTRAINT(0x14a3, 0xff), /* CYCLE_ACTIVITY.STALLS_MEM_ANY */
1785 + INTEL_EVENT_CONSTRAINT(0xa3, 0xf), /* CYCLE_ACTIVITY.* */
1786 + INTEL_EVENT_CONSTRAINT_RANGE(0xa8, 0xb0, 0xf),
1787 + INTEL_EVENT_CONSTRAINT_RANGE(0xb7, 0xbd, 0xf),
1788 +@@ -5057,7 +5058,7 @@ __init int intel_pmu_init(void)
1789 + extra_skl_attr = skl_format_attr;
1790 + mem_attr = icl_events_attrs;
1791 + tsx_attr = icl_tsx_events_attrs;
1792 +- x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xca, .umask=0x02);
1793 ++ x86_pmu.rtm_abort_event = X86_CONFIG(.event=0xc9, .umask=0x04);
1794 + x86_pmu.lbr_pt_coexist = true;
1795 + intel_pmu_pebs_data_source_skl(pmem);
1796 + pr_cont("Icelake events, ");
1797 +diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
1798 +index 1aaba2c8a9ba6..eb8bd0eeace7d 100644
1799 +--- a/arch/x86/events/intel/ds.c
1800 ++++ b/arch/x86/events/intel/ds.c
1801 +@@ -1912,7 +1912,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
1802 + * that caused the PEBS record. It's called collision.
1803 + * If collision happened, the record will be dropped.
1804 + */
1805 +- if (p->status != (1ULL << bit)) {
1806 ++ if (pebs_status != (1ULL << bit)) {
1807 + for_each_set_bit(i, (unsigned long *)&pebs_status, size)
1808 + error[i]++;
1809 + continue;
1810 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
1811 +index 19e94af9cc5d7..6016559ed1713 100644
1812 +--- a/arch/x86/include/asm/apic.h
1813 ++++ b/arch/x86/include/asm/apic.h
1814 +@@ -259,6 +259,7 @@ static inline u64 native_x2apic_icr_read(void)
1815 +
1816 + extern int x2apic_mode;
1817 + extern int x2apic_phys;
1818 ++extern void __init x2apic_set_max_apicid(u32 apicid);
1819 + extern void __init check_x2apic(void);
1820 + extern void x2apic_setup(void);
1821 + static inline int x2apic_enabled(void)
1822 +diff --git a/arch/x86/include/asm/cacheinfo.h b/arch/x86/include/asm/cacheinfo.h
1823 +index 86b63c7feab75..86b2e0dcc4bfe 100644
1824 +--- a/arch/x86/include/asm/cacheinfo.h
1825 ++++ b/arch/x86/include/asm/cacheinfo.h
1826 +@@ -2,7 +2,7 @@
1827 + #ifndef _ASM_X86_CACHEINFO_H
1828 + #define _ASM_X86_CACHEINFO_H
1829 +
1830 +-void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
1831 +-void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id);
1832 ++void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu);
1833 ++void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu);
1834 +
1835 + #endif /* _ASM_X86_CACHEINFO_H */
1836 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1837 +index fce94c799f015..06fa808d72032 100644
1838 +--- a/arch/x86/kernel/apic/apic.c
1839 ++++ b/arch/x86/kernel/apic/apic.c
1840 +@@ -1886,20 +1886,22 @@ static __init void try_to_enable_x2apic(int remap_mode)
1841 + return;
1842 +
1843 + if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
1844 +- /* IR is required if there is APIC ID > 255 even when running
1845 +- * under KVM
1846 ++ /*
1847 ++ * Using X2APIC without IR is not architecturally supported
1848 ++ * on bare metal but may be supported in guests.
1849 + */
1850 +- if (max_physical_apicid > 255 ||
1851 +- !x86_init.hyper.x2apic_available()) {
1852 ++ if (!x86_init.hyper.x2apic_available()) {
1853 + pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
1854 + x2apic_disable();
1855 + return;
1856 + }
1857 +
1858 + /*
1859 +- * without IR all CPUs can be addressed by IOAPIC/MSI
1860 +- * only in physical mode
1861 ++ * Without IR, all CPUs can be addressed by IOAPIC/MSI only
1862 ++ * in physical mode, and CPUs with an APIC ID that cannot
1863 ++ * be addressed must not be brought online.
1864 + */
1865 ++ x2apic_set_max_apicid(255);
1866 + x2apic_phys = 1;
1867 + }
1868 + x2apic_enable();
1869 +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
1870 +index bc9693841353c..e14eae6d6ea71 100644
1871 +--- a/arch/x86/kernel/apic/x2apic_phys.c
1872 ++++ b/arch/x86/kernel/apic/x2apic_phys.c
1873 +@@ -8,6 +8,12 @@
1874 + int x2apic_phys;
1875 +
1876 + static struct apic apic_x2apic_phys;
1877 ++static u32 x2apic_max_apicid __ro_after_init;
1878 ++
1879 ++void __init x2apic_set_max_apicid(u32 apicid)
1880 ++{
1881 ++ x2apic_max_apicid = apicid;
1882 ++}
1883 +
1884 + static int __init set_x2apic_phys_mode(char *arg)
1885 + {
1886 +@@ -98,6 +104,9 @@ static int x2apic_phys_probe(void)
1887 + /* Common x2apic functions, also used by x2apic_cluster */
1888 + int x2apic_apic_id_valid(u32 apicid)
1889 + {
1890 ++ if (x2apic_max_apicid && apicid > x2apic_max_apicid)
1891 ++ return 0;
1892 ++
1893 + return 1;
1894 + }
1895 +
1896 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
1897 +index c553cafd0736b..52373921af2eb 100644
1898 +--- a/arch/x86/kernel/cpu/amd.c
1899 ++++ b/arch/x86/kernel/cpu/amd.c
1900 +@@ -335,7 +335,6 @@ static void amd_get_topology_early(struct cpuinfo_x86 *c)
1901 + */
1902 + static void amd_get_topology(struct cpuinfo_x86 *c)
1903 + {
1904 +- u8 node_id;
1905 + int cpu = smp_processor_id();
1906 +
1907 + /* get information required for multi-node processors */
1908 +@@ -345,7 +344,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
1909 +
1910 + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
1911 +
1912 +- node_id = ecx & 0xff;
1913 ++ c->cpu_die_id = ecx & 0xff;
1914 +
1915 + if (c->x86 == 0x15)
1916 + c->cu_id = ebx & 0xff;
1917 +@@ -365,15 +364,15 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
1918 + if (!err)
1919 + c->x86_coreid_bits = get_count_order(c->x86_max_cores);
1920 +
1921 +- cacheinfo_amd_init_llc_id(c, cpu, node_id);
1922 ++ cacheinfo_amd_init_llc_id(c, cpu);
1923 +
1924 + } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
1925 + u64 value;
1926 +
1927 + rdmsrl(MSR_FAM10H_NODE_ID, value);
1928 +- node_id = value & 7;
1929 ++ c->cpu_die_id = value & 7;
1930 +
1931 +- per_cpu(cpu_llc_id, cpu) = node_id;
1932 ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
1933 + } else
1934 + return;
1935 +
1936 +@@ -398,7 +397,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
1937 + /* Convert the initial APIC ID into the socket ID */
1938 + c->phys_proc_id = c->initial_apicid >> bits;
1939 + /* use socket ID also for last level cache */
1940 +- per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
1941 ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
1942 + }
1943 +
1944 + u16 amd_get_nb_id(int cpu)
1945 +diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
1946 +index c7503be92f359..30f33b75209a1 100644
1947 +--- a/arch/x86/kernel/cpu/cacheinfo.c
1948 ++++ b/arch/x86/kernel/cpu/cacheinfo.c
1949 +@@ -646,7 +646,7 @@ static int find_num_cache_leaves(struct cpuinfo_x86 *c)
1950 + return i;
1951 + }
1952 +
1953 +-void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
1954 ++void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu)
1955 + {
1956 + /*
1957 + * We may have multiple LLCs if L3 caches exist, so check if we
1958 +@@ -657,7 +657,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
1959 +
1960 + if (c->x86 < 0x17) {
1961 + /* LLC is at the node level. */
1962 +- per_cpu(cpu_llc_id, cpu) = node_id;
1963 ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
1964 + } else if (c->x86 == 0x17 && c->x86_model <= 0x1F) {
1965 + /*
1966 + * LLC is at the core complex level.
1967 +@@ -684,7 +684,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
1968 + }
1969 + }
1970 +
1971 +-void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
1972 ++void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu)
1973 + {
1974 + /*
1975 + * We may have multiple LLCs if L3 caches exist, so check if we
1976 +diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c
1977 +index 4e28c1fc87499..62e9a982adaf9 100644
1978 +--- a/arch/x86/kernel/cpu/hygon.c
1979 ++++ b/arch/x86/kernel/cpu/hygon.c
1980 +@@ -64,7 +64,6 @@ static void hygon_get_topology_early(struct cpuinfo_x86 *c)
1981 + */
1982 + static void hygon_get_topology(struct cpuinfo_x86 *c)
1983 + {
1984 +- u8 node_id;
1985 + int cpu = smp_processor_id();
1986 +
1987 + /* get information required for multi-node processors */
1988 +@@ -74,7 +73,7 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
1989 +
1990 + cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
1991 +
1992 +- node_id = ecx & 0xff;
1993 ++ c->cpu_die_id = ecx & 0xff;
1994 +
1995 + c->cpu_core_id = ebx & 0xff;
1996 +
1997 +@@ -92,14 +91,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c)
1998 + /* Socket ID is ApicId[6] for these processors. */
1999 + c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;
2000 +
2001 +- cacheinfo_hygon_init_llc_id(c, cpu, node_id);
2002 ++ cacheinfo_hygon_init_llc_id(c, cpu);
2003 + } else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
2004 + u64 value;
2005 +
2006 + rdmsrl(MSR_FAM10H_NODE_ID, value);
2007 +- node_id = value & 7;
2008 ++ c->cpu_die_id = value & 7;
2009 +
2010 +- per_cpu(cpu_llc_id, cpu) = node_id;
2011 ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
2012 + } else
2013 + return;
2014 +
2015 +@@ -122,7 +121,7 @@ static void hygon_detect_cmp(struct cpuinfo_x86 *c)
2016 + /* Convert the initial APIC ID into the socket ID */
2017 + c->phys_proc_id = c->initial_apicid >> bits;
2018 + /* use socket ID also for last level cache */
2019 +- per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
2020 ++ per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
2021 + }
2022 +
2023 + static void srat_detect_node(struct cpuinfo_x86 *c)
2024 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
2025 +index 62c39baea39e3..5294018535d0c 100644
2026 +--- a/arch/x86/kernel/kprobes/core.c
2027 ++++ b/arch/x86/kernel/kprobes/core.c
2028 +@@ -1019,6 +1019,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
2029 + * So clear it by resetting the current kprobe:
2030 + */
2031 + regs->flags &= ~X86_EFLAGS_TF;
2032 ++ /*
2033 ++ * Since the single step (trap) has been cancelled,
2034 ++ * we need to restore BTF here.
2035 ++ */
2036 ++ restore_btf();
2037 +
2038 + /*
2039 + * If the TF flag was set before the kprobe hit,
2040 +diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
2041 +index fe7a12599d8eb..968d7005f4a72 100644
2042 +--- a/arch/x86/mm/ident_map.c
2043 ++++ b/arch/x86/mm/ident_map.c
2044 +@@ -62,6 +62,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
2045 + unsigned long addr, unsigned long end)
2046 + {
2047 + unsigned long next;
2048 ++ int result;
2049 +
2050 + for (; addr < end; addr = next) {
2051 + p4d_t *p4d = p4d_page + p4d_index(addr);
2052 +@@ -73,13 +74,20 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
2053 +
2054 + if (p4d_present(*p4d)) {
2055 + pud = pud_offset(p4d, 0);
2056 +- ident_pud_init(info, pud, addr, next);
2057 ++ result = ident_pud_init(info, pud, addr, next);
2058 ++ if (result)
2059 ++ return result;
2060 ++
2061 + continue;
2062 + }
2063 + pud = (pud_t *)info->alloc_pgt_page(info->context);
2064 + if (!pud)
2065 + return -ENOMEM;
2066 +- ident_pud_init(info, pud, addr, next);
2067 ++
2068 ++ result = ident_pud_init(info, pud, addr, next);
2069 ++ if (result)
2070 ++ return result;
2071 ++
2072 + set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
2073 + }
2074 +
2075 +diff --git a/block/blk-mq.c b/block/blk-mq.c
2076 +index b748d1e63f9c8..057a634396a90 100644
2077 +--- a/block/blk-mq.c
2078 ++++ b/block/blk-mq.c
2079 +@@ -1205,6 +1205,23 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
2080 +
2081 + #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
2082 +
2083 ++static void blk_mq_handle_dev_resource(struct request *rq,
2084 ++ struct list_head *list)
2085 ++{
2086 ++ struct request *next =
2087 ++ list_first_entry_or_null(list, struct request, queuelist);
2088 ++
2089 ++ /*
2090 ++ * If an I/O scheduler has been configured and we got a driver tag for
2091 ++ * the next request already, free it.
2092 ++ */
2093 ++ if (next)
2094 ++ blk_mq_put_driver_tag(next);
2095 ++
2096 ++ list_add(&rq->queuelist, list);
2097 ++ __blk_mq_requeue_request(rq);
2098 ++}
2099 ++
2100 + /*
2101 + * Returns true if we did some work AND can potentially do more.
2102 + */
2103 +@@ -1216,6 +1233,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
2104 + bool no_tag = false;
2105 + int errors, queued;
2106 + blk_status_t ret = BLK_STS_OK;
2107 ++ bool no_budget_avail = false;
2108 +
2109 + if (list_empty(list))
2110 + return false;
2111 +@@ -1234,6 +1252,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
2112 + hctx = rq->mq_hctx;
2113 + if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
2114 + blk_mq_put_driver_tag(rq);
2115 ++ no_budget_avail = true;
2116 + break;
2117 + }
2118 +
2119 +@@ -1274,17 +1293,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
2120 +
2121 + ret = q->mq_ops->queue_rq(hctx, &bd);
2122 + if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
2123 +- /*
2124 +- * If an I/O scheduler has been configured and we got a
2125 +- * driver tag for the next request already, free it
2126 +- * again.
2127 +- */
2128 +- if (!list_empty(list)) {
2129 +- nxt = list_first_entry(list, struct request, queuelist);
2130 +- blk_mq_put_driver_tag(nxt);
2131 +- }
2132 +- list_add(&rq->queuelist, list);
2133 +- __blk_mq_requeue_request(rq);
2134 ++ blk_mq_handle_dev_resource(rq, list);
2135 + break;
2136 + }
2137 +
2138 +@@ -1349,13 +1358,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
2139 + *
2140 + * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
2141 + * bit is set, run queue after a delay to avoid IO stalls
2142 +- * that could otherwise occur if the queue is idle.
2143 ++ * that could otherwise occur if the queue is idle. We'll do
2144 ++ * similar if we couldn't get budget and SCHED_RESTART is set.
2145 + */
2146 + needs_restart = blk_mq_sched_needs_restart(hctx);
2147 + if (!needs_restart ||
2148 + (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
2149 + blk_mq_run_hw_queue(hctx, true);
2150 +- else if (needs_restart && (ret == BLK_STS_RESOURCE))
2151 ++ else if (needs_restart && (ret == BLK_STS_RESOURCE ||
2152 ++ no_budget_avail))
2153 + blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
2154 +
2155 + blk_mq_update_dispatch_busy(hctx, true);
2156 +diff --git a/block/blk-zoned.c b/block/blk-zoned.c
2157 +index 4bc5f260248a6..b17c094cb977c 100644
2158 +--- a/block/blk-zoned.c
2159 ++++ b/block/blk-zoned.c
2160 +@@ -202,32 +202,14 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector,
2161 + }
2162 + EXPORT_SYMBOL_GPL(blkdev_report_zones);
2163 +
2164 +-/*
2165 +- * Special case of zone reset operation to reset all zones in one command,
2166 +- * useful for applications like mkfs.
2167 +- */
2168 +-static int __blkdev_reset_all_zones(struct block_device *bdev, gfp_t gfp_mask)
2169 +-{
2170 +- struct bio *bio = bio_alloc(gfp_mask, 0);
2171 +- int ret;
2172 +-
2173 +- /* across the zones operations, don't need any sectors */
2174 +- bio_set_dev(bio, bdev);
2175 +- bio_set_op_attrs(bio, REQ_OP_ZONE_RESET_ALL, 0);
2176 +-
2177 +- ret = submit_bio_wait(bio);
2178 +- bio_put(bio);
2179 +-
2180 +- return ret;
2181 +-}
2182 +-
2183 + static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
2184 ++ sector_t sector,
2185 + sector_t nr_sectors)
2186 + {
2187 + if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
2188 + return false;
2189 +
2190 +- if (nr_sectors != part_nr_sects_read(bdev->bd_part))
2191 ++ if (sector || nr_sectors != part_nr_sects_read(bdev->bd_part))
2192 + return false;
2193 + /*
2194 + * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is
2195 +@@ -271,9 +253,6 @@ int blkdev_reset_zones(struct block_device *bdev,
2196 + /* Out of range */
2197 + return -EINVAL;
2198 +
2199 +- if (blkdev_allow_reset_all_zones(bdev, nr_sectors))
2200 +- return __blkdev_reset_all_zones(bdev, gfp_mask);
2201 +-
2202 + /* Check alignment (handle eventual smaller last zone) */
2203 + zone_sectors = blk_queue_zone_sectors(q);
2204 + if (sector & (zone_sectors - 1))
2205 +@@ -285,17 +264,24 @@ int blkdev_reset_zones(struct block_device *bdev,
2206 +
2207 + blk_start_plug(&plug);
2208 + while (sector < end_sector) {
2209 +-
2210 + bio = blk_next_bio(bio, 0, gfp_mask);
2211 +- bio->bi_iter.bi_sector = sector;
2212 + bio_set_dev(bio, bdev);
2213 +- bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
2214 +
2215 ++ /*
2216 ++ * Special case for the zone reset operation that resets all
2217 ++ * zones; this is useful for applications like mkfs.
2218 ++ */
2219 ++ if (blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
2220 ++ bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
2221 ++ break;
2222 ++ }
2223 ++
2224 ++ bio->bi_opf = REQ_OP_ZONE_RESET;
2225 ++ bio->bi_iter.bi_sector = sector;
2226 + sector += zone_sectors;
2227 +
2228 + /* This may take a while, so be nice to others */
2229 + cond_resched();
2230 +-
2231 + }
2232 +
2233 + ret = submit_bio_wait(bio);
2234 +diff --git a/crypto/af_alg.c b/crypto/af_alg.c
2235 +index 1d4b0157ee5dc..4a2e91baabdef 100644
2236 +--- a/crypto/af_alg.c
2237 ++++ b/crypto/af_alg.c
2238 +@@ -147,7 +147,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2239 + const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
2240 + struct sock *sk = sock->sk;
2241 + struct alg_sock *ask = alg_sk(sk);
2242 +- struct sockaddr_alg *sa = (void *)uaddr;
2243 ++ struct sockaddr_alg_new *sa = (void *)uaddr;
2244 + const struct af_alg_type *type;
2245 + void *private;
2246 + int err;
2247 +@@ -155,7 +155,11 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2248 + if (sock->state == SS_CONNECTED)
2249 + return -EINVAL;
2250 +
2251 +- if (addr_len < sizeof(*sa))
2252 ++ BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) !=
2253 ++ offsetof(struct sockaddr_alg, salg_name));
2254 ++ BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa));
2255 ++
2256 ++ if (addr_len < sizeof(*sa) + 1)
2257 + return -EINVAL;
2258 +
2259 + /* If caller uses non-allowed flag, return error. */
2260 +@@ -163,7 +167,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2261 + return -EINVAL;
2262 +
2263 + sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
2264 +- sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
2265 ++ sa->salg_name[addr_len - sizeof(*sa) - 1] = 0;
2266 +
2267 + type = alg_get_type(sa->salg_type);
2268 + if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
2269 +diff --git a/crypto/ecdh.c b/crypto/ecdh.c
2270 +index bd599053a8c4b..efa4ee72301f8 100644
2271 +--- a/crypto/ecdh.c
2272 ++++ b/crypto/ecdh.c
2273 +@@ -53,12 +53,13 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
2274 + return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
2275 + ctx->private_key);
2276 +
2277 +- if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
2278 +- (const u64 *)params.key, params.key_size) < 0)
2279 +- return -EINVAL;
2280 +-
2281 + memcpy(ctx->private_key, params.key, params.key_size);
2282 +
2283 ++ if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
2284 ++ ctx->private_key, params.key_size) < 0) {
2285 ++ memzero_explicit(ctx->private_key, params.key_size);
2286 ++ return -EINVAL;
2287 ++ }
2288 + return 0;
2289 + }
2290 +
2291 +diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
2292 +index f3039b93ff61a..101887528848d 100644
2293 +--- a/drivers/acpi/acpi_pnp.c
2294 ++++ b/drivers/acpi/acpi_pnp.c
2295 +@@ -317,6 +317,9 @@ static bool matching_id(const char *idstr, const char *list_id)
2296 + {
2297 + int i;
2298 +
2299 ++ if (strlen(idstr) != strlen(list_id))
2300 ++ return false;
2301 ++
2302 + if (memcmp(idstr, list_id, 3))
2303 + return false;
2304 +
2305 +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
2306 +index 1a5956fb2cbce..72e6fad39a5e1 100644
2307 +--- a/drivers/acpi/device_pm.c
2308 ++++ b/drivers/acpi/device_pm.c
2309 +@@ -749,7 +749,7 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
2310 + static DEFINE_MUTEX(acpi_wakeup_lock);
2311 +
2312 + static int __acpi_device_wakeup_enable(struct acpi_device *adev,
2313 +- u32 target_state, int max_count)
2314 ++ u32 target_state)
2315 + {
2316 + struct acpi_device_wakeup *wakeup = &adev->wakeup;
2317 + acpi_status status;
2318 +@@ -757,9 +757,10 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev,
2319 +
2320 + mutex_lock(&acpi_wakeup_lock);
2321 +
2322 +- if (wakeup->enable_count >= max_count)
2323 ++ if (wakeup->enable_count >= INT_MAX) {
2324 ++ acpi_handle_info(adev->handle, "Wakeup enable count out of bounds!\n");
2325 + goto out;
2326 +-
2327 ++ }
2328 + if (wakeup->enable_count > 0)
2329 + goto inc;
2330 +
2331 +@@ -799,7 +800,7 @@ out:
2332 + */
2333 + static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state)
2334 + {
2335 +- return __acpi_device_wakeup_enable(adev, target_state, 1);
2336 ++ return __acpi_device_wakeup_enable(adev, target_state);
2337 + }
2338 +
2339 + /**
2340 +@@ -829,8 +830,12 @@ out:
2341 + mutex_unlock(&acpi_wakeup_lock);
2342 + }
2343 +
2344 +-static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
2345 +- int max_count)
2346 ++/**
2347 ++ * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
2348 ++ * @dev: Device to enable/disable to generate wakeup events.
2349 ++ * @enable: Whether to enable or disable the wakeup functionality.
2350 ++ */
2351 ++int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
2352 + {
2353 + struct acpi_device *adev;
2354 + int error;
2355 +@@ -850,36 +855,14 @@ static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
2356 + return 0;
2357 + }
2358 +
2359 +- error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(),
2360 +- max_count);
2361 ++ error = __acpi_device_wakeup_enable(adev, acpi_target_system_state());
2362 + if (!error)
2363 + dev_dbg(dev, "Wakeup enabled by ACPI\n");
2364 +
2365 + return error;
2366 + }
2367 +-
2368 +-/**
2369 +- * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
2370 +- * @dev: Device to enable/disable to generate wakeup events.
2371 +- * @enable: Whether to enable or disable the wakeup functionality.
2372 +- */
2373 +-int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
2374 +-{
2375 +- return __acpi_pm_set_device_wakeup(dev, enable, 1);
2376 +-}
2377 + EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup);
2378 +
2379 +-/**
2380 +- * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge.
2381 +- * @dev: Bridge device to enable/disable to generate wakeup events.
2382 +- * @enable: Whether to enable or disable the wakeup functionality.
2383 +- */
2384 +-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
2385 +-{
2386 +- return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX);
2387 +-}
2388 +-EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup);
2389 +-
2390 + /**
2391 + * acpi_dev_pm_low_power - Put ACPI device into a low-power state.
2392 + * @dev: Device to put into a low-power state.
2393 +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
2394 +index 2a3e392751e0a..48ca9a844f06b 100644
2395 +--- a/drivers/acpi/resource.c
2396 ++++ b/drivers/acpi/resource.c
2397 +@@ -541,7 +541,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
2398 + ret = c->preproc(ares, c->preproc_data);
2399 + if (ret < 0) {
2400 + c->error = ret;
2401 +- return AE_CTRL_TERMINATE;
2402 ++ return AE_ABORT_METHOD;
2403 + } else if (ret > 0) {
2404 + return AE_OK;
2405 + }
2406 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
2407 +index b62b1ab6bb699..89b590c9573ff 100644
2408 +--- a/drivers/android/binder.c
2409 ++++ b/drivers/android/binder.c
2410 +@@ -3150,6 +3150,7 @@ static void binder_transaction(struct binder_proc *proc,
2411 + t->buffer->debug_id = t->debug_id;
2412 + t->buffer->transaction = t;
2413 + t->buffer->target_node = target_node;
2414 ++ t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
2415 + trace_binder_transaction_alloc_buf(t->buffer);
2416 +
2417 + if (binder_alloc_copy_user_to_buffer(
2418 +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
2419 +index 2048ba6c8b082..3526bb1488e5e 100644
2420 +--- a/drivers/android/binder_alloc.c
2421 ++++ b/drivers/android/binder_alloc.c
2422 +@@ -647,6 +647,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
2423 + binder_insert_free_buffer(alloc, buffer);
2424 + }
2425 +
2426 ++static void binder_alloc_clear_buf(struct binder_alloc *alloc,
2427 ++ struct binder_buffer *buffer);
2428 + /**
2429 + * binder_alloc_free_buf() - free a binder buffer
2430 + * @alloc: binder_alloc for this proc
2431 +@@ -657,6 +659,18 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
2432 + void binder_alloc_free_buf(struct binder_alloc *alloc,
2433 + struct binder_buffer *buffer)
2434 + {
2435 ++ /*
2436 ++ * We could eliminate the call to binder_alloc_clear_buf()
2437 ++ * from binder_alloc_deferred_release() by moving this to
2438 ++ * binder_alloc_free_buf_locked(). However, that could
2439 ++ * increase contention for the alloc mutex if clear_on_free
2440 ++ * is used frequently for large buffers. The mutex is not
2441 ++ * needed for correctness here.
2442 ++ */
2443 ++ if (buffer->clear_on_free) {
2444 ++ binder_alloc_clear_buf(alloc, buffer);
2445 ++ buffer->clear_on_free = false;
2446 ++ }
2447 + mutex_lock(&alloc->mutex);
2448 + binder_free_buf_locked(alloc, buffer);
2449 + mutex_unlock(&alloc->mutex);
2450 +@@ -753,6 +767,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
2451 + /* Transaction should already have been freed */
2452 + BUG_ON(buffer->transaction);
2453 +
2454 ++ if (buffer->clear_on_free) {
2455 ++ binder_alloc_clear_buf(alloc, buffer);
2456 ++ buffer->clear_on_free = false;
2457 ++ }
2458 + binder_free_buf_locked(alloc, buffer);
2459 + buffers++;
2460 + }
2461 +@@ -1086,6 +1104,36 @@ static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
2462 + return lru_page->page_ptr;
2463 + }
2464 +
2465 ++/**
2466 ++ * binder_alloc_clear_buf() - zero out buffer
2467 ++ * @alloc: binder_alloc for this proc
2468 ++ * @buffer: binder buffer to be cleared
2469 ++ *
2470 ++ * memset the given buffer to 0
2471 ++ */
2472 ++static void binder_alloc_clear_buf(struct binder_alloc *alloc,
2473 ++ struct binder_buffer *buffer)
2474 ++{
2475 ++ size_t bytes = binder_alloc_buffer_size(alloc, buffer);
2476 ++ binder_size_t buffer_offset = 0;
2477 ++
2478 ++ while (bytes) {
2479 ++ unsigned long size;
2480 ++ struct page *page;
2481 ++ pgoff_t pgoff;
2482 ++ void *kptr;
2483 ++
2484 ++ page = binder_alloc_get_page(alloc, buffer,
2485 ++ buffer_offset, &pgoff);
2486 ++ size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
2487 ++ kptr = kmap(page) + pgoff;
2488 ++ memset(kptr, 0, size);
2489 ++ kunmap(page);
2490 ++ bytes -= size;
2491 ++ buffer_offset += size;
2492 ++ }
2493 ++}
2494 ++
2495 + /**
2496 + * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
2497 + * @alloc: binder_alloc for this proc
2498 +diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
2499 +index db9c1b984695d..288d0f478aa38 100644
2500 +--- a/drivers/android/binder_alloc.h
2501 ++++ b/drivers/android/binder_alloc.h
2502 +@@ -23,6 +23,7 @@ struct binder_transaction;
2503 + * @entry: entry alloc->buffers
2504 + * @rb_node: node for allocated_buffers/free_buffers rb trees
2505 + * @free: %true if buffer is free
2506 ++ * @clear_on_free: %true if buffer must be zeroed after use
2507 + * @allow_user_free: %true if user is allowed to free buffer
2508 + * @async_transaction: %true if buffer is in use for an async txn
2509 + * @debug_id: unique ID for debugging
2510 +@@ -40,9 +41,10 @@ struct binder_buffer {
2511 + struct rb_node rb_node; /* free entry by size or allocated entry */
2512 + /* by address */
2513 + unsigned free:1;
2514 ++ unsigned clear_on_free:1;
2515 + unsigned allow_user_free:1;
2516 + unsigned async_transaction:1;
2517 +- unsigned debug_id:29;
2518 ++ unsigned debug_id:28;
2519 +
2520 + struct binder_transaction *transaction;
2521 +
2522 +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
2523 +index 192ca58cc3c7f..040d7bb213978 100644
2524 +--- a/drivers/block/xen-blkback/xenbus.c
2525 ++++ b/drivers/block/xen-blkback/xenbus.c
2526 +@@ -256,6 +256,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
2527 +
2528 + if (ring->xenblkd) {
2529 + kthread_stop(ring->xenblkd);
2530 ++ ring->xenblkd = NULL;
2531 + wake_up(&ring->shutdown_wq);
2532 + }
2533 +
2534 +@@ -643,7 +644,8 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
2535 + /* setup back pointer */
2536 + be->blkif->be = be;
2537 +
2538 +- err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
2539 ++ err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
2540 ++ backend_changed,
2541 + "%s/%s", dev->nodename, "physical-device");
2542 + if (err)
2543 + goto fail;
2544 +diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
2545 +index b7de7cb8cca90..304178be1ef40 100644
2546 +--- a/drivers/bluetooth/btmtksdio.c
2547 ++++ b/drivers/bluetooth/btmtksdio.c
2548 +@@ -703,7 +703,7 @@ static int mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
2549 + err = mtk_hci_wmt_sync(hdev, &wmt_params);
2550 + if (err < 0) {
2551 + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
2552 +- return err;
2553 ++ goto free_fw;
2554 + }
2555 +
2556 + fw_ptr = fw->data;
2557 +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
2558 +index b326eeddaadf0..b92bd97b1c399 100644
2559 +--- a/drivers/bluetooth/btusb.c
2560 ++++ b/drivers/bluetooth/btusb.c
2561 +@@ -2812,7 +2812,7 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
2562 + err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
2563 + if (err < 0) {
2564 + bt_dev_err(hdev, "Failed to power on data RAM (%d)", err);
2565 +- return err;
2566 ++ goto err_release_fw;
2567 + }
2568 +
2569 + fw_ptr = fw->data;
2570 +diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
2571 +index 5df0651b6cd55..e11af747395dd 100644
2572 +--- a/drivers/bluetooth/hci_h5.c
2573 ++++ b/drivers/bluetooth/hci_h5.c
2574 +@@ -244,6 +244,9 @@ static int h5_close(struct hci_uart *hu)
2575 + skb_queue_purge(&h5->rel);
2576 + skb_queue_purge(&h5->unrel);
2577 +
2578 ++ kfree_skb(h5->rx_skb);
2579 ++ h5->rx_skb = NULL;
2580 ++
2581 + if (h5->vnd && h5->vnd->close)
2582 + h5->vnd->close(h5);
2583 +
2584 +diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
2585 +index cc7bb900f5249..95672306d3714 100644
2586 +--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
2587 ++++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
2588 +@@ -292,8 +292,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
2589 + goto error;
2590 +
2591 + mc_adev = resource->data;
2592 +- if (!mc_adev)
2593 ++ if (!mc_adev) {
2594 ++ error = -EINVAL;
2595 + goto error;
2596 ++ }
2597 +
2598 + mc_adev->consumer_link = device_link_add(&mc_dev->dev,
2599 + &mc_adev->dev,
2600 +diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
2601 +index 1b14256376d24..7c1da45be166e 100644
2602 +--- a/drivers/bus/mips_cdmm.c
2603 ++++ b/drivers/bus/mips_cdmm.c
2604 +@@ -544,10 +544,8 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
2605 + dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
2606 + ++id;
2607 + ret = device_register(&dev->dev);
2608 +- if (ret) {
2609 ++ if (ret)
2610 + put_device(&dev->dev);
2611 +- kfree(dev);
2612 +- }
2613 + }
2614 + }
2615 +
2616 +diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
2617 +index 7338a3bc71eb1..e3f4c8f20223a 100644
2618 +--- a/drivers/clk/at91/sam9x60.c
2619 ++++ b/drivers/clk/at91/sam9x60.c
2620 +@@ -162,7 +162,6 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
2621 + struct regmap *regmap;
2622 + struct clk_hw *hw;
2623 + int i;
2624 +- bool bypass;
2625 +
2626 + i = of_property_match_string(np, "clock-names", "td_slck");
2627 + if (i < 0)
2628 +@@ -197,10 +196,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
2629 + if (IS_ERR(hw))
2630 + goto err_free;
2631 +
2632 +- bypass = of_property_read_bool(np, "atmel,osc-bypass");
2633 +-
2634 +- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
2635 +- bypass);
2636 ++ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, 0);
2637 + if (IS_ERR(hw))
2638 + goto err_free;
2639 +
2640 +diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
2641 +index 2ce370c804aae..f19994ce7ca15 100644
2642 +--- a/drivers/clk/clk-s2mps11.c
2643 ++++ b/drivers/clk/clk-s2mps11.c
2644 +@@ -195,6 +195,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
2645 + return ret;
2646 +
2647 + err_reg:
2648 ++ of_node_put(s2mps11_clks[0].clk_np);
2649 + while (--i >= 0)
2650 + clkdev_drop(s2mps11_clks[i].lookup);
2651 +
2652 +diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
2653 +index 6e963031cd873..7490d4f4d9366 100644
2654 +--- a/drivers/clk/ingenic/cgu.c
2655 ++++ b/drivers/clk/ingenic/cgu.c
2656 +@@ -393,15 +393,21 @@ static unsigned int
2657 + ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
2658 + unsigned int div)
2659 + {
2660 +- unsigned int i;
2661 ++ unsigned int i, best_i = 0, best = (unsigned int)-1;
2662 +
2663 + for (i = 0; i < (1 << clk_info->div.bits)
2664 + && clk_info->div.div_table[i]; i++) {
2665 +- if (clk_info->div.div_table[i] >= div)
2666 +- return i;
2667 ++ if (clk_info->div.div_table[i] >= div &&
2668 ++ clk_info->div.div_table[i] < best) {
2669 ++ best = clk_info->div.div_table[i];
2670 ++ best_i = i;
2671 ++
2672 ++ if (div == best)
2673 ++ break;
2674 ++ }
2675 + }
2676 +
2677 +- return i - 1;
2678 ++ return best_i;
2679 + }
2680 +
2681 + static unsigned
2682 +diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
2683 +index dabeb435d0678..3f8dcdcdde499 100644
2684 +--- a/drivers/clk/meson/Kconfig
2685 ++++ b/drivers/clk/meson/Kconfig
2686 +@@ -103,6 +103,7 @@ config COMMON_CLK_G12A
2687 + select COMMON_CLK_MESON_AO_CLKC
2688 + select COMMON_CLK_MESON_EE_CLKC
2689 + select COMMON_CLK_MESON_CPU_DYNDIV
2690 ++ select COMMON_CLK_MESON_VID_PLL_DIV
2691 + select MFD_SYSCON
2692 + help
2693 + Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
2694 +diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
2695 +index e9e306d4e9af9..41271351cf1f4 100644
2696 +--- a/drivers/clk/mvebu/armada-37xx-xtal.c
2697 ++++ b/drivers/clk/mvebu/armada-37xx-xtal.c
2698 +@@ -13,8 +13,8 @@
2699 + #include <linux/platform_device.h>
2700 + #include <linux/regmap.h>
2701 +
2702 +-#define NB_GPIO1_LATCH 0xC
2703 +-#define XTAL_MODE BIT(31)
2704 ++#define NB_GPIO1_LATCH 0x8
2705 ++#define XTAL_MODE BIT(9)
2706 +
2707 + static int armada_3700_xtal_clock_probe(struct platform_device *pdev)
2708 + {
2709 +diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
2710 +index 1907ee195a08c..f2dc625b745da 100644
2711 +--- a/drivers/clk/renesas/r9a06g032-clocks.c
2712 ++++ b/drivers/clk/renesas/r9a06g032-clocks.c
2713 +@@ -55,7 +55,7 @@ struct r9a06g032_clkdesc {
2714 + u16 sel, g1, r1, g2, r2;
2715 + } dual;
2716 + };
2717 +-} __packed;
2718 ++};
2719 +
2720 + #define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
2721 + { .gate = _clk, .reset = _rst, \
2722 +diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2723 +index 5f66bf8797723..149cfde817cba 100644
2724 +--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2725 ++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2726 +@@ -389,6 +389,7 @@ static struct clk_div_table ths_div_table[] = {
2727 + { .val = 1, .div = 2 },
2728 + { .val = 2, .div = 4 },
2729 + { .val = 3, .div = 6 },
2730 ++ { /* Sentinel */ },
2731 + };
2732 + static const char * const ths_parents[] = { "osc24M" };
2733 + static struct ccu_div ths_clk = {
2734 +diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
2735 +index 6b636362379ee..7e629a4493afd 100644
2736 +--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
2737 ++++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
2738 +@@ -322,6 +322,7 @@ static struct clk_div_table ths_div_table[] = {
2739 + { .val = 1, .div = 2 },
2740 + { .val = 2, .div = 4 },
2741 + { .val = 3, .div = 6 },
2742 ++ { /* Sentinel */ },
2743 + };
2744 + static SUNXI_CCU_DIV_TABLE_WITH_GATE(ths_clk, "ths", "osc24M",
2745 + 0x074, 0, 2, ths_div_table, BIT(31), 0);
2746 +diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
2747 +index f8688c2ddf1ae..fdb46c5efc26c 100644
2748 +--- a/drivers/clk/tegra/clk-dfll.c
2749 ++++ b/drivers/clk/tegra/clk-dfll.c
2750 +@@ -1801,13 +1801,13 @@ static int dfll_fetch_pwm_params(struct tegra_dfll *td)
2751 + &td->reg_init_uV);
2752 + if (!ret) {
2753 + dev_err(td->dev, "couldn't get initialized voltage\n");
2754 +- return ret;
2755 ++ return -EINVAL;
2756 + }
2757 +
2758 + ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period);
2759 + if (!ret) {
2760 + dev_err(td->dev, "couldn't get PWM period\n");
2761 +- return ret;
2762 ++ return -EINVAL;
2763 + }
2764 + td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);
2765 +
2766 +diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
2767 +index de466b4446da9..0efcb200dde5a 100644
2768 +--- a/drivers/clk/tegra/clk-id.h
2769 ++++ b/drivers/clk/tegra/clk-id.h
2770 +@@ -233,6 +233,7 @@ enum clk_id {
2771 + tegra_clk_sdmmc4,
2772 + tegra_clk_sdmmc4_8,
2773 + tegra_clk_se,
2774 ++ tegra_clk_se_10,
2775 + tegra_clk_soc_therm,
2776 + tegra_clk_soc_therm_8,
2777 + tegra_clk_sor0,
2778 +diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
2779 +index 49b9f2f85bad6..4dc11e1e61ba8 100644
2780 +--- a/drivers/clk/tegra/clk-tegra-periph.c
2781 ++++ b/drivers/clk/tegra/clk-tegra-periph.c
2782 +@@ -636,7 +636,7 @@ static struct tegra_periph_init_data periph_clks[] = {
2783 + INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8),
2784 + INT8("host1x", mux_pllc4_out1_pllc_pllc4_out2_pllp_clkm_plla_pllc4_out0, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_9),
2785 + INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
2786 +- INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
2787 ++ INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se_10),
2788 + INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8),
2789 + INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8),
2790 + INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03),
2791 +diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
2792 +index 95e36ba64accf..8024c6d2b9e95 100644
2793 +--- a/drivers/clk/ti/fapll.c
2794 ++++ b/drivers/clk/ti/fapll.c
2795 +@@ -498,6 +498,7 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
2796 + {
2797 + struct clk_init_data *init;
2798 + struct fapll_synth *synth;
2799 ++ struct clk *clk = ERR_PTR(-ENOMEM);
2800 +
2801 + init = kzalloc(sizeof(*init), GFP_KERNEL);
2802 + if (!init)
2803 +@@ -520,13 +521,19 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
2804 + synth->hw.init = init;
2805 + synth->clk_pll = pll_clk;
2806 +
2807 +- return clk_register(NULL, &synth->hw);
2808 ++ clk = clk_register(NULL, &synth->hw);
2809 ++ if (IS_ERR(clk)) {
2810 ++ pr_err("failed to register clock\n");
2811 ++ goto free;
2812 ++ }
2813 ++
2814 ++ return clk;
2815 +
2816 + free:
2817 + kfree(synth);
2818 + kfree(init);
2819 +
2820 +- return ERR_PTR(-ENOMEM);
2821 ++ return clk;
2822 + }
2823 +
2824 + static void __init ti_fapll_setup(struct device_node *node)
2825 +diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
2826 +index 4be83b4de2a0a..39cdda2c9a98b 100644
2827 +--- a/drivers/clocksource/arm_arch_timer.c
2828 ++++ b/drivers/clocksource/arm_arch_timer.c
2829 +@@ -392,10 +392,10 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long
2830 + ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
2831 +
2832 + if (access == ARCH_TIMER_PHYS_ACCESS) {
2833 +- cval = evt + arch_counter_get_cntpct();
2834 ++ cval = evt + arch_counter_get_cntpct_stable();
2835 + write_sysreg(cval, cntp_cval_el0);
2836 + } else {
2837 +- cval = evt + arch_counter_get_cntvct();
2838 ++ cval = evt + arch_counter_get_cntvct_stable();
2839 + write_sysreg(cval, cntv_cval_el0);
2840 + }
2841 +
2842 +@@ -818,15 +818,24 @@ static void arch_timer_evtstrm_enable(int divider)
2843 +
2844 + static void arch_timer_configure_evtstream(void)
2845 + {
2846 +- int evt_stream_div, pos;
2847 ++ int evt_stream_div, lsb;
2848 ++
2849 ++ /*
2850 ++ * As the event stream can at most be generated at half the frequency
2851 ++ * of the counter, use half the frequency when computing the divider.
2852 ++ */
2853 ++ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;
2854 ++
2855 ++ /*
2856 ++ * Find the closest power of two to the divisor. If the adjacent bit
2857 ++ * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
2858 ++ */
2859 ++ lsb = fls(evt_stream_div) - 1;
2860 ++ if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
2861 ++ lsb++;
2862 +
2863 +- /* Find the closest power of two to the divisor */
2864 +- evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
2865 +- pos = fls(evt_stream_div);
2866 +- if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
2867 +- pos--;
2868 + /* enable event stream */
2869 +- arch_timer_evtstrm_enable(min(pos, 15));
2870 ++ arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
2871 + }
2872 +
2873 + static void arch_counter_set_user_access(void)
2874 +diff --git a/drivers/clocksource/timer-cadence-ttc.c b/drivers/clocksource/timer-cadence-ttc.c
2875 +index 88fe2e9ba9a35..160bc6597de5b 100644
2876 +--- a/drivers/clocksource/timer-cadence-ttc.c
2877 ++++ b/drivers/clocksource/timer-cadence-ttc.c
2878 +@@ -411,10 +411,8 @@ static int __init ttc_setup_clockevent(struct clk *clk,
2879 + ttcce->ttc.clk = clk;
2880 +
2881 + err = clk_prepare_enable(ttcce->ttc.clk);
2882 +- if (err) {
2883 +- kfree(ttcce);
2884 +- return err;
2885 +- }
2886 ++ if (err)
2887 ++ goto out_kfree;
2888 +
2889 + ttcce->ttc.clk_rate_change_nb.notifier_call =
2890 + ttc_rate_change_clockevent_cb;
2891 +@@ -424,7 +422,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
2892 + &ttcce->ttc.clk_rate_change_nb);
2893 + if (err) {
2894 + pr_warn("Unable to register clock notifier.\n");
2895 +- return err;
2896 ++ goto out_kfree;
2897 + }
2898 +
2899 + ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
2900 +@@ -453,15 +451,17 @@ static int __init ttc_setup_clockevent(struct clk *clk,
2901 +
2902 + err = request_irq(irq, ttc_clock_event_interrupt,
2903 + IRQF_TIMER, ttcce->ce.name, ttcce);
2904 +- if (err) {
2905 +- kfree(ttcce);
2906 +- return err;
2907 +- }
2908 ++ if (err)
2909 ++ goto out_kfree;
2910 +
2911 + clockevents_config_and_register(&ttcce->ce,
2912 + ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
2913 +
2914 + return 0;
2915 ++
2916 ++out_kfree:
2917 ++ kfree(ttcce);
2918 ++ return err;
2919 + }
2920 +
2921 + /**
2922 +diff --git a/drivers/clocksource/timer-orion.c b/drivers/clocksource/timer-orion.c
2923 +index 7d487107e3cd8..32b2563e2ad1b 100644
2924 +--- a/drivers/clocksource/timer-orion.c
2925 ++++ b/drivers/clocksource/timer-orion.c
2926 +@@ -149,7 +149,8 @@ static int __init orion_timer_init(struct device_node *np)
2927 + irq = irq_of_parse_and_map(np, 1);
2928 + if (irq <= 0) {
2929 + pr_err("%pOFn: unable to parse timer1 irq\n", np);
2930 +- return -EINVAL;
2931 ++ ret = -EINVAL;
2932 ++ goto out_unprep_clk;
2933 + }
2934 +
2935 + rate = clk_get_rate(clk);
2936 +@@ -166,7 +167,7 @@ static int __init orion_timer_init(struct device_node *np)
2937 + clocksource_mmio_readl_down);
2938 + if (ret) {
2939 + pr_err("Failed to initialize mmio timer\n");
2940 +- return ret;
2941 ++ goto out_unprep_clk;
2942 + }
2943 +
2944 + sched_clock_register(orion_read_sched_clock, 32, rate);
2945 +@@ -175,7 +176,7 @@ static int __init orion_timer_init(struct device_node *np)
2946 + ret = setup_irq(irq, &orion_clkevt_irq);
2947 + if (ret) {
2948 + pr_err("%pOFn: unable to setup irq\n", np);
2949 +- return ret;
2950 ++ goto out_unprep_clk;
2951 + }
2952 +
2953 + ticks_per_jiffy = (clk_get_rate(clk) + HZ/2) / HZ;
2954 +@@ -188,5 +189,9 @@ static int __init orion_timer_init(struct device_node *np)
2955 + orion_delay_timer_init(rate);
2956 +
2957 + return 0;
2958 ++
2959 ++out_unprep_clk:
2960 ++ clk_disable_unprepare(clk);
2961 ++ return ret;
2962 + }
2963 + TIMER_OF_DECLARE(orion_timer, "marvell,orion-timer", orion_timer_init);
2964 +diff --git a/drivers/cpufreq/armada-8k-cpufreq.c b/drivers/cpufreq/armada-8k-cpufreq.c
2965 +index 39e34f5066d3d..b0fc5e84f8570 100644
2966 +--- a/drivers/cpufreq/armada-8k-cpufreq.c
2967 ++++ b/drivers/cpufreq/armada-8k-cpufreq.c
2968 +@@ -204,6 +204,12 @@ static void __exit armada_8k_cpufreq_exit(void)
2969 + }
2970 + module_exit(armada_8k_cpufreq_exit);
2971 +
2972 ++static const struct of_device_id __maybe_unused armada_8k_cpufreq_of_match[] = {
2973 ++ { .compatible = "marvell,ap806-cpu-clock" },
2974 ++ { },
2975 ++};
2976 ++MODULE_DEVICE_TABLE(of, armada_8k_cpufreq_of_match);
2977 ++
2978 + MODULE_AUTHOR("Gregory Clement <gregory.clement@×××××××.com>");
2979 + MODULE_DESCRIPTION("Armada 8K cpufreq driver");
2980 + MODULE_LICENSE("GPL");
2981 +diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
2982 +index 5a7f6dafcddb6..ac57cddc5f2fe 100644
2983 +--- a/drivers/cpufreq/highbank-cpufreq.c
2984 ++++ b/drivers/cpufreq/highbank-cpufreq.c
2985 +@@ -101,6 +101,13 @@ out_put_node:
2986 + }
2987 + module_init(hb_cpufreq_driver_init);
2988 +
2989 ++static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = {
2990 ++ { .compatible = "calxeda,highbank" },
2991 ++ { .compatible = "calxeda,ecx-2000" },
2992 ++ { },
2993 ++};
2994 ++MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match);
2995 ++
2996 + MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@×××××××.com>");
2997 + MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
2998 + MODULE_LICENSE("GPL");
2999 +diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
3000 +index 0ea88778882ac..86f612593e497 100644
3001 +--- a/drivers/cpufreq/loongson1-cpufreq.c
3002 ++++ b/drivers/cpufreq/loongson1-cpufreq.c
3003 +@@ -216,6 +216,7 @@ static struct platform_driver ls1x_cpufreq_platdrv = {
3004 +
3005 + module_platform_driver(ls1x_cpufreq_platdrv);
3006 +
3007 ++MODULE_ALIAS("platform:ls1x-cpufreq");
3008 + MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@×××××.com>");
3009 + MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
3010 + MODULE_LICENSE("GPL");
3011 +diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
3012 +index 0c98dd08273d0..927ebc582a385 100644
3013 +--- a/drivers/cpufreq/mediatek-cpufreq.c
3014 ++++ b/drivers/cpufreq/mediatek-cpufreq.c
3015 +@@ -540,6 +540,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
3016 +
3017 + { }
3018 + };
3019 ++MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
3020 +
3021 + static int __init mtk_cpufreq_driver_init(void)
3022 + {
3023 +diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c
3024 +index f0d2d5035413b..1e77d190f19f9 100644
3025 +--- a/drivers/cpufreq/qcom-cpufreq-nvmem.c
3026 ++++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c
3027 +@@ -305,6 +305,7 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
3028 + { .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
3029 + {},
3030 + };
3031 ++MODULE_DEVICE_TABLE(of, qcom_cpufreq_match_list);
3032 +
3033 + /*
3034 + * Since the driver depends on smem and nvmem drivers, which may
3035 +diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
3036 +index 2b51e0718c9f6..b341ffbf56bc3 100644
3037 +--- a/drivers/cpufreq/scpi-cpufreq.c
3038 ++++ b/drivers/cpufreq/scpi-cpufreq.c
3039 +@@ -239,6 +239,7 @@ static struct platform_driver scpi_cpufreq_platdrv = {
3040 + };
3041 + module_platform_driver(scpi_cpufreq_platdrv);
3042 +
3043 ++MODULE_ALIAS("platform:scpi-cpufreq");
3044 + MODULE_AUTHOR("Sudeep Holla <sudeep.holla@×××.com>");
3045 + MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver");
3046 + MODULE_LICENSE("GPL v2");
3047 +diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
3048 +index 2855b7878a204..7ade4070ca827 100644
3049 +--- a/drivers/cpufreq/sti-cpufreq.c
3050 ++++ b/drivers/cpufreq/sti-cpufreq.c
3051 +@@ -292,6 +292,13 @@ register_cpufreq_dt:
3052 + }
3053 + module_init(sti_cpufreq_init);
3054 +
3055 ++static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
3056 ++ { .compatible = "st,stih407" },
3057 ++ { .compatible = "st,stih410" },
3058 ++ { },
3059 ++};
3060 ++MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
3061 ++
3062 + MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver");
3063 + MODULE_AUTHOR("Ajitpal Singh <ajitpal.singh@××.com>");
3064 + MODULE_AUTHOR("Lee Jones <lee.jones@××××××.org>");
3065 +diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
3066 +index 9907a165135b7..2deed8d8773fa 100644
3067 +--- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c
3068 ++++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c
3069 +@@ -167,6 +167,7 @@ static const struct of_device_id sun50i_cpufreq_match_list[] = {
3070 + { .compatible = "allwinner,sun50i-h6" },
3071 + {}
3072 + };
3073 ++MODULE_DEVICE_TABLE(of, sun50i_cpufreq_match_list);
3074 +
3075 + static const struct of_device_id *sun50i_cpufreq_match_node(void)
3076 + {
3077 +diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
3078 +index 0952f059d967c..1f6308cdf79a2 100644
3079 +--- a/drivers/crypto/Kconfig
3080 ++++ b/drivers/crypto/Kconfig
3081 +@@ -544,6 +544,7 @@ config CRYPTO_DEV_ATMEL_SHA
3082 +
3083 + config CRYPTO_DEV_ATMEL_I2C
3084 + tristate
3085 ++ select BITREVERSE
3086 +
3087 + config CRYPTO_DEV_ATMEL_ECC
3088 + tristate "Support for Microchip / Atmel ECC hw accelerator"
3089 +diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
3090 +index 7d6b695c4ab3f..230e8902c727c 100644
3091 +--- a/drivers/crypto/amcc/crypto4xx_core.c
3092 ++++ b/drivers/crypto/amcc/crypto4xx_core.c
3093 +@@ -916,7 +916,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
3094 + }
3095 +
3096 + pd->pd_ctl.w = PD_CTL_HOST_READY |
3097 +- ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
3098 ++ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
3099 + (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
3100 + PD_CTL_HASH_FINAL : 0);
3101 + pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
3102 +diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
3103 +index 991a4425f006a..4d9d97c59ee36 100644
3104 +--- a/drivers/crypto/inside-secure/safexcel.c
3105 ++++ b/drivers/crypto/inside-secure/safexcel.c
3106 +@@ -1467,7 +1467,7 @@ static int safexcel_probe_generic(void *pdev,
3107 +
3108 + priv->ring[i].rdr_req = devm_kcalloc(dev,
3109 + EIP197_DEFAULT_RING_SIZE,
3110 +- sizeof(priv->ring[i].rdr_req),
3111 ++ sizeof(*priv->ring[i].rdr_req),
3112 + GFP_KERNEL);
3113 + if (!priv->ring[i].rdr_req)
3114 + return -ENOMEM;
3115 +diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
3116 +index 2f53fbb741001..103e704c14697 100644
3117 +--- a/drivers/crypto/omap-aes.c
3118 ++++ b/drivers/crypto/omap-aes.c
3119 +@@ -1157,7 +1157,7 @@ static int omap_aes_probe(struct platform_device *pdev)
3120 + if (err < 0) {
3121 + dev_err(dev, "%s: failed to get_sync(%d)\n",
3122 + __func__, err);
3123 +- goto err_res;
3124 ++ goto err_pm_disable;
3125 + }
3126 +
3127 + omap_aes_dma_stop(dd);
3128 +@@ -1267,6 +1267,7 @@ err_engine:
3129 + omap_aes_dma_cleanup(dd);
3130 + err_irq:
3131 + tasklet_kill(&dd->done_task);
3132 ++err_pm_disable:
3133 + pm_runtime_disable(dev);
3134 + err_res:
3135 + dd = NULL;
3136 +diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
3137 +index ff149e176f649..dac130bb807ae 100644
3138 +--- a/drivers/crypto/qat/qat_common/qat_hal.c
3139 ++++ b/drivers/crypto/qat/qat_common/qat_hal.c
3140 +@@ -1189,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
3141 + unsigned short mask;
3142 + unsigned short dr_offset = 0x10;
3143 +
3144 +- status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
3145 ++ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
3146 + if (CE_INUSE_CONTEXTS & ctx_enables) {
3147 + if (ctx & 0x1) {
3148 + pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
3149 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
3150 +index 56e3068c9947a..b7c66fc0ae0c2 100644
3151 +--- a/drivers/crypto/talitos.c
3152 ++++ b/drivers/crypto/talitos.c
3153 +@@ -460,7 +460,7 @@ DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
3154 + /*
3155 + * locate current (offending) descriptor
3156 + */
3157 +-static u32 current_desc_hdr(struct device *dev, int ch)
3158 ++static __be32 current_desc_hdr(struct device *dev, int ch)
3159 + {
3160 + struct talitos_private *priv = dev_get_drvdata(dev);
3161 + int tail, iter;
3162 +@@ -478,7 +478,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
3163 +
3164 + iter = tail;
3165 + while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
3166 +- priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
3167 ++ priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
3168 + iter = (iter + 1) & (priv->fifo_len - 1);
3169 + if (iter == tail) {
3170 + dev_err(dev, "couldn't locate current descriptor\n");
3171 +@@ -486,7 +486,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
3172 + }
3173 + }
3174 +
3175 +- if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
3176 ++ if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
3177 + struct talitos_edesc *edesc;
3178 +
3179 + edesc = container_of(priv->chan[ch].fifo[iter].desc,
3180 +@@ -501,13 +501,13 @@ static u32 current_desc_hdr(struct device *dev, int ch)
3181 + /*
3182 + * user diagnostics; report root cause of error based on execution unit status
3183 + */
3184 +-static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
3185 ++static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
3186 + {
3187 + struct talitos_private *priv = dev_get_drvdata(dev);
3188 + int i;
3189 +
3190 + if (!desc_hdr)
3191 +- desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
3192 ++ desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));
3193 +
3194 + switch (desc_hdr & DESC_HDR_SEL0_MASK) {
3195 + case DESC_HDR_SEL0_AFEU:
3196 +diff --git a/drivers/dax/super.c b/drivers/dax/super.c
3197 +index b936852881871..8074e5de815b9 100644
3198 +--- a/drivers/dax/super.c
3199 ++++ b/drivers/dax/super.c
3200 +@@ -720,6 +720,7 @@ err_chrdev:
3201 +
3202 + static void __exit dax_core_exit(void)
3203 + {
3204 ++ dax_bus_exit();
3205 + unregister_chrdev_region(dax_devt, MINORMASK+1);
3206 + ida_destroy(&dax_minor_ida);
3207 + dax_fs_exit();
3208 +diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
3209 +index 709002515550c..242a9ec295cf8 100644
3210 +--- a/drivers/dma-buf/dma-resv.c
3211 ++++ b/drivers/dma-buf/dma-resv.c
3212 +@@ -161,7 +161,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
3213 + max = max(old->shared_count + num_fences,
3214 + old->shared_max * 2);
3215 + } else {
3216 +- max = 4;
3217 ++ max = max(4ul, roundup_pow_of_two(num_fences));
3218 + }
3219 +
3220 + new = dma_resv_list_alloc(max);
3221 +diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
3222 +index e3850f04f6763..889a94af4c851 100644
3223 +--- a/drivers/dma/mv_xor_v2.c
3224 ++++ b/drivers/dma/mv_xor_v2.c
3225 +@@ -766,8 +766,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
3226 + goto disable_clk;
3227 +
3228 + msi_desc = first_msi_entry(&pdev->dev);
3229 +- if (!msi_desc)
3230 ++ if (!msi_desc) {
3231 ++ ret = -ENODEV;
3232 + goto free_msi_irqs;
3233 ++ }
3234 + xor_dev->msi_desc = msi_desc;
3235 +
3236 + ret = devm_request_irq(&pdev->dev, msi_desc->irq,
3237 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
3238 +index 125a44d5a69e3..aed0f26c9af5d 100644
3239 +--- a/drivers/edac/amd64_edac.c
3240 ++++ b/drivers/edac/amd64_edac.c
3241 +@@ -22,6 +22,9 @@ static struct ecc_settings **ecc_stngs;
3242 + /* Number of Unified Memory Controllers */
3243 + static u8 num_umcs;
3244 +
3245 ++/* Device for the PCI component */
3246 ++static struct device *pci_ctl_dev;
3247 ++
3248 + /*
3249 + * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
3250 + * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
3251 +@@ -2672,6 +2675,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
3252 + return -ENODEV;
3253 + }
3254 +
3255 ++ if (!pci_ctl_dev)
3256 ++ pci_ctl_dev = &pvt->F0->dev;
3257 ++
3258 + edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
3259 + edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
3260 + edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
3261 +@@ -2696,6 +2702,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
3262 + return -ENODEV;
3263 + }
3264 +
3265 ++ if (!pci_ctl_dev)
3266 ++ pci_ctl_dev = &pvt->F2->dev;
3267 ++
3268 + edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
3269 + edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
3270 + edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
3271 +@@ -3626,21 +3635,10 @@ static void remove_one_instance(unsigned int nid)
3272 +
3273 + static void setup_pci_device(void)
3274 + {
3275 +- struct mem_ctl_info *mci;
3276 +- struct amd64_pvt *pvt;
3277 +-
3278 + if (pci_ctl)
3279 + return;
3280 +
3281 +- mci = edac_mc_find(0);
3282 +- if (!mci)
3283 +- return;
3284 +-
3285 +- pvt = mci->pvt_info;
3286 +- if (pvt->umc)
3287 +- pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
3288 +- else
3289 +- pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
3290 ++ pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
3291 + if (!pci_ctl) {
3292 + pr_warn("%s(): Unable to create PCI control\n", __func__);
3293 + pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
3294 +@@ -3723,6 +3721,8 @@ static int __init amd64_edac_init(void)
3295 + return 0;
3296 +
3297 + err_pci:
3298 ++ pci_ctl_dev = NULL;
3299 ++
3300 + msrs_free(msrs);
3301 + msrs = NULL;
3302 +
3303 +@@ -3754,6 +3754,8 @@ static void __exit amd64_edac_exit(void)
3304 + kfree(ecc_stngs);
3305 + ecc_stngs = NULL;
3306 +
3307 ++ pci_ctl_dev = NULL;
3308 ++
3309 + msrs_free(msrs);
3310 + msrs = NULL;
3311 + }
3312 +diff --git a/drivers/edac/i10nm_base.c b/drivers/edac/i10nm_base.c
3313 +index c0c5b6ecdb2e4..dfcde7ed95006 100644
3314 +--- a/drivers/edac/i10nm_base.c
3315 ++++ b/drivers/edac/i10nm_base.c
3316 +@@ -6,6 +6,7 @@
3317 + */
3318 +
3319 + #include <linux/kernel.h>
3320 ++#include <linux/io.h>
3321 + #include <asm/cpu_device_id.h>
3322 + #include <asm/intel-family.h>
3323 + #include <asm/mce.h>
3324 +@@ -19,14 +20,16 @@
3325 + #define i10nm_printk(level, fmt, arg...) \
3326 + edac_printk(level, "i10nm", fmt, ##arg)
3327 +
3328 +-#define I10NM_GET_SCK_BAR(d, reg) \
3329 ++#define I10NM_GET_SCK_BAR(d, reg) \
3330 + pci_read_config_dword((d)->uracu, 0xd0, &(reg))
3331 + #define I10NM_GET_IMC_BAR(d, i, reg) \
3332 + pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
3333 + #define I10NM_GET_DIMMMTR(m, i, j) \
3334 +- (*(u32 *)((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4))
3335 ++ readl((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4)
3336 + #define I10NM_GET_MCDDRTCFG(m, i, j) \
3337 +- (*(u32 *)((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4))
3338 ++ readl((m)->mbase + 0x20970 + (i) * 0x4000 + (j) * 4)
3339 ++#define I10NM_GET_MCMTR(m, i) \
3340 ++ readl((m)->mbase + 0x20ef8 + (i) * 0x4000)
3341 +
3342 + #define I10NM_GET_SCK_MMIO_BASE(reg) (GET_BITFIELD(reg, 0, 28) << 23)
3343 + #define I10NM_GET_IMC_MMIO_OFFSET(reg) (GET_BITFIELD(reg, 0, 10) << 12)
3344 +@@ -134,7 +137,7 @@ static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
3345 + {
3346 + u32 mcmtr;
3347 +
3348 +- mcmtr = *(u32 *)(imc->mbase + 0x20ef8 + chan * 0x4000);
3349 ++ mcmtr = I10NM_GET_MCMTR(imc, chan);
3350 + edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);
3351 +
3352 + return !!GET_BITFIELD(mcmtr, 2, 2);
3353 +diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
3354 +index ea622c6f3a393..c19640a453f22 100644
3355 +--- a/drivers/edac/mce_amd.c
3356 ++++ b/drivers/edac/mce_amd.c
3357 +@@ -975,7 +975,7 @@ static void decode_smca_error(struct mce *m)
3358 + }
3359 +
3360 + if (bank_type == SMCA_UMC && xec == 0 && decode_dram_ecc)
3361 +- decode_dram_ecc(cpu_to_node(m->extcpu), m);
3362 ++ decode_dram_ecc(topology_die_id(m->extcpu), m);
3363 + }
3364 +
3365 + static inline void amd_decode_err_code(u16 ec)
3366 +diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
3367 +index 32fc5a66ffa98..26c7041f70698 100644
3368 +--- a/drivers/extcon/extcon-max77693.c
3369 ++++ b/drivers/extcon/extcon-max77693.c
3370 +@@ -1277,4 +1277,4 @@ module_platform_driver(max77693_muic_driver);
3371 + MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver");
3372 + MODULE_AUTHOR("Chanwoo Choi <cw00.choi@×××××××.com>");
3373 + MODULE_LICENSE("GPL");
3374 +-MODULE_ALIAS("platform:extcon-max77693");
3375 ++MODULE_ALIAS("platform:max77693-muic");
3376 +diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
3377 +index bb287f35cf408..a69b3faf51ef0 100644
3378 +--- a/drivers/gpio/gpio-eic-sprd.c
3379 ++++ b/drivers/gpio/gpio-eic-sprd.c
3380 +@@ -569,6 +569,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
3381 + const struct sprd_eic_variant_data *pdata;
3382 + struct gpio_irq_chip *irq;
3383 + struct sprd_eic *sprd_eic;
3384 ++ struct resource *res;
3385 + int ret, i;
3386 +
3387 + pdata = of_device_get_match_data(&pdev->dev);
3388 +@@ -595,9 +596,13 @@ static int sprd_eic_probe(struct platform_device *pdev)
3389 + * have one bank EIC, thus base[1] and base[2] can be
3390 + * optional.
3391 + */
3392 +- sprd_eic->base[i] = devm_platform_ioremap_resource(pdev, i);
3393 ++ res = platform_get_resource(pdev, IORESOURCE_MEM, i);
3394 ++ if (!res)
3395 ++ break;
3396 ++
3397 ++ sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
3398 + if (IS_ERR(sprd_eic->base[i]))
3399 +- continue;
3400 ++ return PTR_ERR(sprd_eic->base[i]);
3401 + }
3402 +
3403 + sprd_eic->chip.label = sprd_eic_label_name[sprd_eic->type];
3404 +diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
3405 +index 6c06876943412..3985d6e1c17dc 100644
3406 +--- a/drivers/gpio/gpio-mvebu.c
3407 ++++ b/drivers/gpio/gpio-mvebu.c
3408 +@@ -1196,6 +1196,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
3409 +
3410 + devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
3411 +
3412 ++ /* Some MVEBU SoCs have simple PWM support for GPIO lines */
3413 ++ if (IS_ENABLED(CONFIG_PWM)) {
3414 ++ err = mvebu_pwm_probe(pdev, mvchip, id);
3415 ++ if (err)
3416 ++ return err;
3417 ++ }
3418 ++
3419 + /* Some gpio controllers do not provide irq support */
3420 + if (!have_irqs)
3421 + return 0;
3422 +@@ -1205,7 +1212,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
3423 + if (!mvchip->domain) {
3424 + dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
3425 + mvchip->chip.label);
3426 +- return -ENODEV;
3427 ++ err = -ENODEV;
3428 ++ goto err_pwm;
3429 + }
3430 +
3431 + err = irq_alloc_domain_generic_chips(
3432 +@@ -1253,14 +1261,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
3433 + mvchip);
3434 + }
3435 +
3436 +- /* Some MVEBU SoCs have simple PWM support for GPIO lines */
3437 +- if (IS_ENABLED(CONFIG_PWM))
3438 +- return mvebu_pwm_probe(pdev, mvchip, id);
3439 +-
3440 + return 0;
3441 +
3442 + err_domain:
3443 + irq_domain_remove(mvchip->domain);
3444 ++err_pwm:
3445 ++ pwmchip_remove(&mvchip->mvpwm->chip);
3446 +
3447 + return err;
3448 + }
3449 +diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
3450 +index 7835aad6d1628..88b04d8a7fa7d 100644
3451 +--- a/drivers/gpio/gpio-zynq.c
3452 ++++ b/drivers/gpio/gpio-zynq.c
3453 +@@ -556,7 +556,7 @@ static int zynq_gpio_irq_reqres(struct irq_data *d)
3454 + struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
3455 + int ret;
3456 +
3457 +- ret = pm_runtime_get_sync(chip->parent);
3458 ++ ret = pm_runtime_resume_and_get(chip->parent);
3459 + if (ret < 0)
3460 + return ret;
3461 +
3462 +@@ -884,7 +884,7 @@ static int zynq_gpio_probe(struct platform_device *pdev)
3463 +
3464 + pm_runtime_set_active(&pdev->dev);
3465 + pm_runtime_enable(&pdev->dev);
3466 +- ret = pm_runtime_get_sync(&pdev->dev);
3467 ++ ret = pm_runtime_resume_and_get(&pdev->dev);
3468 + if (ret < 0)
3469 + goto err_pm_dis;
3470 +
3471 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
3472 +index 1d3cd5c50d5f2..4a0ef9268918c 100644
3473 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
3474 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
3475 +@@ -1664,6 +1664,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
3476 + }
3477 +
3478 + mutex_unlock(&p->mutex);
3479 ++ dma_buf_put(dmabuf);
3480 +
3481 + args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
3482 +
3483 +@@ -1673,6 +1674,7 @@ err_free:
3484 + amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
3485 + err_unlock:
3486 + mutex_unlock(&p->mutex);
3487 ++ dma_buf_put(dmabuf);
3488 + return r;
3489 + }
3490 +
3491 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3492 +index d2dd387c95d86..09410971615c4 100644
3493 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3494 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
3495 +@@ -1434,7 +1434,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
3496 +
3497 + drm_connector_update_edid_property(connector,
3498 + aconnector->edid);
3499 +- drm_add_edid_modes(connector, aconnector->edid);
3500 ++ aconnector->num_modes = drm_add_edid_modes(connector, aconnector->edid);
3501 ++ drm_connector_list_update(connector);
3502 +
3503 + if (aconnector->dc_link->aux_mode)
3504 + drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3505 +diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
3506 +index dd92f9c295b45..9f301f8575a54 100644
3507 +--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
3508 ++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
3509 +@@ -97,8 +97,17 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
3510 + new_clocks->dppclk_khz = 100000;
3511 + }
3512 +
3513 +- if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
3514 +- if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
3515 ++ /*
3516 ++ * Temporarily ignore the 0 cases for disp and dpp clks.
3517 ++ * We may have a new feature that requires 0 clks in the future.
3518 ++ */
3519 ++ if (new_clocks->dppclk_khz == 0 || new_clocks->dispclk_khz == 0) {
3520 ++ new_clocks->dppclk_khz = clk_mgr_base->clks.dppclk_khz;
3521 ++ new_clocks->dispclk_khz = clk_mgr_base->clks.dispclk_khz;
3522 ++ }
3523 ++
3524 ++ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr_base->clks.dppclk_khz)) {
3525 ++ if (clk_mgr_base->clks.dppclk_khz > new_clocks->dppclk_khz)
3526 + dpp_clock_lowered = true;
3527 + clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
3528 + update_dppclk = true;
3529 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
3530 +index 47cefc05fd3f5..fa92b88bc5a13 100644
3531 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
3532 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
3533 +@@ -2909,8 +2909,12 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
3534 +
3535 + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
3536 + if (timing->flags.DSC) {
3537 +- kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel);
3538 +- kbps = kbps / 160 + ((kbps % 160) ? 1 : 0);
3539 ++ struct fixed31_32 link_bw_kbps;
3540 ++
3541 ++ link_bw_kbps = dc_fixpt_from_int(timing->pix_clk_100hz);
3542 ++ link_bw_kbps = dc_fixpt_div_int(link_bw_kbps, 160);
3543 ++ link_bw_kbps = dc_fixpt_mul_int(link_bw_kbps, timing->dsc_cfg.bits_per_pixel);
3544 ++ kbps = dc_fixpt_ceil(link_bw_kbps);
3545 + return kbps;
3546 + }
3547 + #endif
3548 +diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
3549 +index 6dd2334dd5e60..959eb075d11ed 100644
3550 +--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
3551 ++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
3552 +@@ -3378,7 +3378,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
3553 +
3554 + if (edp_config_set.bits.PANEL_MODE_EDP
3555 + != panel_mode_edp) {
3556 +- enum ddc_result result = DDC_RESULT_UNKNOWN;
3557 ++ enum dc_status result = DC_ERROR_UNEXPECTED;
3558 +
3559 + edp_config_set.bits.PANEL_MODE_EDP =
3560 + panel_mode_edp;
3561 +@@ -3388,7 +3388,7 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode)
3562 + &edp_config_set.raw,
3563 + sizeof(edp_config_set.raw));
3564 +
3565 +- ASSERT(result == DDC_RESULT_SUCESSFULL);
3566 ++ ASSERT(result == DC_OK);
3567 + }
3568 + }
3569 + DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d "
3570 +diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
3571 +index 51d07a4561ce9..e042d8ce05b4a 100644
3572 +--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
3573 ++++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
3574 +@@ -1576,7 +1576,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
3575 + struct pwl_float_data_ex *rgb = rgb_regamma;
3576 + const struct hw_x_point *coord_x = coordinates_x;
3577 +
3578 +- build_coefficients(&coeff, true);
3579 ++ build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
3580 +
3581 + i = 0;
3582 + while (i != hw_points_num + 1) {
3583 +diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
3584 +index 018383cfcfa79..5e95bcea43e92 100644
3585 +--- a/drivers/gpu/drm/aspeed/Kconfig
3586 ++++ b/drivers/gpu/drm/aspeed/Kconfig
3587 +@@ -3,6 +3,7 @@ config DRM_ASPEED_GFX
3588 + tristate "ASPEED BMC Display Controller"
3589 + depends on DRM && OF
3590 + depends on (COMPILE_TEST || ARCH_ASPEED)
3591 ++ depends on MMU
3592 + select DRM_KMS_HELPER
3593 + select DRM_KMS_CMA_HELPER
3594 + select DMA_CMA if HAVE_DMA_CONTIGUOUS
3595 +diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
3596 +index 0cfb386754c37..0d7f90c00f042 100644
3597 +--- a/drivers/gpu/drm/drm_dp_aux_dev.c
3598 ++++ b/drivers/gpu/drm/drm_dp_aux_dev.c
3599 +@@ -63,7 +63,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
3600 +
3601 + mutex_lock(&aux_idr_mutex);
3602 + aux_dev = idr_find(&aux_idr, index);
3603 +- if (!kref_get_unless_zero(&aux_dev->refcount))
3604 ++ if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
3605 + aux_dev = NULL;
3606 + mutex_unlock(&aux_idr_mutex);
3607 +
3608 +diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
3609 +index 2ff4b35151bf8..87738650dd90b 100644
3610 +--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
3611 ++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
3612 +@@ -2125,7 +2125,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
3613 + DRM_INFO("failed to retrieve link info, disabling eDP\n");
3614 + cdv_intel_dp_encoder_destroy(encoder);
3615 + cdv_intel_dp_destroy(connector);
3616 +- goto err_priv;
3617 ++ goto err_connector;
3618 + } else {
3619 + DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
3620 + intel_dp->dpcd[0], intel_dp->dpcd[1],
3621 +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3622 +index 3d8dff2d894ae..7f7d59445faed 100644
3623 +--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3624 ++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
3625 +@@ -367,7 +367,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
3626 + return true;
3627 +
3628 + if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
3629 +- (vma->node.start + vma->node.size - 1) >> 32)
3630 ++ (vma->node.start + vma->node.size + 4095) >> 32)
3631 + return true;
3632 +
3633 + if (flags & __EXEC_OBJECT_NEEDS_MAP &&
3634 +diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
3635 +index 16e5fb9ec784d..82946ffcb6d21 100644
3636 +--- a/drivers/gpu/drm/mcde/mcde_drv.c
3637 ++++ b/drivers/gpu/drm/mcde/mcde_drv.c
3638 +@@ -410,8 +410,8 @@ static int mcde_probe(struct platform_device *pdev)
3639 + }
3640 +
3641 + irq = platform_get_irq(pdev, 0);
3642 +- if (!irq) {
3643 +- ret = -EINVAL;
3644 ++ if (irq < 0) {
3645 ++ ret = irq;
3646 + goto clk_disable;
3647 + }
3648 +
3649 +diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
3650 +index 5223498502c49..23a74eb5d7f81 100644
3651 +--- a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
3652 ++++ b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
3653 +@@ -84,8 +84,9 @@ mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
3654 + hdmi_phy->conf->hdmi_phy_disable_tmds)
3655 + return &mtk_hdmi_phy_dev_ops;
3656 +
3657 +- dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
3658 +- return NULL;
3659 ++ if (hdmi_phy)
3660 ++ dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
3661 ++ return NULL;
3662 + }
3663 +
3664 + static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
3665 +diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
3666 +index aa9385d5bfff9..33033b94935ed 100644
3667 +--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
3668 ++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
3669 +@@ -559,6 +559,7 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
3670 + struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
3671 + void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
3672 + u32 val;
3673 ++ int ret;
3674 +
3675 + val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
3676 + val &= ~0x3;
3677 +@@ -573,6 +574,13 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
3678 + val |= cached->pll_mux;
3679 + pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
3680 +
3681 ++ ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);
3682 ++ if (ret) {
3683 ++ DRM_DEV_ERROR(&pll_10nm->pdev->dev,
3684 ++ "restore vco rate failed. ret=%d\n", ret);
3685 ++ return ret;
3686 ++ }
3687 ++
3688 + DBG("DSI PLL%d", pll_10nm->id);
3689 +
3690 + return 0;
3691 +diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
3692 +index 252f5ebb1acc4..3dd6c0087edb6 100644
3693 +--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
3694 ++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
3695 +@@ -891,6 +891,7 @@ static int omap_dmm_probe(struct platform_device *dev)
3696 + &omap_dmm->refill_pa, GFP_KERNEL);
3697 + if (!omap_dmm->refill_va) {
3698 + dev_err(&dev->dev, "could not allocate refill memory\n");
3699 ++ ret = -ENOMEM;
3700 + goto fail;
3701 + }
3702 +
3703 +diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
3704 +index bc7cc32140f81..6833dfad7241b 100644
3705 +--- a/drivers/gpu/drm/tegra/drm.c
3706 ++++ b/drivers/gpu/drm/tegra/drm.c
3707 +@@ -256,7 +256,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
3708 + if (!fpriv)
3709 + return -ENOMEM;
3710 +
3711 +- idr_init(&fpriv->contexts);
3712 ++ idr_init_base(&fpriv->contexts, 1);
3713 + mutex_init(&fpriv->lock);
3714 + filp->driver_priv = fpriv;
3715 +
3716 +diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
3717 +index 75e65d9536d54..6c3d221652393 100644
3718 +--- a/drivers/gpu/drm/tegra/sor.c
3719 ++++ b/drivers/gpu/drm/tegra/sor.c
3720 +@@ -2899,6 +2899,7 @@ static int tegra_sor_init(struct host1x_client *client)
3721 + if (err < 0) {
3722 + dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
3723 + err);
3724 ++ clk_disable_unprepare(sor->clk);
3725 + return err;
3726 + }
3727 +
3728 +@@ -2906,12 +2907,17 @@ static int tegra_sor_init(struct host1x_client *client)
3729 + }
3730 +
3731 + err = clk_prepare_enable(sor->clk_safe);
3732 +- if (err < 0)
3733 ++ if (err < 0) {
3734 ++ clk_disable_unprepare(sor->clk);
3735 + return err;
3736 ++ }
3737 +
3738 + err = clk_prepare_enable(sor->clk_dp);
3739 +- if (err < 0)
3740 ++ if (err < 0) {
3741 ++ clk_disable_unprepare(sor->clk_safe);
3742 ++ clk_disable_unprepare(sor->clk);
3743 + return err;
3744 ++ }
3745 +
3746 + /*
3747 + * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
3748 +diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
3749 +index 416f24823c0aa..02836d4e80237 100644
3750 +--- a/drivers/gpu/drm/tve200/tve200_drv.c
3751 ++++ b/drivers/gpu/drm/tve200/tve200_drv.c
3752 +@@ -210,8 +210,8 @@ static int tve200_probe(struct platform_device *pdev)
3753 + }
3754 +
3755 + irq = platform_get_irq(pdev, 0);
3756 +- if (!irq) {
3757 +- ret = -EINVAL;
3758 ++ if (irq < 0) {
3759 ++ ret = irq;
3760 + goto clk_disable;
3761 + }
3762 +
3763 +diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
3764 +index 35f3bfc3e6f59..8e0f67455c098 100644
3765 +--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
3766 ++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
3767 +@@ -405,6 +405,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
3768 + },
3769 + .driver_data = (void *)&sipodev_desc
3770 + },
3771 ++ {
3772 ++ .ident = "Vero K147",
3773 ++ .matches = {
3774 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VERO"),
3775 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "K147"),
3776 ++ },
3777 ++ .driver_data = (void *)&sipodev_desc
3778 ++ },
3779 + { } /* Terminate list */
3780 + };
3781 +
3782 +diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
3783 +index 4bc4a201f0f6c..2be9c01e175ca 100644
3784 +--- a/drivers/hsi/controllers/omap_ssi_core.c
3785 ++++ b/drivers/hsi/controllers/omap_ssi_core.c
3786 +@@ -355,7 +355,7 @@ static int ssi_add_controller(struct hsi_controller *ssi,
3787 +
3788 + err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL);
3789 + if (err < 0)
3790 +- goto out_err;
3791 ++ return err;
3792 + ssi->id = err;
3793 +
3794 + ssi->owner = THIS_MODULE;
3795 +diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c
3796 +index 8a51dcf055eab..026f70d7c5a43 100644
3797 +--- a/drivers/hwmon/ina3221.c
3798 ++++ b/drivers/hwmon/ina3221.c
3799 +@@ -403,7 +403,7 @@ static int ina3221_write_enable(struct device *dev, int channel, bool enable)
3800 +
3801 + /* For enabling routine, increase refcount and resume() at first */
3802 + if (enable) {
3803 +- ret = pm_runtime_get_sync(ina->pm_dev);
3804 ++ ret = pm_runtime_resume_and_get(ina->pm_dev);
3805 + if (ret < 0) {
3806 + dev_err(dev, "Failed to get PM runtime\n");
3807 + return ret;
3808 +diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
3809 +index 3810290e6d07a..95cba1a2cddf2 100644
3810 +--- a/drivers/hwtracing/coresight/coresight-etb10.c
3811 ++++ b/drivers/hwtracing/coresight/coresight-etb10.c
3812 +@@ -176,6 +176,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
3813 + unsigned long flags;
3814 + struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
3815 + struct perf_output_handle *handle = data;
3816 ++ struct cs_buffers *buf = etm_perf_sink_config(handle);
3817 +
3818 + spin_lock_irqsave(&drvdata->spinlock, flags);
3819 +
3820 +@@ -186,7 +187,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
3821 + }
3822 +
3823 + /* Get a handle on the pid of the process to monitor */
3824 +- pid = task_pid_nr(handle->event->owner);
3825 ++ pid = buf->pid;
3826 +
3827 + if (drvdata->pid != -1 && drvdata->pid != pid) {
3828 + ret = -EBUSY;
3829 +@@ -383,6 +384,7 @@ static void *etb_alloc_buffer(struct coresight_device *csdev,
3830 + if (!buf)
3831 + return NULL;
3832 +
3833 ++ buf->pid = task_pid_nr(event->owner);
3834 + buf->snapshot = overwrite;
3835 + buf->nr_pages = nr_pages;
3836 + buf->data_pages = pages;
3837 +diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
3838 +index 82e563cdc8794..56379d4a7ede7 100644
3839 +--- a/drivers/hwtracing/coresight/coresight-priv.h
3840 ++++ b/drivers/hwtracing/coresight/coresight-priv.h
3841 +@@ -86,6 +86,7 @@ enum cs_mode {
3842 + * struct cs_buffer - keep track of a recording session' specifics
3843 + * @cur: index of the current buffer
3844 + * @nr_pages: max number of pages granted to us
3845 ++ * @pid: PID this cs_buffer belongs to
3846 + * @offset: offset within the current buffer
3847 + * @data_size: how much we collected in this run
3848 + * @snapshot: is this run in snapshot mode
3849 +@@ -94,6 +95,7 @@ enum cs_mode {
3850 + struct cs_buffers {
3851 + unsigned int cur;
3852 + unsigned int nr_pages;
3853 ++ pid_t pid;
3854 + unsigned long offset;
3855 + local_t data_size;
3856 + bool snapshot;
3857 +diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
3858 +index 6375504ba8b00..a5d70d09d2bd1 100644
3859 +--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
3860 ++++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
3861 +@@ -227,6 +227,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
3862 + unsigned long flags;
3863 + struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
3864 + struct perf_output_handle *handle = data;
3865 ++ struct cs_buffers *buf = etm_perf_sink_config(handle);
3866 +
3867 + spin_lock_irqsave(&drvdata->spinlock, flags);
3868 + do {
3869 +@@ -243,7 +244,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
3870 + }
3871 +
3872 + /* Get a handle on the pid of the process to monitor */
3873 +- pid = task_pid_nr(handle->event->owner);
3874 ++ pid = buf->pid;
3875 +
3876 + if (drvdata->pid != -1 && drvdata->pid != pid) {
3877 + ret = -EBUSY;
3878 +@@ -399,6 +400,7 @@ static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
3879 + if (!buf)
3880 + return NULL;
3881 +
3882 ++ buf->pid = task_pid_nr(event->owner);
3883 + buf->snapshot = overwrite;
3884 + buf->nr_pages = nr_pages;
3885 + buf->data_pages = pages;
3886 +diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
3887 +index 625882bc8b08f..ed77c7f7b344b 100644
3888 +--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
3889 ++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
3890 +@@ -217,6 +217,8 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
3891 + } else {
3892 + page = alloc_pages_node(node,
3893 + GFP_KERNEL | __GFP_ZERO, 0);
3894 ++ if (!page)
3895 ++ goto err;
3896 + }
3897 + paddr = dma_map_page(real_dev, page, 0, PAGE_SIZE, dir);
3898 + if (dma_mapping_error(real_dev, paddr))
3899 +@@ -1533,7 +1535,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
3900 +
3901 + /* Insert barrier packets at the beginning, if there was an overflow */
3902 + if (lost)
3903 +- tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
3904 ++ tmc_etr_buf_insert_barrier_packet(etr_buf, offset);
3905 + tmc_etr_sync_perf_buffer(etr_perf, offset, size);
3906 +
3907 + /*
3908 +diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
3909 +index aafc76ee93e02..17abf60c94aeb 100644
3910 +--- a/drivers/i2c/busses/i2c-qcom-geni.c
3911 ++++ b/drivers/i2c/busses/i2c-qcom-geni.c
3912 +@@ -368,6 +368,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
3913 + geni_se_select_mode(se, GENI_SE_FIFO);
3914 +
3915 + writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
3916 ++ geni_se_setup_m_cmd(se, I2C_READ, m_param);
3917 +
3918 + if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
3919 + geni_se_select_mode(se, GENI_SE_FIFO);
3920 +@@ -375,8 +376,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
3921 + dma_buf = NULL;
3922 + }
3923 +
3924 +- geni_se_setup_m_cmd(se, I2C_READ, m_param);
3925 +-
3926 + time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
3927 + if (!time_left)
3928 + geni_i2c_abort_xfer(gi2c);
3929 +@@ -410,6 +409,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
3930 + geni_se_select_mode(se, GENI_SE_FIFO);
3931 +
3932 + writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
3933 ++ geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
3934 +
3935 + if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
3936 + geni_se_select_mode(se, GENI_SE_FIFO);
3937 +@@ -417,8 +417,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
3938 + dma_buf = NULL;
3939 + }
3940 +
3941 +- geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
3942 +-
3943 + if (!dma_buf) /* Get FIFO IRQ */
3944 + writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
3945 +
3946 +diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
3947 +index 582ba047c4a67..cddc3dfd2ab7b 100644
3948 +--- a/drivers/iio/adc/rockchip_saradc.c
3949 ++++ b/drivers/iio/adc/rockchip_saradc.c
3950 +@@ -372,7 +372,7 @@ static int rockchip_saradc_resume(struct device *dev)
3951 +
3952 + ret = clk_prepare_enable(info->clk);
3953 + if (ret)
3954 +- return ret;
3955 ++ clk_disable_unprepare(info->pclk);
3956 +
3957 + return ret;
3958 + }
3959 +diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
3960 +index 552c2be8d87ad..4b706949a67ff 100644
3961 +--- a/drivers/iio/adc/ti-ads124s08.c
3962 ++++ b/drivers/iio/adc/ti-ads124s08.c
3963 +@@ -97,6 +97,14 @@ struct ads124s_private {
3964 + struct gpio_desc *reset_gpio;
3965 + struct spi_device *spi;
3966 + struct mutex lock;
3967 ++ /*
3968 ++ * Used to correctly align data.
3969 ++ * Ensure timestamp is naturally aligned.
3970 ++ * Note that the full buffer length may not be needed if not
3971 ++ * all channels are enabled, as long as the alignment of the
3972 ++ * timestamp is maintained.
3973 ++ */
3974 ++ u32 buffer[ADS124S08_MAX_CHANNELS + sizeof(s64)/sizeof(u32)] __aligned(8);
3975 + u8 data[5] ____cacheline_aligned;
3976 + };
3977 +
3978 +@@ -270,7 +278,6 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p)
3979 + struct iio_poll_func *pf = p;
3980 + struct iio_dev *indio_dev = pf->indio_dev;
3981 + struct ads124s_private *priv = iio_priv(indio_dev);
3982 +- u32 buffer[ADS124S08_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
3983 + int scan_index, j = 0;
3984 + int ret;
3985 +
3986 +@@ -285,7 +292,7 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p)
3987 + if (ret)
3988 + dev_err(&priv->spi->dev, "Start ADC conversions failed\n");
3989 +
3990 +- buffer[j] = ads124s_read(indio_dev, scan_index);
3991 ++ priv->buffer[j] = ads124s_read(indio_dev, scan_index);
3992 + ret = ads124s_write_cmd(indio_dev, ADS124S08_STOP_CONV);
3993 + if (ret)
3994 + dev_err(&priv->spi->dev, "Stop ADC conversions failed\n");
3995 +@@ -293,7 +300,7 @@ static irqreturn_t ads124s_trigger_handler(int irq, void *p)
3996 + j++;
3997 + }
3998 +
3999 +- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
4000 ++ iio_push_to_buffers_with_timestamp(indio_dev, priv->buffer,
4001 + pf->timestamp);
4002 +
4003 + iio_trigger_notify_done(indio_dev->trig);
4004 +diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
4005 +index 6af65d6f1d280..a5994899e3965 100644
4006 +--- a/drivers/iio/imu/bmi160/bmi160_core.c
4007 ++++ b/drivers/iio/imu/bmi160/bmi160_core.c
4008 +@@ -411,8 +411,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
4009 + struct iio_poll_func *pf = p;
4010 + struct iio_dev *indio_dev = pf->indio_dev;
4011 + struct bmi160_data *data = iio_priv(indio_dev);
4012 +- __le16 buf[16];
4013 +- /* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */
4014 ++ __le16 buf[12];
4015 ++ /* 2 sens x 3 axis x __le16 + 2 x __le16 pad + 4 x __le16 tstamp */
4016 + int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
4017 + __le16 sample;
4018 +
4019 +diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
4020 +index 112225c0e4868..e099517283bef 100644
4021 +--- a/drivers/iio/industrialio-buffer.c
4022 ++++ b/drivers/iio/industrialio-buffer.c
4023 +@@ -845,12 +845,12 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
4024 + indio_dev->masklength,
4025 + in_ind + 1);
4026 + while (in_ind != out_ind) {
4027 +- in_ind = find_next_bit(indio_dev->active_scan_mask,
4028 +- indio_dev->masklength,
4029 +- in_ind + 1);
4030 + length = iio_storage_bytes_for_si(indio_dev, in_ind);
4031 + /* Make sure we are aligned */
4032 + in_loc = roundup(in_loc, length) + length;
4033 ++ in_ind = find_next_bit(indio_dev->active_scan_mask,
4034 ++ indio_dev->masklength,
4035 ++ in_ind + 1);
4036 + }
4037 + length = iio_storage_bytes_for_si(indio_dev, in_ind);
4038 + out_loc = roundup(out_loc, length);
4039 +diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
4040 +index a0a7aeae5a822..254c5b0199d4c 100644
4041 +--- a/drivers/iio/light/rpr0521.c
4042 ++++ b/drivers/iio/light/rpr0521.c
4043 +@@ -194,6 +194,17 @@ struct rpr0521_data {
4044 + bool pxs_need_dis;
4045 +
4046 + struct regmap *regmap;
4047 ++
4048 ++ /*
4049 ++ * Ensure correct naturally aligned timestamp.
4050 ++ * Note that the read will put garbage data into
4051 ++ * the padding but this should not be a problem
4052 ++ */
4053 ++ struct {
4054 ++ __le16 channels[3];
4055 ++ u8 garbage;
4056 ++ s64 ts __aligned(8);
4057 ++ } scan;
4058 + };
4059 +
4060 + static IIO_CONST_ATTR(in_intensity_scale_available, RPR0521_ALS_SCALE_AVAIL);
4061 +@@ -449,8 +460,6 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
4062 + struct rpr0521_data *data = iio_priv(indio_dev);
4063 + int err;
4064 +
4065 +- u8 buffer[16]; /* 3 16-bit channels + padding + ts */
4066 +-
4067 + /* Use irq timestamp when reasonable. */
4068 + if (iio_trigger_using_own(indio_dev) && data->irq_timestamp) {
4069 + pf->timestamp = data->irq_timestamp;
4070 +@@ -461,11 +470,11 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
4071 + pf->timestamp = iio_get_time_ns(indio_dev);
4072 +
4073 + err = regmap_bulk_read(data->regmap, RPR0521_REG_PXS_DATA,
4074 +- &buffer,
4075 ++ data->scan.channels,
4076 + (3 * 2) + 1); /* 3 * 16-bit + (discarded) int clear reg. */
4077 + if (!err)
4078 + iio_push_to_buffers_with_timestamp(indio_dev,
4079 +- buffer, pf->timestamp);
4080 ++ &data->scan, pf->timestamp);
4081 + else
4082 + dev_err(&data->client->dev,
4083 + "Trigger consumer can't read from sensor.\n");
4084 +diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h
4085 +index 78bc56aad1299..283086887caf5 100644
4086 +--- a/drivers/iio/light/st_uvis25.h
4087 ++++ b/drivers/iio/light/st_uvis25.h
4088 +@@ -27,6 +27,11 @@ struct st_uvis25_hw {
4089 + struct iio_trigger *trig;
4090 + bool enabled;
4091 + int irq;
4092 ++ /* Ensure timestamp is naturally aligned */
4093 ++ struct {
4094 ++ u8 chan;
4095 ++ s64 ts __aligned(8);
4096 ++ } scan;
4097 + };
4098 +
4099 + extern const struct dev_pm_ops st_uvis25_pm_ops;
4100 +diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
4101 +index d262c254b895a..9aaff35f49b0f 100644
4102 +--- a/drivers/iio/light/st_uvis25_core.c
4103 ++++ b/drivers/iio/light/st_uvis25_core.c
4104 +@@ -234,17 +234,19 @@ static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = {
4105 +
4106 + static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p)
4107 + {
4108 +- u8 buffer[ALIGN(sizeof(u8), sizeof(s64)) + sizeof(s64)];
4109 + struct iio_poll_func *pf = p;
4110 + struct iio_dev *iio_dev = pf->indio_dev;
4111 + struct st_uvis25_hw *hw = iio_priv(iio_dev);
4112 ++ unsigned int val;
4113 + int err;
4114 +
4115 +- err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, (int *)buffer);
4116 ++ err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, &val);
4117 + if (err < 0)
4118 + goto out;
4119 +
4120 +- iio_push_to_buffers_with_timestamp(iio_dev, buffer,
4121 ++ hw->scan.chan = val;
4122 ++
4123 ++ iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan,
4124 + iio_get_time_ns(iio_dev));
4125 +
4126 + out:
4127 +diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
4128 +index fb16cfdd6fa66..b7b98dd4d2cbf 100644
4129 +--- a/drivers/iio/magnetometer/mag3110.c
4130 ++++ b/drivers/iio/magnetometer/mag3110.c
4131 +@@ -56,6 +56,12 @@ struct mag3110_data {
4132 + int sleep_val;
4133 + struct regulator *vdd_reg;
4134 + struct regulator *vddio_reg;
4135 ++ /* Ensure natural alignment of timestamp */
4136 ++ struct {
4137 ++ __be16 channels[3];
4138 ++ u8 temperature;
4139 ++ s64 ts __aligned(8);
4140 ++ } scan;
4141 + };
4142 +
4143 + static int mag3110_request(struct mag3110_data *data)
4144 +@@ -387,10 +393,9 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
4145 + struct iio_poll_func *pf = p;
4146 + struct iio_dev *indio_dev = pf->indio_dev;
4147 + struct mag3110_data *data = iio_priv(indio_dev);
4148 +- u8 buffer[16]; /* 3 16-bit channels + 1 byte temp + padding + ts */
4149 + int ret;
4150 +
4151 +- ret = mag3110_read(data, (__be16 *) buffer);
4152 ++ ret = mag3110_read(data, data->scan.channels);
4153 + if (ret < 0)
4154 + goto done;
4155 +
4156 +@@ -399,10 +404,10 @@ static irqreturn_t mag3110_trigger_handler(int irq, void *p)
4157 + MAG3110_DIE_TEMP);
4158 + if (ret < 0)
4159 + goto done;
4160 +- buffer[6] = ret;
4161 ++ data->scan.temperature = ret;
4162 + }
4163 +
4164 +- iio_push_to_buffers_with_timestamp(indio_dev, buffer,
4165 ++ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
4166 + iio_get_time_ns(indio_dev));
4167 +
4168 + done:
4169 +diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
4170 +index d066f3c5a8a61..822b9e19688ef 100644
4171 +--- a/drivers/iio/pressure/mpl3115.c
4172 ++++ b/drivers/iio/pressure/mpl3115.c
4173 +@@ -144,7 +144,14 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
4174 + struct iio_poll_func *pf = p;
4175 + struct iio_dev *indio_dev = pf->indio_dev;
4176 + struct mpl3115_data *data = iio_priv(indio_dev);
4177 +- u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */
4178 ++ /*
4179 ++ * 32-bit channel + 16-bit channel + padding + ts
4180 ++ * Note that it is possible for only one of the first 2
4181 ++ * channels to be enabled. If that happens, the first element
4182 ++ * of the buffer may be either 16 or 32-bits. As such we cannot
4183 ++ * use a simple structure definition to express this data layout.
4184 ++ */
4185 ++ u8 buffer[16] __aligned(8);
4186 + int ret, pos = 0;
4187 +
4188 + mutex_lock(&data->lock);
4189 +diff --git a/drivers/iio/trigger/iio-trig-hrtimer.c b/drivers/iio/trigger/iio-trig-hrtimer.c
4190 +index a5e670726717f..58c1c30d5612b 100644
4191 +--- a/drivers/iio/trigger/iio-trig-hrtimer.c
4192 ++++ b/drivers/iio/trigger/iio-trig-hrtimer.c
4193 +@@ -102,7 +102,7 @@ static int iio_trig_hrtimer_set_state(struct iio_trigger *trig, bool state)
4194 +
4195 + if (state)
4196 + hrtimer_start(&trig_info->timer, trig_info->period,
4197 +- HRTIMER_MODE_REL);
4198 ++ HRTIMER_MODE_REL_HARD);
4199 + else
4200 + hrtimer_cancel(&trig_info->timer);
4201 +
4202 +@@ -132,7 +132,7 @@ static struct iio_sw_trigger *iio_trig_hrtimer_probe(const char *name)
4203 + trig_info->swt.trigger->ops = &iio_hrtimer_trigger_ops;
4204 + trig_info->swt.trigger->dev.groups = iio_hrtimer_attr_groups;
4205 +
4206 +- hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4207 ++ hrtimer_init(&trig_info->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
4208 + trig_info->timer.function = iio_hrtimer_trig_handler;
4209 +
4210 + trig_info->sampling_frequency = HRTIMER_DEFAULT_SAMPLING_FREQUENCY;
4211 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
4212 +index c1d6a068f50fe..fd7c84721b0de 100644
4213 +--- a/drivers/infiniband/core/cm.c
4214 ++++ b/drivers/infiniband/core/cm.c
4215 +@@ -1435,6 +1435,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
4216 + id.local_id);
4217 + if (IS_ERR(cm_id_priv->timewait_info)) {
4218 + ret = PTR_ERR(cm_id_priv->timewait_info);
4219 ++ cm_id_priv->timewait_info = NULL;
4220 + goto out;
4221 + }
4222 +
4223 +@@ -1961,6 +1962,7 @@ static int cm_req_handler(struct cm_work *work)
4224 + id.local_id);
4225 + if (IS_ERR(cm_id_priv->timewait_info)) {
4226 + ret = PTR_ERR(cm_id_priv->timewait_info);
4227 ++ cm_id_priv->timewait_info = NULL;
4228 + goto destroy;
4229 + }
4230 + cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
4231 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
4232 +index 98d2d74b96f78..ecac62a7b59e8 100644
4233 +--- a/drivers/infiniband/core/cma.c
4234 ++++ b/drivers/infiniband/core/cma.c
4235 +@@ -530,6 +530,10 @@ static void cma_release_dev(struct rdma_id_private *id_priv)
4236 + list_del(&id_priv->list);
4237 + cma_deref_dev(id_priv->cma_dev);
4238 + id_priv->cma_dev = NULL;
4239 ++ if (id_priv->id.route.addr.dev_addr.sgid_attr) {
4240 ++ rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
4241 ++ id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
4242 ++ }
4243 + mutex_unlock(&lock);
4244 + }
4245 +
4246 +@@ -1879,9 +1883,6 @@ void rdma_destroy_id(struct rdma_cm_id *id)
4247 +
4248 + kfree(id_priv->id.route.path_rec);
4249 +
4250 +- if (id_priv->id.route.addr.dev_addr.sgid_attr)
4251 +- rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
4252 +-
4253 + put_net(id_priv->id.route.addr.dev_addr.net);
4254 + kfree(id_priv);
4255 + }
4256 +diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
4257 +index 59dc9f3cfb376..256d379bba676 100644
4258 +--- a/drivers/infiniband/core/device.c
4259 ++++ b/drivers/infiniband/core/device.c
4260 +@@ -1387,9 +1387,6 @@ int ib_register_device(struct ib_device *device, const char *name)
4261 + }
4262 +
4263 + ret = enable_device_and_get(device);
4264 +- dev_set_uevent_suppress(&device->dev, false);
4265 +- /* Mark for userspace that device is ready */
4266 +- kobject_uevent(&device->dev.kobj, KOBJ_ADD);
4267 + if (ret) {
4268 + void (*dealloc_fn)(struct ib_device *);
4269 +
4270 +@@ -1409,8 +1406,12 @@ int ib_register_device(struct ib_device *device, const char *name)
4271 + ib_device_put(device);
4272 + __ib_unregister_device(device);
4273 + device->ops.dealloc_driver = dealloc_fn;
4274 ++ dev_set_uevent_suppress(&device->dev, false);
4275 + return ret;
4276 + }
4277 ++ dev_set_uevent_suppress(&device->dev, false);
4278 ++ /* Mark for userspace that device is ready */
4279 ++ kobject_uevent(&device->dev.kobj, KOBJ_ADD);
4280 + ib_device_put(device);
4281 +
4282 + return 0;
4283 +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
4284 +index 3b05c0640338f..58c021648b7c8 100644
4285 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
4286 ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
4287 +@@ -1793,6 +1793,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
4288 + goto out;
4289 + }
4290 + qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
4291 ++ qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
4292 + qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
4293 + qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
4294 + qp_attr->pkey_index = qplib_qp->pkey_index;
4295 +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
4296 +index b1bb61c65f4f6..16b74591a68db 100644
4297 +--- a/drivers/infiniband/hw/cxgb4/cq.c
4298 ++++ b/drivers/infiniband/hw/cxgb4/cq.c
4299 +@@ -1007,6 +1007,9 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
4300 + if (attr->flags)
4301 + return -EINVAL;
4302 +
4303 ++ if (entries < 1 || entries > ibdev->attrs.max_cqe)
4304 ++ return -EINVAL;
4305 ++
4306 + if (vector >= rhp->rdev.lldi.nciq)
4307 + return -EINVAL;
4308 +
4309 +diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
4310 +index 119b2573c9a08..26c3408dcacae 100644
4311 +--- a/drivers/infiniband/hw/mthca/mthca_cq.c
4312 ++++ b/drivers/infiniband/hw/mthca/mthca_cq.c
4313 +@@ -604,7 +604,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
4314 + entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
4315 + break;
4316 + default:
4317 +- entry->opcode = MTHCA_OPCODE_INVALID;
4318 ++ entry->opcode = 0xFF;
4319 + break;
4320 + }
4321 + } else {
4322 +diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
4323 +index bfd4eebc1182f..58d46449b0e86 100644
4324 +--- a/drivers/infiniband/hw/mthca/mthca_dev.h
4325 ++++ b/drivers/infiniband/hw/mthca/mthca_dev.h
4326 +@@ -105,7 +105,6 @@ enum {
4327 + MTHCA_OPCODE_ATOMIC_CS = 0x11,
4328 + MTHCA_OPCODE_ATOMIC_FA = 0x12,
4329 + MTHCA_OPCODE_BIND_MW = 0x18,
4330 +- MTHCA_OPCODE_INVALID = 0xff
4331 + };
4332 +
4333 + enum {
4334 +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
4335 +index e5031172c0193..a4d6e0b7901e9 100644
4336 +--- a/drivers/infiniband/sw/rxe/rxe_req.c
4337 ++++ b/drivers/infiniband/sw/rxe/rxe_req.c
4338 +@@ -664,7 +664,8 @@ next_wqe:
4339 + }
4340 +
4341 + if (unlikely(qp_type(qp) == IB_QPT_RC &&
4342 +- qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
4343 ++ psn_compare(qp->req.psn, (qp->comp.psn +
4344 ++ RXE_MAX_UNACKED_PSNS)) > 0)) {
4345 + qp->req.wait_psn = 1;
4346 + goto exit;
4347 + }
4348 +diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
4349 +index 8d4d9786cc745..cae262b6ff398 100644
4350 +--- a/drivers/input/keyboard/cros_ec_keyb.c
4351 ++++ b/drivers/input/keyboard/cros_ec_keyb.c
4352 +@@ -183,6 +183,7 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
4353 + "changed: [r%d c%d]: byte %02x\n",
4354 + row, col, new_state);
4355 +
4356 ++ input_event(idev, EV_MSC, MSC_SCAN, pos);
4357 + input_report_key(idev, keycodes[pos],
4358 + new_state);
4359 + }
4360 +diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
4361 +index d6c924032aaa8..dd16f7b3c7ef6 100644
4362 +--- a/drivers/input/keyboard/omap4-keypad.c
4363 ++++ b/drivers/input/keyboard/omap4-keypad.c
4364 +@@ -186,12 +186,8 @@ static int omap4_keypad_open(struct input_dev *input)
4365 + return 0;
4366 + }
4367 +
4368 +-static void omap4_keypad_close(struct input_dev *input)
4369 ++static void omap4_keypad_stop(struct omap4_keypad *keypad_data)
4370 + {
4371 +- struct omap4_keypad *keypad_data = input_get_drvdata(input);
4372 +-
4373 +- disable_irq(keypad_data->irq);
4374 +-
4375 + /* Disable interrupts and wake-up events */
4376 + kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
4377 + OMAP4_VAL_IRQDISABLE);
4378 +@@ -200,7 +196,15 @@ static void omap4_keypad_close(struct input_dev *input)
4379 + /* clear pending interrupts */
4380 + kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
4381 + kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
4382 ++}
4383 ++
4384 ++static void omap4_keypad_close(struct input_dev *input)
4385 ++{
4386 ++ struct omap4_keypad *keypad_data;
4387 +
4388 ++ keypad_data = input_get_drvdata(input);
4389 ++ disable_irq(keypad_data->irq);
4390 ++ omap4_keypad_stop(keypad_data);
4391 + enable_irq(keypad_data->irq);
4392 +
4393 + pm_runtime_put_sync(input->dev.parent);
4394 +@@ -223,13 +227,37 @@ static int omap4_keypad_parse_dt(struct device *dev,
4395 + return 0;
4396 + }
4397 +
4398 ++static int omap4_keypad_check_revision(struct device *dev,
4399 ++ struct omap4_keypad *keypad_data)
4400 ++{
4401 ++ unsigned int rev;
4402 ++
4403 ++ rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION);
4404 ++ rev &= 0x03 << 30;
4405 ++ rev >>= 30;
4406 ++ switch (rev) {
4407 ++ case KBD_REVISION_OMAP4:
4408 ++ keypad_data->reg_offset = 0x00;
4409 ++ keypad_data->irqreg_offset = 0x00;
4410 ++ break;
4411 ++ case KBD_REVISION_OMAP5:
4412 ++ keypad_data->reg_offset = 0x10;
4413 ++ keypad_data->irqreg_offset = 0x0c;
4414 ++ break;
4415 ++ default:
4416 ++ dev_err(dev, "Keypad reports unsupported revision %d", rev);
4417 ++ return -EINVAL;
4418 ++ }
4419 ++
4420 ++ return 0;
4421 ++}
4422 ++
4423 + static int omap4_keypad_probe(struct platform_device *pdev)
4424 + {
4425 + struct omap4_keypad *keypad_data;
4426 + struct input_dev *input_dev;
4427 + struct resource *res;
4428 + unsigned int max_keys;
4429 +- int rev;
4430 + int irq;
4431 + int error;
4432 +
4433 +@@ -269,41 +297,33 @@ static int omap4_keypad_probe(struct platform_device *pdev)
4434 + goto err_release_mem;
4435 + }
4436 +
4437 ++ pm_runtime_enable(&pdev->dev);
4438 +
4439 + /*
4440 + * Enable clocks for the keypad module so that we can read
4441 + * revision register.
4442 + */
4443 +- pm_runtime_enable(&pdev->dev);
4444 + error = pm_runtime_get_sync(&pdev->dev);
4445 + if (error) {
4446 + dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
4447 +- goto err_unmap;
4448 +- }
4449 +- rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION);
4450 +- rev &= 0x03 << 30;
4451 +- rev >>= 30;
4452 +- switch (rev) {
4453 +- case KBD_REVISION_OMAP4:
4454 +- keypad_data->reg_offset = 0x00;
4455 +- keypad_data->irqreg_offset = 0x00;
4456 +- break;
4457 +- case KBD_REVISION_OMAP5:
4458 +- keypad_data->reg_offset = 0x10;
4459 +- keypad_data->irqreg_offset = 0x0c;
4460 +- break;
4461 +- default:
4462 +- dev_err(&pdev->dev,
4463 +- "Keypad reports unsupported revision %d", rev);
4464 +- error = -EINVAL;
4465 +- goto err_pm_put_sync;
4466 ++ pm_runtime_put_noidle(&pdev->dev);
4467 ++ } else {
4468 ++ error = omap4_keypad_check_revision(&pdev->dev,
4469 ++ keypad_data);
4470 ++ if (!error) {
4471 ++ /* Ensure device does not raise interrupts */
4472 ++ omap4_keypad_stop(keypad_data);
4473 ++ }
4474 ++ pm_runtime_put_sync(&pdev->dev);
4475 + }
4476 ++ if (error)
4477 ++ goto err_pm_disable;
4478 +
4479 + /* input device allocation */
4480 + keypad_data->input = input_dev = input_allocate_device();
4481 + if (!input_dev) {
4482 + error = -ENOMEM;
4483 +- goto err_pm_put_sync;
4484 ++ goto err_pm_disable;
4485 + }
4486 +
4487 + input_dev->name = pdev->name;
4488 +@@ -349,28 +369,25 @@ static int omap4_keypad_probe(struct platform_device *pdev)
4489 + goto err_free_keymap;
4490 + }
4491 +
4492 +- device_init_wakeup(&pdev->dev, true);
4493 +- pm_runtime_put_sync(&pdev->dev);
4494 +-
4495 + error = input_register_device(keypad_data->input);
4496 + if (error < 0) {
4497 + dev_err(&pdev->dev, "failed to register input device\n");
4498 +- goto err_pm_disable;
4499 ++ goto err_free_irq;
4500 + }
4501 +
4502 ++ device_init_wakeup(&pdev->dev, true);
4503 + platform_set_drvdata(pdev, keypad_data);
4504 ++
4505 + return 0;
4506 +
4507 +-err_pm_disable:
4508 +- pm_runtime_disable(&pdev->dev);
4509 ++err_free_irq:
4510 + free_irq(keypad_data->irq, keypad_data);
4511 + err_free_keymap:
4512 + kfree(keypad_data->keymap);
4513 + err_free_input:
4514 + input_free_device(input_dev);
4515 +-err_pm_put_sync:
4516 +- pm_runtime_put_sync(&pdev->dev);
4517 +-err_unmap:
4518 ++err_pm_disable:
4519 ++ pm_runtime_disable(&pdev->dev);
4520 + iounmap(keypad_data->base);
4521 + err_release_mem:
4522 + release_mem_region(res->start, resource_size(res));
4523 +diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
4524 +index c1b524ab46232..ba50f57134239 100644
4525 +--- a/drivers/input/mouse/cyapa_gen6.c
4526 ++++ b/drivers/input/mouse/cyapa_gen6.c
4527 +@@ -573,7 +573,7 @@ static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa,
4528 +
4529 + memset(&cmd, 0, sizeof(cmd));
4530 + put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr);
4531 +- put_unaligned_le16(sizeof(cmd), &cmd.head.length - 2);
4532 ++ put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length);
4533 + cmd.head.report_id = PIP_APP_CMD_REPORT_ID;
4534 + cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE;
4535 + put_unaligned_le16(read_offset, &cmd.read_offset);
4536 +diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
4537 +index 51ddb204ca1ba..d247d0ae82d26 100644
4538 +--- a/drivers/input/touchscreen/ads7846.c
4539 ++++ b/drivers/input/touchscreen/ads7846.c
4540 +@@ -33,6 +33,7 @@
4541 + #include <linux/regulator/consumer.h>
4542 + #include <linux/module.h>
4543 + #include <asm/irq.h>
4544 ++#include <asm/unaligned.h>
4545 +
4546 + /*
4547 + * This code has been heavily tested on a Nokia 770, and lightly
4548 +@@ -199,6 +200,26 @@ struct ads7846 {
4549 + #define REF_ON (READ_12BIT_DFR(x, 1, 1))
4550 + #define REF_OFF (READ_12BIT_DFR(y, 0, 0))
4551 +
4552 ++static int get_pendown_state(struct ads7846 *ts)
4553 ++{
4554 ++ if (ts->get_pendown_state)
4555 ++ return ts->get_pendown_state();
4556 ++
4557 ++ return !gpio_get_value(ts->gpio_pendown);
4558 ++}
4559 ++
4560 ++static void ads7846_report_pen_up(struct ads7846 *ts)
4561 ++{
4562 ++ struct input_dev *input = ts->input;
4563 ++
4564 ++ input_report_key(input, BTN_TOUCH, 0);
4565 ++ input_report_abs(input, ABS_PRESSURE, 0);
4566 ++ input_sync(input);
4567 ++
4568 ++ ts->pendown = false;
4569 ++ dev_vdbg(&ts->spi->dev, "UP\n");
4570 ++}
4571 ++
4572 + /* Must be called with ts->lock held */
4573 + static void ads7846_stop(struct ads7846 *ts)
4574 + {
4575 +@@ -215,6 +236,10 @@ static void ads7846_stop(struct ads7846 *ts)
4576 + static void ads7846_restart(struct ads7846 *ts)
4577 + {
4578 + if (!ts->disabled && !ts->suspended) {
4579 ++ /* Check if pen was released since last stop */
4580 ++ if (ts->pendown && !get_pendown_state(ts))
4581 ++ ads7846_report_pen_up(ts);
4582 ++
4583 + /* Tell IRQ thread that it may poll the device. */
4584 + ts->stopped = false;
4585 + mb();
4586 +@@ -410,7 +435,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
4587 +
4588 + if (status == 0) {
4589 + /* BE12 value, then padding */
4590 +- status = be16_to_cpu(*((u16 *)&req->sample[1]));
4591 ++ status = get_unaligned_be16(&req->sample[1]);
4592 + status = status >> 3;
4593 + status &= 0x0fff;
4594 + }
4595 +@@ -605,14 +630,6 @@ static const struct attribute_group ads784x_attr_group = {
4596 +
4597 + /*--------------------------------------------------------------------------*/
4598 +
4599 +-static int get_pendown_state(struct ads7846 *ts)
4600 +-{
4601 +- if (ts->get_pendown_state)
4602 +- return ts->get_pendown_state();
4603 +-
4604 +- return !gpio_get_value(ts->gpio_pendown);
4605 +-}
4606 +-
4607 + static void null_wait_for_sync(void)
4608 + {
4609 + }
4610 +@@ -785,10 +802,11 @@ static void ads7846_report_state(struct ads7846 *ts)
4611 + /* compute touch pressure resistance using equation #2 */
4612 + Rt = z2;
4613 + Rt -= z1;
4614 +- Rt *= x;
4615 + Rt *= ts->x_plate_ohms;
4616 ++ Rt = DIV_ROUND_CLOSEST(Rt, 16);
4617 ++ Rt *= x;
4618 + Rt /= z1;
4619 +- Rt = (Rt + 2047) >> 12;
4620 ++ Rt = DIV_ROUND_CLOSEST(Rt, 256);
4621 + } else {
4622 + Rt = 0;
4623 + }
4624 +@@ -867,16 +885,8 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
4625 + msecs_to_jiffies(TS_POLL_PERIOD));
4626 + }
4627 +
4628 +- if (ts->pendown && !ts->stopped) {
4629 +- struct input_dev *input = ts->input;
4630 +-
4631 +- input_report_key(input, BTN_TOUCH, 0);
4632 +- input_report_abs(input, ABS_PRESSURE, 0);
4633 +- input_sync(input);
4634 +-
4635 +- ts->pendown = false;
4636 +- dev_vdbg(&ts->spi->dev, "UP\n");
4637 +- }
4638 ++ if (ts->pendown && !ts->stopped)
4639 ++ ads7846_report_pen_up(ts);
4640 +
4641 + return IRQ_HANDLED;
4642 + }
4643 +diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
4644 +index 37b35ab97beb2..bfb945fc33a17 100644
4645 +--- a/drivers/input/touchscreen/goodix.c
4646 ++++ b/drivers/input/touchscreen/goodix.c
4647 +@@ -137,6 +137,18 @@ static const struct dmi_system_id rotated_screen[] = {
4648 + DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
4649 + },
4650 + },
4651 ++ {
4652 ++ .ident = "Teclast X98 Pro",
4653 ++ .matches = {
4654 ++ /*
4655 ++ * Only match BIOS date, because the manufacturer's
4656 ++ * BIOS does not report the board name at all
4657 ++ * (sometimes)...
4658 ++ */
4659 ++ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
4660 ++ DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
4661 ++ },
4662 ++ },
4663 + {
4664 + .ident = "WinBook TW100",
4665 + .matches = {
4666 +diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
4667 +index 23a3b877f7f1d..ede02dc2bcd0b 100644
4668 +--- a/drivers/irqchip/irq-alpine-msi.c
4669 ++++ b/drivers/irqchip/irq-alpine-msi.c
4670 +@@ -165,8 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
4671 + return 0;
4672 +
4673 + err_sgi:
4674 +- while (--i >= 0)
4675 +- irq_domain_free_irqs_parent(domain, virq, i);
4676 ++ irq_domain_free_irqs_parent(domain, virq, i - 1);
4677 + alpine_msix_free_sgi(priv, sgi, nr_irqs);
4678 + return err;
4679 + }
4680 +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
4681 +index ac83f5002ce5f..1c5133f71af39 100644
4682 +--- a/drivers/md/dm-ioctl.c
4683 ++++ b/drivers/md/dm-ioctl.c
4684 +@@ -1600,6 +1600,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
4685 +
4686 + if (!argc) {
4687 + DMWARN("Empty message received.");
4688 ++ r = -EINVAL;
4689 + goto out_argv;
4690 + }
4691 +
4692 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
4693 +index 13ad791126618..6dd56afa048c2 100644
4694 +--- a/drivers/md/dm-table.c
4695 ++++ b/drivers/md/dm-table.c
4696 +@@ -1320,12 +1320,6 @@ void dm_table_event_callback(struct dm_table *t,
4697 +
4698 + void dm_table_event(struct dm_table *t)
4699 + {
4700 +- /*
4701 +- * You can no longer call dm_table_event() from interrupt
4702 +- * context, use a bottom half instead.
4703 +- */
4704 +- BUG_ON(in_interrupt());
4705 +-
4706 + mutex_lock(&_event_lock);
4707 + if (t->event_fn)
4708 + t->event_fn(t->event_context);
4709 +diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
4710 +index afbbc552c3275..794e1d5891046 100644
4711 +--- a/drivers/md/md-cluster.c
4712 ++++ b/drivers/md/md-cluster.c
4713 +@@ -664,9 +664,27 @@ out:
4714 + * Takes the lock on the TOKEN lock resource so no other
4715 + * node can communicate while the operation is underway.
4716 + */
4717 +-static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
4718 ++static int lock_token(struct md_cluster_info *cinfo)
4719 + {
4720 +- int error, set_bit = 0;
4721 ++ int error;
4722 ++
4723 ++ error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
4724 ++ if (error) {
4725 ++ pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
4726 ++ __func__, __LINE__, error);
4727 ++ } else {
4728 ++ /* Lock the receive sequence */
4729 ++ mutex_lock(&cinfo->recv_mutex);
4730 ++ }
4731 ++ return error;
4732 ++}
4733 ++
4734 ++/* lock_comm()
4735 ++ * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
4736 ++ */
4737 ++static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
4738 ++{
4739 ++ int rv, set_bit = 0;
4740 + struct mddev *mddev = cinfo->mddev;
4741 +
4742 + /*
4743 +@@ -677,34 +695,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
4744 + */
4745 + if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
4746 + &cinfo->state)) {
4747 +- error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
4748 ++ rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
4749 + &cinfo->state);
4750 +- WARN_ON_ONCE(error);
4751 ++ WARN_ON_ONCE(rv);
4752 + md_wakeup_thread(mddev->thread);
4753 + set_bit = 1;
4754 + }
4755 +- error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
4756 +- if (set_bit)
4757 +- clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
4758 +
4759 +- if (error)
4760 +- pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
4761 +- __func__, __LINE__, error);
4762 +-
4763 +- /* Lock the receive sequence */
4764 +- mutex_lock(&cinfo->recv_mutex);
4765 +- return error;
4766 +-}
4767 +-
4768 +-/* lock_comm()
4769 +- * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
4770 +- */
4771 +-static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
4772 +-{
4773 + wait_event(cinfo->wait,
4774 + !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
4775 +-
4776 +- return lock_token(cinfo, mddev_locked);
4777 ++ rv = lock_token(cinfo);
4778 ++ if (set_bit)
4779 ++ clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
4780 ++ return rv;
4781 + }
4782 +
4783 + static void unlock_comm(struct md_cluster_info *cinfo)
4784 +@@ -784,9 +787,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg,
4785 + {
4786 + int ret;
4787 +
4788 +- lock_comm(cinfo, mddev_locked);
4789 +- ret = __sendmsg(cinfo, cmsg);
4790 +- unlock_comm(cinfo);
4791 ++ ret = lock_comm(cinfo, mddev_locked);
4792 ++ if (!ret) {
4793 ++ ret = __sendmsg(cinfo, cmsg);
4794 ++ unlock_comm(cinfo);
4795 ++ }
4796 + return ret;
4797 + }
4798 +
4799 +@@ -1061,7 +1066,7 @@ static int metadata_update_start(struct mddev *mddev)
4800 + return 0;
4801 + }
4802 +
4803 +- ret = lock_token(cinfo, 1);
4804 ++ ret = lock_token(cinfo);
4805 + clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
4806 + return ret;
4807 + }
4808 +@@ -1255,7 +1260,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
4809 + int raid_slot = -1;
4810 +
4811 + md_update_sb(mddev, 1);
4812 +- lock_comm(cinfo, 1);
4813 ++ if (lock_comm(cinfo, 1)) {
4814 ++ pr_err("%s: lock_comm failed\n", __func__);
4815 ++ return;
4816 ++ }
4817 +
4818 + memset(&cmsg, 0, sizeof(cmsg));
4819 + cmsg.type = cpu_to_le32(METADATA_UPDATED);
4820 +@@ -1407,7 +1415,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
4821 + cmsg.type = cpu_to_le32(NEWDISK);
4822 + memcpy(cmsg.uuid, uuid, 16);
4823 + cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
4824 +- lock_comm(cinfo, 1);
4825 ++ if (lock_comm(cinfo, 1))
4826 ++ return -EAGAIN;
4827 + ret = __sendmsg(cinfo, &cmsg);
4828 + if (ret) {
4829 + unlock_comm(cinfo);
4830 +diff --git a/drivers/md/md.c b/drivers/md/md.c
4831 +index acef01e519d06..ec5dfb7ae4e16 100644
4832 +--- a/drivers/md/md.c
4833 ++++ b/drivers/md/md.c
4834 +@@ -6721,8 +6721,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
4835 + goto busy;
4836 +
4837 + kick_rdev:
4838 +- if (mddev_is_clustered(mddev))
4839 +- md_cluster_ops->remove_disk(mddev, rdev);
4840 ++ if (mddev_is_clustered(mddev)) {
4841 ++ if (md_cluster_ops->remove_disk(mddev, rdev))
4842 ++ goto busy;
4843 ++ }
4844 +
4845 + md_kick_rdev_from_array(rdev);
4846 + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
4847 +@@ -7052,6 +7054,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
4848 + return -EINVAL;
4849 + if (mddev->sync_thread ||
4850 + test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
4851 ++ test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
4852 + mddev->reshape_position != MaxSector)
4853 + return -EBUSY;
4854 +
4855 +@@ -7371,8 +7374,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
4856 + err = -EBUSY;
4857 + goto out;
4858 + }
4859 +- WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
4860 +- set_bit(MD_CLOSING, &mddev->flags);
4861 ++ if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
4862 ++ mutex_unlock(&mddev->open_mutex);
4863 ++ err = -EBUSY;
4864 ++ goto out;
4865 ++ }
4866 + did_set_md_closing = true;
4867 + mutex_unlock(&mddev->open_mutex);
4868 + sync_blockdev(bdev);
4869 +@@ -9420,8 +9426,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
4870 + }
4871 + }
4872 +
4873 +- if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
4874 +- update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
4875 ++ if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
4876 ++ ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
4877 ++ if (ret)
4878 ++ pr_warn("md: updating array disks failed: %d\n", ret);
4879 ++ }
4880 +
4881 + /*
4882 + * Since mddev->delta_disks has already updated in update_raid_disks,
4883 +diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
4884 +index 88f90dfd368b1..ae17407e477a4 100644
4885 +--- a/drivers/media/common/siano/smsdvb-main.c
4886 ++++ b/drivers/media/common/siano/smsdvb-main.c
4887 +@@ -1169,12 +1169,15 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
4888 + rc = dvb_create_media_graph(&client->adapter, true);
4889 + if (rc < 0) {
4890 + pr_err("dvb_create_media_graph failed %d\n", rc);
4891 +- goto client_error;
4892 ++ goto media_graph_error;
4893 + }
4894 +
4895 + pr_info("DVB interface registered.\n");
4896 + return 0;
4897 +
4898 ++media_graph_error:
4899 ++ smsdvb_debugfs_release(client);
4900 ++
4901 + client_error:
4902 + dvb_unregister_frontend(&client->frontend);
4903 +
4904 +diff --git a/drivers/media/i2c/imx214.c b/drivers/media/i2c/imx214.c
4905 +index 159a3a604f0ed..24659cb0d0833 100644
4906 +--- a/drivers/media/i2c/imx214.c
4907 ++++ b/drivers/media/i2c/imx214.c
4908 +@@ -785,7 +785,7 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
4909 + if (ret < 0)
4910 + goto err_rpm_put;
4911 + } else {
4912 +- ret = imx214_start_streaming(imx214);
4913 ++ ret = imx214_stop_streaming(imx214);
4914 + if (ret < 0)
4915 + goto err_rpm_put;
4916 + pm_runtime_put(imx214->dev);
4917 +diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
4918 +index 19a3ceea3bc20..a62d7e2ac3567 100644
4919 +--- a/drivers/media/i2c/max2175.c
4920 ++++ b/drivers/media/i2c/max2175.c
4921 +@@ -503,7 +503,7 @@ static void max2175_set_bbfilter(struct max2175 *ctx)
4922 + }
4923 + }
4924 +
4925 +-static bool max2175_set_csm_mode(struct max2175 *ctx,
4926 ++static int max2175_set_csm_mode(struct max2175 *ctx,
4927 + enum max2175_csm_mode new_mode)
4928 + {
4929 + int ret = max2175_poll_csm_ready(ctx);
4930 +diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
4931 +index 1adfdc7ab0dbb..253f05aef3b1f 100644
4932 +--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
4933 ++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
4934 +@@ -799,6 +799,7 @@ static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
4935 + atomic_dec(&q->bufs_queued);
4936 + vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
4937 + state);
4938 ++ q->bufs[i] = NULL;
4939 + }
4940 + }
4941 + }
4942 +@@ -1243,29 +1244,15 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
4943 + struct v4l2_subdev_format *fmt)
4944 + {
4945 + struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
4946 +- struct v4l2_subdev_format format;
4947 +- int ret;
4948 +-
4949 +- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
4950 +- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
4951 +- return 0;
4952 +- }
4953 +
4954 +- if (fmt->pad == CIO2_PAD_SINK) {
4955 +- format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
4956 +- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
4957 +- &format);
4958 ++ mutex_lock(&q->subdev_lock);
4959 +
4960 +- if (ret)
4961 +- return ret;
4962 +- /* update colorspace etc */
4963 +- q->subdev_fmt.colorspace = format.format.colorspace;
4964 +- q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
4965 +- q->subdev_fmt.quantization = format.format.quantization;
4966 +- q->subdev_fmt.xfer_func = format.format.xfer_func;
4967 +- }
4968 ++ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
4969 ++ fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
4970 ++ else
4971 ++ fmt->format = q->subdev_fmt;
4972 +
4973 +- fmt->format = q->subdev_fmt;
4974 ++ mutex_unlock(&q->subdev_lock);
4975 +
4976 + return 0;
4977 + }
4978 +@@ -1282,6 +1269,9 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
4979 + struct v4l2_subdev_format *fmt)
4980 + {
4981 + struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
4982 ++ struct v4l2_mbus_framefmt *mbus;
4983 ++ u32 mbus_code = fmt->format.code;
4984 ++ unsigned int i;
4985 +
4986 + /*
4987 + * Only allow setting sink pad format;
4988 +@@ -1290,16 +1280,29 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
4989 + if (fmt->pad == CIO2_PAD_SOURCE)
4990 + return cio2_subdev_get_fmt(sd, cfg, fmt);
4991 +
4992 +- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
4993 +- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
4994 +- } else {
4995 +- /* It's the sink, allow changing frame size */
4996 +- q->subdev_fmt.width = fmt->format.width;
4997 +- q->subdev_fmt.height = fmt->format.height;
4998 +- q->subdev_fmt.code = fmt->format.code;
4999 +- fmt->format = q->subdev_fmt;
5000 ++ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
5001 ++ mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
5002 ++ else
5003 ++ mbus = &q->subdev_fmt;
5004 ++
5005 ++ fmt->format.code = formats[0].mbus_code;
5006 ++
5007 ++ for (i = 0; i < ARRAY_SIZE(formats); i++) {
5008 ++ if (formats[i].mbus_code == fmt->format.code) {
5009 ++ fmt->format.code = mbus_code;
5010 ++ break;
5011 ++ }
5012 + }
5013 +
5014 ++ fmt->format.width = min_t(u32, fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
5015 ++ fmt->format.height = min_t(u32, fmt->format.height,
5016 ++ CIO2_IMAGE_MAX_LENGTH);
5017 ++ fmt->format.field = V4L2_FIELD_NONE;
5018 ++
5019 ++ mutex_lock(&q->subdev_lock);
5020 ++ *mbus = fmt->format;
5021 ++ mutex_unlock(&q->subdev_lock);
5022 ++
5023 + return 0;
5024 + }
5025 +
5026 +@@ -1558,6 +1561,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
5027 +
5028 + /* Initialize miscellaneous variables */
5029 + mutex_init(&q->lock);
5030 ++ mutex_init(&q->subdev_lock);
5031 +
5032 + /* Initialize formats to default values */
5033 + fmt = &q->subdev_fmt;
5034 +@@ -1676,6 +1680,7 @@ fail_vdev_media_entity:
5035 + fail_subdev_media_entity:
5036 + cio2_fbpt_exit(q, &cio2->pci_dev->dev);
5037 + fail_fbpt:
5038 ++ mutex_destroy(&q->subdev_lock);
5039 + mutex_destroy(&q->lock);
5040 +
5041 + return r;
5042 +@@ -1689,6 +1694,7 @@ static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
5043 + v4l2_device_unregister_subdev(&q->subdev);
5044 + media_entity_cleanup(&q->subdev.entity);
5045 + cio2_fbpt_exit(q, &cio2->pci_dev->dev);
5046 ++ mutex_destroy(&q->subdev_lock);
5047 + mutex_destroy(&q->lock);
5048 + }
5049 +
5050 +diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
5051 +index 7caab9b8c2b99..af5855662112a 100644
5052 +--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
5053 ++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
5054 +@@ -332,6 +332,7 @@ struct cio2_queue {
5055 +
5056 + /* Subdev, /dev/v4l-subdevX */
5057 + struct v4l2_subdev subdev;
5058 ++ struct mutex subdev_lock; /* Serialise access to subdev_fmt field */
5059 + struct media_pad subdev_pads[CIO2_PADS];
5060 + struct v4l2_mbus_framefmt subdev_fmt;
5061 + atomic_t frame_sequence;
5062 +diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
5063 +index d4f12c250f91a..526042d8afae5 100644
5064 +--- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
5065 ++++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
5066 +@@ -175,7 +175,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev)
5067 + struct spi_master *master;
5068 + struct netup_spi *nspi;
5069 +
5070 +- master = spi_alloc_master(&ndev->pci_dev->dev,
5071 ++ master = devm_spi_alloc_master(&ndev->pci_dev->dev,
5072 + sizeof(struct netup_spi));
5073 + if (!master) {
5074 + dev_err(&ndev->pci_dev->dev,
5075 +@@ -208,6 +208,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev)
5076 + ndev->pci_slot,
5077 + ndev->pci_func);
5078 + if (!spi_new_device(master, &netup_spi_board)) {
5079 ++ spi_unregister_master(master);
5080 + ndev->spi = NULL;
5081 + dev_err(&ndev->pci_dev->dev,
5082 + "%s(): unable to create SPI device\n", __func__);
5083 +@@ -226,13 +227,13 @@ void netup_spi_release(struct netup_unidvb_dev *ndev)
5084 + if (!spi)
5085 + return;
5086 +
5087 ++ spi_unregister_master(spi->master);
5088 + spin_lock_irqsave(&spi->lock, flags);
5089 + reg = readw(&spi->regs->control_stat);
5090 + writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
5091 + reg = readw(&spi->regs->control_stat);
5092 + writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat);
5093 + spin_unlock_irqrestore(&spi->lock, flags);
5094 +- spi_unregister_master(spi->master);
5095 + ndev->spi = NULL;
5096 + }
5097 +
5098 +diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c
5099 +index e6a71c17566d2..952ea250feda0 100644
5100 +--- a/drivers/media/pci/saa7146/mxb.c
5101 ++++ b/drivers/media/pci/saa7146/mxb.c
5102 +@@ -641,16 +641,17 @@ static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *
5103 + struct mxb *mxb = (struct mxb *)dev->ext_priv;
5104 +
5105 + DEB_D("VIDIOC_S_AUDIO %d\n", a->index);
5106 +- if (mxb_inputs[mxb->cur_input].audioset & (1 << a->index)) {
5107 +- if (mxb->cur_audinput != a->index) {
5108 +- mxb->cur_audinput = a->index;
5109 +- tea6420_route(mxb, a->index);
5110 +- if (mxb->cur_audinput == 0)
5111 +- mxb_update_audmode(mxb);
5112 +- }
5113 +- return 0;
5114 ++ if (a->index >= 32 ||
5115 ++ !(mxb_inputs[mxb->cur_input].audioset & (1 << a->index)))
5116 ++ return -EINVAL;
5117 ++
5118 ++ if (mxb->cur_audinput != a->index) {
5119 ++ mxb->cur_audinput = a->index;
5120 ++ tea6420_route(mxb, a->index);
5121 ++ if (mxb->cur_audinput == 0)
5122 ++ mxb_update_audmode(mxb);
5123 + }
5124 +- return -EINVAL;
5125 ++ return 0;
5126 + }
5127 +
5128 + #ifdef CONFIG_VIDEO_ADV_DEBUG
5129 +diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
5130 +index 30c8f2ec9c3cc..809e4e65bb6e7 100644
5131 +--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
5132 ++++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
5133 +@@ -399,7 +399,7 @@ int solo_g723_init(struct solo_dev *solo_dev)
5134 +
5135 + ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev));
5136 + if (ret < 0)
5137 +- return ret;
5138 ++ goto snd_error;
5139 +
5140 + ret = solo_snd_pcm_init(solo_dev);
5141 + if (ret < 0)
5142 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
5143 +index 5a6ec8fb52daa..f9bbd0000bf3e 100644
5144 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
5145 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
5146 +@@ -48,11 +48,14 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
5147 + dec_clk->clk_info = devm_kcalloc(&pdev->dev,
5148 + dec_clk->clk_num, sizeof(*clk_info),
5149 + GFP_KERNEL);
5150 +- if (!dec_clk->clk_info)
5151 +- return -ENOMEM;
5152 ++ if (!dec_clk->clk_info) {
5153 ++ ret = -ENOMEM;
5154 ++ goto put_device;
5155 ++ }
5156 + } else {
5157 + mtk_v4l2_err("Failed to get vdec clock count");
5158 +- return -EINVAL;
5159 ++ ret = -EINVAL;
5160 ++ goto put_device;
5161 + }
5162 +
5163 + for (i = 0; i < dec_clk->clk_num; i++) {
5164 +@@ -61,25 +64,29 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
5165 + "clock-names", i, &clk_info->clk_name);
5166 + if (ret) {
5167 + mtk_v4l2_err("Failed to get clock name id = %d", i);
5168 +- return ret;
5169 ++ goto put_device;
5170 + }
5171 + clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
5172 + clk_info->clk_name);
5173 + if (IS_ERR(clk_info->vcodec_clk)) {
5174 + mtk_v4l2_err("devm_clk_get (%d)%s fail", i,
5175 + clk_info->clk_name);
5176 +- return PTR_ERR(clk_info->vcodec_clk);
5177 ++ ret = PTR_ERR(clk_info->vcodec_clk);
5178 ++ goto put_device;
5179 + }
5180 + }
5181 +
5182 + pm_runtime_enable(&pdev->dev);
5183 +-
5184 ++ return 0;
5185 ++put_device:
5186 ++ put_device(pm->larbvdec);
5187 + return ret;
5188 + }
5189 +
5190 + void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
5191 + {
5192 + pm_runtime_disable(dev->pm.dev);
5193 ++ put_device(dev->pm.larbvdec);
5194 + }
5195 +
5196 + void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
5197 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
5198 +index 3e2bfded79a66..e682bdb1ed453 100644
5199 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
5200 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
5201 +@@ -49,14 +49,16 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
5202 + node = of_parse_phandle(dev->of_node, "mediatek,larb", 1);
5203 + if (!node) {
5204 + mtk_v4l2_err("no mediatek,larb found");
5205 +- return -ENODEV;
5206 ++ ret = -ENODEV;
5207 ++ goto put_larbvenc;
5208 + }
5209 +
5210 + pdev = of_find_device_by_node(node);
5211 + of_node_put(node);
5212 + if (!pdev) {
5213 + mtk_v4l2_err("no mediatek,larb device found");
5214 +- return -ENODEV;
5215 ++ ret = -ENODEV;
5216 ++ goto put_larbvenc;
5217 + }
5218 +
5219 + pm->larbvenclt = &pdev->dev;
5220 +@@ -69,11 +71,14 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
5221 + enc_clk->clk_info = devm_kcalloc(&pdev->dev,
5222 + enc_clk->clk_num, sizeof(*clk_info),
5223 + GFP_KERNEL);
5224 +- if (!enc_clk->clk_info)
5225 +- return -ENOMEM;
5226 ++ if (!enc_clk->clk_info) {
5227 ++ ret = -ENOMEM;
5228 ++ goto put_larbvenclt;
5229 ++ }
5230 + } else {
5231 + mtk_v4l2_err("Failed to get venc clock count");
5232 +- return -EINVAL;
5233 ++ ret = -EINVAL;
5234 ++ goto put_larbvenclt;
5235 + }
5236 +
5237 + for (i = 0; i < enc_clk->clk_num; i++) {
5238 +@@ -82,17 +87,24 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
5239 + "clock-names", i, &clk_info->clk_name);
5240 + if (ret) {
5241 + mtk_v4l2_err("venc failed to get clk name %d", i);
5242 +- return ret;
5243 ++ goto put_larbvenclt;
5244 + }
5245 + clk_info->vcodec_clk = devm_clk_get(&pdev->dev,
5246 + clk_info->clk_name);
5247 + if (IS_ERR(clk_info->vcodec_clk)) {
5248 + mtk_v4l2_err("venc devm_clk_get (%d)%s fail", i,
5249 + clk_info->clk_name);
5250 +- return PTR_ERR(clk_info->vcodec_clk);
5251 ++ ret = PTR_ERR(clk_info->vcodec_clk);
5252 ++ goto put_larbvenclt;
5253 + }
5254 + }
5255 +
5256 ++ return 0;
5257 ++
5258 ++put_larbvenclt:
5259 ++ put_device(pm->larbvenclt);
5260 ++put_larbvenc:
5261 ++ put_device(pm->larbvenc);
5262 + return ret;
5263 + }
5264 +
5265 +diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
5266 +index e222b4c98be42..78c5a4d0f2794 100644
5267 +--- a/drivers/media/rc/sunxi-cir.c
5268 ++++ b/drivers/media/rc/sunxi-cir.c
5269 +@@ -137,6 +137,8 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
5270 + } else if (status & REG_RXSTA_RPE) {
5271 + ir_raw_event_set_idle(ir->rc, true);
5272 + ir_raw_event_handle(ir->rc);
5273 ++ } else {
5274 ++ ir_raw_event_handle(ir->rc);
5275 + }
5276 +
5277 + spin_unlock(&ir->ir_lock);
5278 +diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
5279 +index c1b307bbe540d..4210826cc9106 100644
5280 +--- a/drivers/media/usb/gspca/gspca.c
5281 ++++ b/drivers/media/usb/gspca/gspca.c
5282 +@@ -1575,6 +1575,7 @@ out:
5283 + input_unregister_device(gspca_dev->input_dev);
5284 + #endif
5285 + v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
5286 ++ v4l2_device_unregister(&gspca_dev->v4l2_dev);
5287 + kfree(gspca_dev->usb_buf);
5288 + kfree(gspca_dev);
5289 + return ret;
5290 +diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
5291 +index 65be6f140fe83..1c60dfb647e5c 100644
5292 +--- a/drivers/media/usb/msi2500/msi2500.c
5293 ++++ b/drivers/media/usb/msi2500/msi2500.c
5294 +@@ -1230,7 +1230,7 @@ static int msi2500_probe(struct usb_interface *intf,
5295 + }
5296 +
5297 + dev->master = master;
5298 +- master->bus_num = 0;
5299 ++ master->bus_num = -1;
5300 + master->num_chipselect = 1;
5301 + master->transfer_one_message = msi2500_transfer_one_message;
5302 + spi_master_set_devdata(master, dev);
5303 +diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
5304 +index c07a81a6cbe29..c46cbcfafab3f 100644
5305 +--- a/drivers/media/usb/tm6000/tm6000-video.c
5306 ++++ b/drivers/media/usb/tm6000/tm6000-video.c
5307 +@@ -461,11 +461,12 @@ static int tm6000_alloc_urb_buffers(struct tm6000_core *dev)
5308 + if (dev->urb_buffer)
5309 + return 0;
5310 +
5311 +- dev->urb_buffer = kmalloc_array(num_bufs, sizeof(void *), GFP_KERNEL);
5312 ++ dev->urb_buffer = kmalloc_array(num_bufs, sizeof(*dev->urb_buffer),
5313 ++ GFP_KERNEL);
5314 + if (!dev->urb_buffer)
5315 + return -ENOMEM;
5316 +
5317 +- dev->urb_dma = kmalloc_array(num_bufs, sizeof(dma_addr_t *),
5318 ++ dev->urb_dma = kmalloc_array(num_bufs, sizeof(*dev->urb_dma),
5319 + GFP_KERNEL);
5320 + if (!dev->urb_dma)
5321 + return -ENOMEM;
5322 +diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
5323 +index 3bd1888787eb3..48c3b9f72722a 100644
5324 +--- a/drivers/media/v4l2-core/v4l2-fwnode.c
5325 ++++ b/drivers/media/v4l2-core/v4l2-fwnode.c
5326 +@@ -93,7 +93,7 @@ v4l2_fwnode_bus_type_to_mbus(enum v4l2_fwnode_bus_type type)
5327 + const struct v4l2_fwnode_bus_conv *conv =
5328 + get_v4l2_fwnode_bus_conv_by_fwnode_bus(type);
5329 +
5330 +- return conv ? conv->mbus_type : V4L2_MBUS_UNKNOWN;
5331 ++ return conv ? conv->mbus_type : V4L2_MBUS_INVALID;
5332 + }
5333 +
5334 + static const char *
5335 +@@ -436,6 +436,10 @@ static int __v4l2_fwnode_endpoint_parse(struct fwnode_handle *fwnode,
5336 + v4l2_fwnode_mbus_type_to_string(vep->bus_type),
5337 + vep->bus_type);
5338 + mbus_type = v4l2_fwnode_bus_type_to_mbus(bus_type);
5339 ++ if (mbus_type == V4L2_MBUS_INVALID) {
5340 ++ pr_debug("unsupported bus type %u\n", bus_type);
5341 ++ return -EINVAL;
5342 ++ }
5343 +
5344 + if (vep->bus_type != V4L2_MBUS_UNKNOWN) {
5345 + if (mbus_type != V4L2_MBUS_UNKNOWN &&
5346 +diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
5347 +index ef03d6fafc5ce..12bc3f5a6cbbd 100644
5348 +--- a/drivers/memstick/core/memstick.c
5349 ++++ b/drivers/memstick/core/memstick.c
5350 +@@ -468,7 +468,6 @@ static void memstick_check(struct work_struct *work)
5351 + host->card = card;
5352 + if (device_register(&card->dev)) {
5353 + put_device(&card->dev);
5354 +- kfree(host->card);
5355 + host->card = NULL;
5356 + }
5357 + } else
5358 +diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
5359 +index dd3a1f3dcc191..d2ef46337191c 100644
5360 +--- a/drivers/memstick/host/r592.c
5361 ++++ b/drivers/memstick/host/r592.c
5362 +@@ -759,8 +759,10 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5363 + goto error3;
5364 +
5365 + dev->mmio = pci_ioremap_bar(pdev, 0);
5366 +- if (!dev->mmio)
5367 ++ if (!dev->mmio) {
5368 ++ error = -ENOMEM;
5369 + goto error4;
5370 ++ }
5371 +
5372 + dev->irq = pdev->irq;
5373 + spin_lock_init(&dev->irq_lock);
5374 +@@ -786,12 +788,14 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
5375 + &dev->dummy_dma_page_physical_address, GFP_KERNEL);
5376 + r592_stop_dma(dev , 0);
5377 +
5378 +- if (request_irq(dev->irq, &r592_irq, IRQF_SHARED,
5379 +- DRV_NAME, dev))
5380 ++ error = request_irq(dev->irq, &r592_irq, IRQF_SHARED,
5381 ++ DRV_NAME, dev);
5382 ++ if (error)
5383 + goto error6;
5384 +
5385 + r592_update_card_detect(dev);
5386 +- if (memstick_add_host(host))
5387 ++ error = memstick_add_host(host);
5388 ++ if (error)
5389 + goto error7;
5390 +
5391 + message("driver successfully loaded");
5392 +diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
5393 +index a7a4fed4d8995..3eeb1920ddb43 100644
5394 +--- a/drivers/misc/habanalabs/device.c
5395 ++++ b/drivers/misc/habanalabs/device.c
5396 +@@ -229,16 +229,16 @@ delete_cdev_device:
5397 +
5398 + static void device_cdev_sysfs_del(struct hl_device *hdev)
5399 + {
5400 +- /* device_release() won't be called so must free devices explicitly */
5401 +- if (!hdev->cdev_sysfs_created) {
5402 +- kfree(hdev->dev_ctrl);
5403 +- kfree(hdev->dev);
5404 +- return;
5405 +- }
5406 ++ if (!hdev->cdev_sysfs_created)
5407 ++ goto put_devices;
5408 +
5409 + hl_sysfs_fini(hdev);
5410 + cdev_device_del(&hdev->cdev_ctrl, hdev->dev_ctrl);
5411 + cdev_device_del(&hdev->cdev, hdev->dev);
5412 ++
5413 ++put_devices:
5414 ++ put_device(hdev->dev);
5415 ++ put_device(hdev->dev_ctrl);
5416 + }
5417 +
5418 + /*
5419 +@@ -1285,9 +1285,9 @@ sw_fini:
5420 + early_fini:
5421 + device_early_fini(hdev);
5422 + free_dev_ctrl:
5423 +- kfree(hdev->dev_ctrl);
5424 ++ put_device(hdev->dev_ctrl);
5425 + free_dev:
5426 +- kfree(hdev->dev);
5427 ++ put_device(hdev->dev);
5428 + out_disabled:
5429 + hdev->disabled = true;
5430 + if (add_cdev_sysfs_on_err)
5431 +diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
5432 +index b2bbcb09a49e6..953e7457137a2 100644
5433 +--- a/drivers/mmc/host/pxamci.c
5434 ++++ b/drivers/mmc/host/pxamci.c
5435 +@@ -729,6 +729,7 @@ static int pxamci_probe(struct platform_device *pdev)
5436 +
5437 + host->power = devm_gpiod_get_optional(dev, "power", GPIOD_OUT_LOW);
5438 + if (IS_ERR(host->power)) {
5439 ++ ret = PTR_ERR(host->power);
5440 + dev_err(dev, "Failed requesting gpio_power\n");
5441 + goto out;
5442 + }
5443 +diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
5444 +index ef89947ee3191..2390ed077a2fc 100644
5445 +--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
5446 ++++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
5447 +@@ -149,8 +149,10 @@ static int gpmi_init(struct gpmi_nand_data *this)
5448 + int ret;
5449 +
5450 + ret = pm_runtime_get_sync(this->dev);
5451 +- if (ret < 0)
5452 ++ if (ret < 0) {
5453 ++ pm_runtime_put_noidle(this->dev);
5454 + return ret;
5455 ++ }
5456 +
5457 + ret = gpmi_reset_block(r->gpmi_regs, false);
5458 + if (ret)
5459 +@@ -2406,7 +2408,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
5460 + void *buf_read = NULL;
5461 + const void *buf_write = NULL;
5462 + bool direct = false;
5463 +- struct completion *completion;
5464 ++ struct completion *dma_completion, *bch_completion;
5465 + unsigned long to;
5466 +
5467 + this->ntransfers = 0;
5468 +@@ -2414,8 +2416,10 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
5469 + this->transfers[i].direction = DMA_NONE;
5470 +
5471 + ret = pm_runtime_get_sync(this->dev);
5472 +- if (ret < 0)
5473 ++ if (ret < 0) {
5474 ++ pm_runtime_put_noidle(this->dev);
5475 + return ret;
5476 ++ }
5477 +
5478 + /*
5479 + * This driver currently supports only one NAND chip. Plus, dies share
5480 +@@ -2498,22 +2502,24 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
5481 + this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
5482 + }
5483 +
5484 ++ desc->callback = dma_irq_callback;
5485 ++ desc->callback_param = this;
5486 ++ dma_completion = &this->dma_done;
5487 ++ bch_completion = NULL;
5488 ++
5489 ++ init_completion(dma_completion);
5490 ++
5491 + if (this->bch && buf_read) {
5492 + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
5493 + this->resources.bch_regs + HW_BCH_CTRL_SET);
5494 +- completion = &this->bch_done;
5495 +- } else {
5496 +- desc->callback = dma_irq_callback;
5497 +- desc->callback_param = this;
5498 +- completion = &this->dma_done;
5499 ++ bch_completion = &this->bch_done;
5500 ++ init_completion(bch_completion);
5501 + }
5502 +
5503 +- init_completion(completion);
5504 +-
5505 + dmaengine_submit(desc);
5506 + dma_async_issue_pending(get_dma_chan(this));
5507 +
5508 +- to = wait_for_completion_timeout(completion, msecs_to_jiffies(1000));
5509 ++ to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
5510 + if (!to) {
5511 + dev_err(this->dev, "DMA timeout, last DMA\n");
5512 + gpmi_dump_info(this);
5513 +@@ -2521,6 +2527,16 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
5514 + goto unmap;
5515 + }
5516 +
5517 ++ if (this->bch && buf_read) {
5518 ++ to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
5519 ++ if (!to) {
5520 ++ dev_err(this->dev, "BCH timeout, last DMA\n");
5521 ++ gpmi_dump_info(this);
5522 ++ ret = -ETIMEDOUT;
5523 ++ goto unmap;
5524 ++ }
5525 ++ }
5526 ++
5527 + writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
5528 + this->resources.bch_regs + HW_BCH_CTRL_CLR);
5529 + gpmi_clear_bch(this);
5530 +diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c
5531 +index 1b82b687e5a50..ab7ab6a279aac 100644
5532 +--- a/drivers/mtd/nand/raw/meson_nand.c
5533 ++++ b/drivers/mtd/nand/raw/meson_nand.c
5534 +@@ -510,7 +510,7 @@ static int meson_nfc_dma_buffer_setup(struct nand_chip *nand, void *databuf,
5535 + }
5536 +
5537 + static void meson_nfc_dma_buffer_release(struct nand_chip *nand,
5538 +- int infolen, int datalen,
5539 ++ int datalen, int infolen,
5540 + enum dma_data_direction dir)
5541 + {
5542 + struct meson_nfc *nfc = nand_get_controller_data(nand);
5543 +@@ -1041,9 +1041,12 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
5544 +
5545 + ret = clk_set_rate(nfc->device_clk, 24000000);
5546 + if (ret)
5547 +- goto err_phase_rx;
5548 ++ goto err_disable_rx;
5549 +
5550 + return 0;
5551 ++
5552 ++err_disable_rx:
5553 ++ clk_disable_unprepare(nfc->phase_rx);
5554 + err_phase_rx:
5555 + clk_disable_unprepare(nfc->phase_tx);
5556 + err_phase_tx:
5557 +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
5558 +index c1c53b02b35f4..963ebcdfcbce3 100644
5559 +--- a/drivers/mtd/nand/raw/qcom_nandc.c
5560 ++++ b/drivers/mtd/nand/raw/qcom_nandc.c
5561 +@@ -1570,6 +1570,8 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
5562 + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
5563 + int i;
5564 +
5565 ++ nandc_read_buffer_sync(nandc, true);
5566 ++
5567 + for (i = 0; i < cw_cnt; i++) {
5568 + u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
5569 +
5570 +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
5571 +index 0d21c68bfe245..671700af91804 100644
5572 +--- a/drivers/mtd/nand/spi/core.c
5573 ++++ b/drivers/mtd/nand/spi/core.c
5574 +@@ -317,6 +317,10 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
5575 + buf += ret;
5576 + }
5577 +
5578 ++ if (req->ooblen)
5579 ++ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
5580 ++ req->ooblen);
5581 ++
5582 + return 0;
5583 + }
5584 +
5585 +diff --git a/drivers/mtd/parsers/cmdlinepart.c b/drivers/mtd/parsers/cmdlinepart.c
5586 +index 0625b25620ca7..0dca51549128d 100644
5587 +--- a/drivers/mtd/parsers/cmdlinepart.c
5588 ++++ b/drivers/mtd/parsers/cmdlinepart.c
5589 +@@ -218,7 +218,7 @@ static int mtdpart_setup_real(char *s)
5590 + struct cmdline_mtd_partition *this_mtd;
5591 + struct mtd_partition *parts;
5592 + int mtd_id_len, num_parts;
5593 +- char *p, *mtd_id, *semicol;
5594 ++ char *p, *mtd_id, *semicol, *open_parenth;
5595 +
5596 + /*
5597 + * Replace the first ';' by a NULL char so strrchr can work
5598 +@@ -228,6 +228,14 @@ static int mtdpart_setup_real(char *s)
5599 + if (semicol)
5600 + *semicol = '\0';
5601 +
5602 ++ /*
4603 ++ * Make sure a ":" inside a partition name is not mistaken for
4604 ++ * the ":" that terminates the mtd-id.
5605 ++ */
5606 ++ open_parenth = strchr(s, '(');
5607 ++ if (open_parenth)
5608 ++ *open_parenth = '\0';
5609 ++
5610 + mtd_id = s;
5611 +
5612 + /*
5613 +@@ -237,6 +245,10 @@ static int mtdpart_setup_real(char *s)
5614 + */
5615 + p = strrchr(s, ':');
5616 +
5617 ++ /* Restore the '(' now. */
5618 ++ if (open_parenth)
5619 ++ *open_parenth = '(';
5620 ++
5621 + /* Restore the ';' now. */
5622 + if (semicol)
5623 + *semicol = ';';
5624 +diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
5625 +index c84114b44ee07..d2bb9a87eff9a 100644
5626 +--- a/drivers/net/can/m_can/m_can.c
5627 ++++ b/drivers/net/can/m_can/m_can.c
5628 +@@ -379,10 +379,6 @@ void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
5629 + cccr &= ~CCCR_CSR;
5630 +
5631 + if (enable) {
5632 +- /* Clear the Clock stop request if it was set */
5633 +- if (cccr & CCCR_CSR)
5634 +- cccr &= ~CCCR_CSR;
5635 +-
5636 + /* enable m_can configuration */
5637 + m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
5638 + udelay(5);
5639 +diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
5640 +index 8242fb287cbbe..16e3f55efa311 100644
5641 +--- a/drivers/net/can/softing/softing_main.c
5642 ++++ b/drivers/net/can/softing/softing_main.c
5643 +@@ -382,8 +382,13 @@ static int softing_netdev_open(struct net_device *ndev)
5644 +
5645 + /* check or determine and set bittime */
5646 + ret = open_candev(ndev);
5647 +- if (!ret)
5648 +- ret = softing_startstop(ndev, 1);
5649 ++ if (ret)
5650 ++ return ret;
5651 ++
5652 ++ ret = softing_startstop(ndev, 1);
5653 ++ if (ret < 0)
5654 ++ close_candev(ndev);
5655 ++
5656 + return ret;
5657 + }
5658 +
5659 +diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
5660 +index ff318472a3eef..95155a1f9f9dc 100644
5661 +--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
5662 ++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
5663 +@@ -845,13 +845,13 @@ static int emac_probe(struct platform_device *pdev)
5664 + db->clk = devm_clk_get(&pdev->dev, NULL);
5665 + if (IS_ERR(db->clk)) {
5666 + ret = PTR_ERR(db->clk);
5667 +- goto out_iounmap;
5668 ++ goto out_dispose_mapping;
5669 + }
5670 +
5671 + ret = clk_prepare_enable(db->clk);
5672 + if (ret) {
5673 + dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
5674 +- goto out_iounmap;
5675 ++ goto out_dispose_mapping;
5676 + }
5677 +
5678 + ret = sunxi_sram_claim(&pdev->dev);
5679 +@@ -910,6 +910,8 @@ out_release_sram:
5680 + sunxi_sram_release(&pdev->dev);
5681 + out_clk_disable_unprepare:
5682 + clk_disable_unprepare(db->clk);
5683 ++out_dispose_mapping:
5684 ++ irq_dispose_mapping(ndev->irq);
5685 + out_iounmap:
5686 + iounmap(db->membase);
5687 + out:
5688 +@@ -928,6 +930,7 @@ static int emac_remove(struct platform_device *pdev)
5689 + unregister_netdev(ndev);
5690 + sunxi_sram_release(&pdev->dev);
5691 + clk_disable_unprepare(db->clk);
5692 ++ irq_dispose_mapping(ndev->irq);
5693 + iounmap(db->membase);
5694 + free_netdev(ndev);
5695 +
5696 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
5697 +index 03f82786c0b98..b27da024aa9d9 100644
5698 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
5699 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
5700 +@@ -3584,8 +3584,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
5701 + clk_disable_unprepare(priv->clk);
5702 +
5703 + err = register_netdev(dev);
5704 +- if (err)
5705 ++ if (err) {
5706 ++ bcmgenet_mii_exit(dev);
5707 + goto err;
5708 ++ }
5709 +
5710 + return err;
5711 +
5712 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
5713 +index e3f29dc8b290a..f47841f3a69d5 100644
5714 +--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
5715 ++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
5716 +@@ -1195,6 +1195,11 @@ clear_counts:
5717 + rc->total_packets = 0;
5718 + }
5719 +
5720 ++static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
5721 ++{
5722 ++ return &rx_ring->rx_bi[idx];
5723 ++}
5724 ++
5725 + /**
5726 + * i40e_reuse_rx_page - page flip buffer and store it back on the ring
5727 + * @rx_ring: rx descriptor ring to store buffers on
5728 +@@ -1208,7 +1213,7 @@ static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
5729 + struct i40e_rx_buffer *new_buff;
5730 + u16 nta = rx_ring->next_to_alloc;
5731 +
5732 +- new_buff = &rx_ring->rx_bi[nta];
5733 ++ new_buff = i40e_rx_bi(rx_ring, nta);
5734 +
5735 + /* update, and store next to alloc */
5736 + nta++;
5737 +@@ -1272,7 +1277,7 @@ struct i40e_rx_buffer *i40e_clean_programming_status(
5738 + ntc = rx_ring->next_to_clean;
5739 +
5740 + /* fetch, update, and store next to clean */
5741 +- rx_buffer = &rx_ring->rx_bi[ntc++];
5742 ++ rx_buffer = i40e_rx_bi(rx_ring, ntc++);
5743 + ntc = (ntc < rx_ring->count) ? ntc : 0;
5744 + rx_ring->next_to_clean = ntc;
5745 +
5746 +@@ -1361,7 +1366,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
5747 +
5748 + /* Free all the Rx ring sk_buffs */
5749 + for (i = 0; i < rx_ring->count; i++) {
5750 +- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
5751 ++ struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
5752 +
5753 + if (!rx_bi->page)
5754 + continue;
5755 +@@ -1576,7 +1581,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
5756 + return false;
5757 +
5758 + rx_desc = I40E_RX_DESC(rx_ring, ntu);
5759 +- bi = &rx_ring->rx_bi[ntu];
5760 ++ bi = i40e_rx_bi(rx_ring, ntu);
5761 +
5762 + do {
5763 + if (!i40e_alloc_mapped_page(rx_ring, bi))
5764 +@@ -1598,7 +1603,7 @@ bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
5765 + ntu++;
5766 + if (unlikely(ntu == rx_ring->count)) {
5767 + rx_desc = I40E_RX_DESC(rx_ring, 0);
5768 +- bi = rx_ring->rx_bi;
5769 ++ bi = i40e_rx_bi(rx_ring, 0);
5770 + ntu = 0;
5771 + }
5772 +
5773 +@@ -1864,6 +1869,7 @@ static inline bool i40e_page_is_reusable(struct page *page)
5774 + * the adapter for another receive
5775 + *
5776 + * @rx_buffer: buffer containing the page
5777 ++ * @rx_buffer_pgcnt: buffer page refcount pre xdp_do_redirect() call
5778 + *
5779 + * If page is reusable, rx_buffer->page_offset is adjusted to point to
5780 + * an unused region in the page.
5781 +@@ -1886,7 +1892,8 @@ static inline bool i40e_page_is_reusable(struct page *page)
5782 + *
5783 + * In either case, if the page is reusable its refcount is increased.
5784 + **/
5785 +-static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
5786 ++static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer,
5787 ++ int rx_buffer_pgcnt)
5788 + {
5789 + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
5790 + struct page *page = rx_buffer->page;
5791 +@@ -1897,7 +1904,7 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
5792 +
5793 + #if (PAGE_SIZE < 8192)
5794 + /* if we are only owner of page we can reuse it */
5795 +- if (unlikely((page_count(page) - pagecnt_bias) > 1))
5796 ++ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
5797 + return false;
5798 + #else
5799 + #define I40E_LAST_OFFSET \
5800 +@@ -1956,17 +1963,25 @@ static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
5801 + * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
5802 + * @rx_ring: rx descriptor ring to transact packets on
5803 + * @size: size of buffer to add to skb
5804 ++ * @rx_buffer_pgcnt: buffer page refcount
5805 + *
5806 + * This function will pull an Rx buffer from the ring and synchronize it
5807 + * for use by the CPU.
5808 + */
5809 + static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
5810 +- const unsigned int size)
5811 ++ const unsigned int size,
5812 ++ int *rx_buffer_pgcnt)
5813 + {
5814 + struct i40e_rx_buffer *rx_buffer;
5815 +
5816 +- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
5817 +- prefetchw(rx_buffer->page);
5818 ++ rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
5819 ++ *rx_buffer_pgcnt =
5820 ++#if (PAGE_SIZE < 8192)
5821 ++ page_count(rx_buffer->page);
5822 ++#else
5823 ++ 0;
5824 ++#endif
5825 ++ prefetch_page_address(rx_buffer->page);
5826 +
5827 + /* we are reusing so sync this buffer for CPU use */
5828 + dma_sync_single_range_for_cpu(rx_ring->dev,
5829 +@@ -2120,14 +2135,16 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
5830 + * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
5831 + * @rx_ring: rx descriptor ring to transact packets on
5832 + * @rx_buffer: rx buffer to pull data from
5833 ++ * @rx_buffer_pgcnt: rx buffer page refcount pre xdp_do_redirect() call
5834 + *
5835 + * This function will clean up the contents of the rx_buffer. It will
5836 + * either recycle the buffer or unmap it and free the associated resources.
5837 + */
5838 + static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
5839 +- struct i40e_rx_buffer *rx_buffer)
5840 ++ struct i40e_rx_buffer *rx_buffer,
5841 ++ int rx_buffer_pgcnt)
5842 + {
5843 +- if (i40e_can_reuse_rx_page(rx_buffer)) {
5844 ++ if (i40e_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
5845 + /* hand second half of page back to the ring */
5846 + i40e_reuse_rx_page(rx_ring, rx_buffer);
5847 + } else {
5848 +@@ -2340,6 +2357,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
5849 + while (likely(total_rx_packets < (unsigned int)budget)) {
5850 + struct i40e_rx_buffer *rx_buffer;
5851 + union i40e_rx_desc *rx_desc;
5852 ++ int rx_buffer_pgcnt;
5853 + unsigned int size;
5854 + u64 qword;
5855 +
5856 +@@ -2379,7 +2397,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
5857 + break;
5858 +
5859 + i40e_trace(clean_rx_irq, rx_ring, rx_desc, skb);
5860 +- rx_buffer = i40e_get_rx_buffer(rx_ring, size);
5861 ++ rx_buffer = i40e_get_rx_buffer(rx_ring, size, &rx_buffer_pgcnt);
5862 +
5863 + /* retrieve a buffer from the ring */
5864 + if (!skb) {
5865 +@@ -2419,7 +2437,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
5866 + break;
5867 + }
5868 +
5869 +- i40e_put_rx_buffer(rx_ring, rx_buffer);
5870 ++ i40e_put_rx_buffer(rx_ring, rx_buffer, rx_buffer_pgcnt);
5871 + cleaned_count++;
5872 +
5873 + if (i40e_is_non_eop(rx_ring, rx_desc, skb))
5874 +diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
5875 +index 3156de786d955..c9d4534fbdf02 100644
5876 +--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
5877 ++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
5878 +@@ -9,6 +9,11 @@
5879 + #include "i40e_txrx_common.h"
5880 + #include "i40e_xsk.h"
5881 +
5882 ++static struct i40e_rx_buffer *i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
5883 ++{
5884 ++ return &rx_ring->rx_bi[idx];
5885 ++}
5886 ++
5887 + /**
5888 + * i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
5889 + * @vsi: Current VSI
5890 +@@ -321,7 +326,7 @@ __i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
5891 + bool ok = true;
5892 +
5893 + rx_desc = I40E_RX_DESC(rx_ring, ntu);
5894 +- bi = &rx_ring->rx_bi[ntu];
5895 ++ bi = i40e_rx_bi(rx_ring, ntu);
5896 + do {
5897 + if (!alloc(rx_ring, bi)) {
5898 + ok = false;
5899 +@@ -340,7 +345,7 @@ __i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count,
5900 +
5901 + if (unlikely(ntu == rx_ring->count)) {
5902 + rx_desc = I40E_RX_DESC(rx_ring, 0);
5903 +- bi = rx_ring->rx_bi;
5904 ++ bi = i40e_rx_bi(rx_ring, 0);
5905 + ntu = 0;
5906 + }
5907 +
5908 +@@ -402,7 +407,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
5909 + {
5910 + struct i40e_rx_buffer *bi;
5911 +
5912 +- bi = &rx_ring->rx_bi[rx_ring->next_to_clean];
5913 ++ bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
5914 +
5915 + /* we are reusing so sync this buffer for CPU use */
5916 + dma_sync_single_range_for_cpu(rx_ring->dev,
5917 +@@ -424,7 +429,8 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer_zc(struct i40e_ring *rx_ring,
5918 + static void i40e_reuse_rx_buffer_zc(struct i40e_ring *rx_ring,
5919 + struct i40e_rx_buffer *old_bi)
5920 + {
5921 +- struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc];
5922 ++ struct i40e_rx_buffer *new_bi = i40e_rx_bi(rx_ring,
5923 ++ rx_ring->next_to_alloc);
5924 + u16 nta = rx_ring->next_to_alloc;
5925 +
5926 + /* update, and store next to alloc */
5927 +@@ -456,7 +462,7 @@ void i40e_zca_free(struct zero_copy_allocator *alloc, unsigned long handle)
5928 + mask = rx_ring->xsk_umem->chunk_mask;
5929 +
5930 + nta = rx_ring->next_to_alloc;
5931 +- bi = &rx_ring->rx_bi[nta];
5932 ++ bi = i40e_rx_bi(rx_ring, nta);
5933 +
5934 + nta++;
5935 + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
5936 +@@ -824,7 +830,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
5937 + u16 i;
5938 +
5939 + for (i = 0; i < rx_ring->count; i++) {
5940 +- struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
5941 ++ struct i40e_rx_buffer *rx_bi = i40e_rx_bi(rx_ring, i);
5942 +
5943 + if (!rx_bi->addr)
5944 + continue;
5945 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5946 +index 5336bfcd2d701..f605540644035 100644
5947 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5948 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5949 +@@ -1947,7 +1947,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
5950 + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
5951 + }
5952 +
5953 +-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
5954 ++static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
5955 ++ int rx_buffer_pgcnt)
5956 + {
5957 + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
5958 + struct page *page = rx_buffer->page;
5959 +@@ -1958,7 +1959,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
5960 +
5961 + #if (PAGE_SIZE < 8192)
5962 + /* if we are only owner of page we can reuse it */
5963 +- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
5964 ++ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
5965 + return false;
5966 + #else
5967 + /* The last offset is a bit aggressive in that we assume the
5968 +@@ -2023,11 +2024,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
5969 + static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
5970 + union ixgbe_adv_rx_desc *rx_desc,
5971 + struct sk_buff **skb,
5972 +- const unsigned int size)
5973 ++ const unsigned int size,
5974 ++ int *rx_buffer_pgcnt)
5975 + {
5976 + struct ixgbe_rx_buffer *rx_buffer;
5977 +
5978 + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
5979 ++ *rx_buffer_pgcnt =
5980 ++#if (PAGE_SIZE < 8192)
5981 ++ page_count(rx_buffer->page);
5982 ++#else
5983 ++ 0;
5984 ++#endif
5985 + prefetchw(rx_buffer->page);
5986 + *skb = rx_buffer->skb;
5987 +
5988 +@@ -2057,9 +2065,10 @@ skip_sync:
5989 +
5990 + static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
5991 + struct ixgbe_rx_buffer *rx_buffer,
5992 +- struct sk_buff *skb)
5993 ++ struct sk_buff *skb,
5994 ++ int rx_buffer_pgcnt)
5995 + {
5996 +- if (ixgbe_can_reuse_rx_page(rx_buffer)) {
5997 ++ if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
5998 + /* hand second half of page back to the ring */
5999 + ixgbe_reuse_rx_page(rx_ring, rx_buffer);
6000 + } else {
6001 +@@ -2295,6 +2304,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
6002 + union ixgbe_adv_rx_desc *rx_desc;
6003 + struct ixgbe_rx_buffer *rx_buffer;
6004 + struct sk_buff *skb;
6005 ++ int rx_buffer_pgcnt;
6006 + unsigned int size;
6007 +
6008 + /* return some buffers to hardware, one at a time is too slow */
6009 +@@ -2314,7 +2324,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
6010 + */
6011 + dma_rmb();
6012 +
6013 +- rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
6014 ++ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
6015 +
6016 + /* retrieve a buffer from the ring */
6017 + if (!skb) {
6018 +@@ -2356,7 +2366,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
6019 + break;
6020 + }
6021 +
6022 +- ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
6023 ++ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
6024 + cleaned_count++;
6025 +
6026 + /* place incomplete frames back on ring for completion */
6027 +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
6028 +index 993f495e2bf7b..9f804e2aba359 100644
6029 +--- a/drivers/net/ethernet/korina.c
6030 ++++ b/drivers/net/ethernet/korina.c
6031 +@@ -219,7 +219,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
6032 + dev_kfree_skb_any(skb);
6033 + spin_unlock_irqrestore(&lp->lock, flags);
6034 +
6035 +- return NETDEV_TX_BUSY;
6036 ++ return NETDEV_TX_OK;
6037 + }
6038 + }
6039 +
6040 +diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6041 +index 63c0334430134..931d1a56b79ca 100644
6042 +--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6043 ++++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
6044 +@@ -4745,12 +4745,16 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
6045 + eth_hw_addr_random(dev);
6046 + }
6047 +
6048 ++static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
6049 ++{
6050 ++ return container_of(config, struct mvpp2_port, phylink_config);
6051 ++}
6052 ++
6053 + static void mvpp2_phylink_validate(struct phylink_config *config,
6054 + unsigned long *supported,
6055 + struct phylink_link_state *state)
6056 + {
6057 +- struct mvpp2_port *port = container_of(config, struct mvpp2_port,
6058 +- phylink_config);
6059 ++ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6060 + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
6061 +
6062 + /* Invalid combinations */
6063 +@@ -4877,8 +4881,7 @@ static void mvpp2_gmac_link_state(struct mvpp2_port *port,
6064 + static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
6065 + struct phylink_link_state *state)
6066 + {
6067 +- struct mvpp2_port *port = container_of(config, struct mvpp2_port,
6068 +- phylink_config);
6069 ++ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6070 +
6071 + if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
6072 + u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
6073 +@@ -4896,8 +4899,7 @@ static int mvpp2_phylink_mac_link_state(struct phylink_config *config,
6074 +
6075 + static void mvpp2_mac_an_restart(struct phylink_config *config)
6076 + {
6077 +- struct mvpp2_port *port = container_of(config, struct mvpp2_port,
6078 +- phylink_config);
6079 ++ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6080 + u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6081 +
6082 + writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
6083 +@@ -5085,13 +5087,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
6084 + static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6085 + const struct phylink_link_state *state)
6086 + {
6087 +- struct net_device *dev = to_net_dev(config->dev);
6088 +- struct mvpp2_port *port = netdev_priv(dev);
6089 ++ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6090 + bool change_interface = port->phy_interface != state->interface;
6091 +
6092 + /* Check for invalid configuration */
6093 + if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
6094 +- netdev_err(dev, "Invalid mode on %s\n", dev->name);
6095 ++ netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
6096 + return;
6097 + }
6098 +
6099 +@@ -5128,8 +5129,7 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6100 + static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
6101 + phy_interface_t interface, struct phy_device *phy)
6102 + {
6103 +- struct net_device *dev = to_net_dev(config->dev);
6104 +- struct mvpp2_port *port = netdev_priv(dev);
6105 ++ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6106 + u32 val;
6107 +
6108 + if (!phylink_autoneg_inband(mode)) {
6109 +@@ -5150,14 +5150,13 @@ static void mvpp2_mac_link_up(struct phylink_config *config, unsigned int mode,
6110 +
6111 + mvpp2_egress_enable(port);
6112 + mvpp2_ingress_enable(port);
6113 +- netif_tx_wake_all_queues(dev);
6114 ++ netif_tx_wake_all_queues(port->dev);
6115 + }
6116 +
6117 + static void mvpp2_mac_link_down(struct phylink_config *config,
6118 + unsigned int mode, phy_interface_t interface)
6119 + {
6120 +- struct net_device *dev = to_net_dev(config->dev);
6121 +- struct mvpp2_port *port = netdev_priv(dev);
6122 ++ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6123 + u32 val;
6124 +
6125 + if (!phylink_autoneg_inband(mode)) {
6126 +@@ -5174,7 +5173,7 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
6127 + }
6128 + }
6129 +
6130 +- netif_tx_stop_all_queues(dev);
6131 ++ netif_tx_stop_all_queues(port->dev);
6132 + mvpp2_egress_disable(port);
6133 + mvpp2_ingress_disable(port);
6134 +
6135 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
6136 +index 7c0a726277b00..f2657cd3ffa4f 100644
6137 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
6138 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
6139 +@@ -50,6 +50,7 @@
6140 + #ifdef CONFIG_RFS_ACCEL
6141 + #include <linux/cpu_rmap.h>
6142 + #endif
6143 ++#include <linux/version.h>
6144 + #include <net/devlink.h>
6145 + #include "mlx5_core.h"
6146 + #include "lib/eq.h"
6147 +@@ -227,7 +228,10 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
6148 + strncat(string, ",", remaining_size);
6149 +
6150 + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
6151 +- strncat(string, DRIVER_VERSION, remaining_size);
6152 ++
6153 ++ snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
6154 ++ (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff),
6155 ++ (u16)(LINUX_VERSION_CODE & 0xffff));
6156 +
6157 + /*Send the command*/
6158 + MLX5_SET(set_driver_version_in, in, opcode,
6159 +diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
6160 +index 7526af27a59da..4bbdc53eaf3f3 100644
6161 +--- a/drivers/net/ethernet/microchip/lan743x_main.c
6162 ++++ b/drivers/net/ethernet/microchip/lan743x_main.c
6163 +@@ -1899,6 +1899,14 @@ static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
6164 + length, GFP_ATOMIC | GFP_DMA);
6165 + }
6166 +
6167 ++static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
6168 ++{
6169 ++ /* update the tail once per 8 descriptors */
6170 ++ if ((index & 7) == 7)
6171 ++ lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
6172 ++ index);
6173 ++}
6174 ++
6175 + static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
6176 + struct sk_buff *skb)
6177 + {
6178 +@@ -1929,6 +1937,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
6179 + descriptor->data0 = (RX_DESC_DATA0_OWN_ |
6180 + (length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
6181 + skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
6182 ++ lan743x_rx_update_tail(rx, index);
6183 +
6184 + return 0;
6185 + }
6186 +@@ -1947,6 +1956,7 @@ static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
6187 + descriptor->data0 = (RX_DESC_DATA0_OWN_ |
6188 + ((buffer_info->buffer_length) &
6189 + RX_DESC_DATA0_BUF_LENGTH_MASK_));
6190 ++ lan743x_rx_update_tail(rx, index);
6191 + }
6192 +
6193 + static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
6194 +@@ -2158,6 +2168,7 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
6195 + {
6196 + struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
6197 + struct lan743x_adapter *adapter = rx->adapter;
6198 ++ int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
6199 + u32 rx_tail_flags = 0;
6200 + int count;
6201 +
6202 +@@ -2166,27 +2177,19 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
6203 + lan743x_csr_write(adapter, DMAC_INT_STS,
6204 + DMAC_INT_BIT_RXFRM_(rx->channel_number));
6205 + }
6206 +- count = 0;
6207 +- while (count < weight) {
6208 +- int rx_process_result = lan743x_rx_process_packet(rx);
6209 +-
6210 +- if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) {
6211 +- count++;
6212 +- } else if (rx_process_result ==
6213 +- RX_PROCESS_RESULT_NOTHING_TO_DO) {
6214 ++ for (count = 0; count < weight; count++) {
6215 ++ result = lan743x_rx_process_packet(rx);
6216 ++ if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
6217 + break;
6218 +- } else if (rx_process_result ==
6219 +- RX_PROCESS_RESULT_PACKET_DROPPED) {
6220 +- continue;
6221 +- }
6222 + }
6223 + rx->frame_count += count;
6224 +- if (count == weight)
6225 +- goto done;
6226 ++ if (count == weight || result == RX_PROCESS_RESULT_PACKET_RECEIVED)
6227 ++ return weight;
6228 +
6229 + if (!napi_complete_done(napi, count))
6230 +- goto done;
6231 ++ return count;
6232 +
6233 ++ /* re-arm interrupts, must write to rx tail on some chip variants */
6234 + if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
6235 + rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
6236 + if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
6237 +@@ -2196,10 +2199,10 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
6238 + INT_BIT_DMA_RX_(rx->channel_number));
6239 + }
6240 +
6241 +- /* update RX_TAIL */
6242 +- lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
6243 +- rx_tail_flags | rx->last_tail);
6244 +-done:
6245 ++ if (rx_tail_flags)
6246 ++ lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
6247 ++ rx_tail_flags | rx->last_tail);
6248 ++
6249 + return count;
6250 + }
6251 +
6252 +@@ -2344,7 +2347,7 @@ static int lan743x_rx_open(struct lan743x_rx *rx)
6253 +
6254 + netif_napi_add(adapter->netdev,
6255 + &rx->napi, lan743x_rx_napi_poll,
6256 +- rx->ring_size - 1);
6257 ++ NAPI_POLL_WEIGHT);
6258 +
6259 + lan743x_csr_write(adapter, DMAC_CMD,
6260 + DMAC_CMD_RX_SWR_(rx->channel_number));
6261 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
6262 +index c07438db30ba1..f2e5f494462b3 100644
6263 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
6264 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
6265 +@@ -2509,6 +2509,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6266 + qlcnic_sriov_vf_register_map(ahw);
6267 + break;
6268 + default:
6269 ++ err = -EINVAL;
6270 + goto err_out_free_hw_res;
6271 + }
6272 +
6273 +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
6274 +index 99e1a7bc06886..7cc8f405be1ad 100644
6275 +--- a/drivers/net/virtio_net.c
6276 ++++ b/drivers/net/virtio_net.c
6277 +@@ -3114,6 +3114,7 @@ static int virtnet_probe(struct virtio_device *vdev)
6278 + dev_err(&vdev->dev,
6279 + "device MTU appears to have changed it is now %d < %d",
6280 + mtu, dev->min_mtu);
6281 ++ err = -EINVAL;
6282 + goto free;
6283 + }
6284 +
6285 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
6286 +index 630ac00a34ede..5502e145aa17b 100644
6287 +--- a/drivers/net/vxlan.c
6288 ++++ b/drivers/net/vxlan.c
6289 +@@ -3538,6 +3538,9 @@ static void vxlan_config_apply(struct net_device *dev,
6290 + dev->gso_max_segs = lowerdev->gso_max_segs;
6291 +
6292 + needed_headroom = lowerdev->hard_header_len;
6293 ++ needed_headroom += lowerdev->needed_headroom;
6294 ++
6295 ++ dev->needed_tailroom = lowerdev->needed_tailroom;
6296 +
6297 + max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
6298 + VXLAN_HEADROOM);
6299 +diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
6300 +index 1e0343081be91..05c0d5e92475e 100644
6301 +--- a/drivers/net/wireless/ath/ath10k/usb.c
6302 ++++ b/drivers/net/wireless/ath/ath10k/usb.c
6303 +@@ -1009,6 +1009,8 @@ static int ath10k_usb_probe(struct usb_interface *interface,
6304 +
6305 + ar_usb = ath10k_usb_priv(ar);
6306 + ret = ath10k_usb_create(ar, interface);
6307 ++ if (ret)
6308 ++ goto err;
6309 + ar_usb->ar = ar;
6310 +
6311 + ar->dev_id = product_id;
6312 +@@ -1021,7 +1023,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
6313 + ret = ath10k_core_register(ar, &bus_params);
6314 + if (ret) {
6315 + ath10k_warn(ar, "failed to register driver core: %d\n", ret);
6316 +- goto err;
6317 ++ goto err_usb_destroy;
6318 + }
6319 +
6320 + /* TODO: remove this once USB support is fully implemented */
6321 +@@ -1029,6 +1031,9 @@ static int ath10k_usb_probe(struct usb_interface *interface,
6322 +
6323 + return 0;
6324 +
6325 ++err_usb_destroy:
6326 ++ ath10k_usb_destroy(ar);
6327 ++
6328 + err:
6329 + ath10k_core_destroy(ar);
6330 +
6331 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
6332 +index 9d5b9df29c352..3ec71f52e8fe1 100644
6333 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
6334 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
6335 +@@ -1260,13 +1260,15 @@ static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
6336 +
6337 + switch (tag) {
6338 + case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
6339 ++ arg->service_map_ext_valid = true;
6340 + arg->service_map_ext_len = *(__le32 *)ptr;
6341 + arg->service_map_ext = ptr + sizeof(__le32);
6342 + return 0;
6343 + default:
6344 + break;
6345 + }
6346 +- return -EPROTO;
6347 ++
6348 ++ return 0;
6349 + }
6350 +
6351 + static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
6352 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
6353 +index 2675174cc4fec..91604a14a8f46 100644
6354 +--- a/drivers/net/wireless/ath/ath10k/wmi.c
6355 ++++ b/drivers/net/wireless/ath/ath10k/wmi.c
6356 +@@ -5659,8 +5659,13 @@ void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
6357 + ret);
6358 + }
6359 +
6360 +- ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
6361 +- __le32_to_cpu(arg.service_map_ext_len));
6362 ++ /*
6363 ++ * Initialization of "arg.service_map_ext_valid" to ZERO is necessary
6364 ++ * for the below logic to work.
6365 ++ */
6366 ++ if (arg.service_map_ext_valid)
6367 ++ ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
6368 ++ __le32_to_cpu(arg.service_map_ext_len));
6369 + }
6370 +
6371 + static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
6372 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
6373 +index e80dbe7e8f4cf..761bc4a7064df 100644
6374 +--- a/drivers/net/wireless/ath/ath10k/wmi.h
6375 ++++ b/drivers/net/wireless/ath/ath10k/wmi.h
6376 +@@ -6857,6 +6857,7 @@ struct wmi_svc_rdy_ev_arg {
6377 + };
6378 +
6379 + struct wmi_svc_avail_ev_arg {
6380 ++ bool service_map_ext_valid;
6381 + __le32 service_map_ext_len;
6382 + const __le32 *service_map_ext;
6383 + };
6384 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
6385 +index 3be60aef54650..cb68f54a9c56e 100644
6386 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
6387 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
6388 +@@ -1936,16 +1936,18 @@ brcmf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6389 + fwreq = brcmf_pcie_prepare_fw_request(devinfo);
6390 + if (!fwreq) {
6391 + ret = -ENOMEM;
6392 +- goto fail_bus;
6393 ++ goto fail_brcmf;
6394 + }
6395 +
6396 + ret = brcmf_fw_get_firmwares(bus->dev, fwreq, brcmf_pcie_setup);
6397 + if (ret < 0) {
6398 + kfree(fwreq);
6399 +- goto fail_bus;
6400 ++ goto fail_brcmf;
6401 + }
6402 + return 0;
6403 +
6404 ++fail_brcmf:
6405 ++ brcmf_free(&devinfo->pdev->dev);
6406 + fail_bus:
6407 + kfree(bus->msgbuf);
6408 + kfree(bus);
6409 +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
6410 +index 38e6809f16c75..ef5521b9b3577 100644
6411 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
6412 ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
6413 +@@ -4433,6 +4433,7 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
6414 + brcmf_sdiod_intr_unregister(bus->sdiodev);
6415 +
6416 + brcmf_detach(bus->sdiodev->dev);
6417 ++ brcmf_free(bus->sdiodev->dev);
6418 +
6419 + cancel_work_sync(&bus->datawork);
6420 + if (bus->brcmf_wq)
6421 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
6422 +index 3acbd5b7ab4b2..b04cc6214bac8 100644
6423 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
6424 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
6425 +@@ -316,6 +316,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
6426 + iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
6427 + RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
6428 + iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
6429 ++ RX_HANDLER_GRP(MAC_CONF_GROUP, PROBE_RESPONSE_DATA_NOTIF,
6430 ++ iwl_mvm_probe_resp_data_notif,
6431 ++ RX_HANDLER_ASYNC_LOCKED),
6432 ++ RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF,
6433 ++ iwl_mvm_channel_switch_noa_notif,
6434 ++ RX_HANDLER_SYNC),
6435 + };
6436 + #undef RX_HANDLER
6437 + #undef RX_HANDLER_GRP
6438 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
6439 +index b0b7eca1754ed..f34297fd453c0 100644
6440 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
6441 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
6442 +@@ -968,6 +968,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
6443 +
6444 + {IWL_PCI_DEVICE(0x2725, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
6445 + {IWL_PCI_DEVICE(0x2725, 0x0020, iwlax210_2ax_cfg_ty_gf_a0)},
6446 ++ {IWL_PCI_DEVICE(0x2725, 0x0024, iwlax210_2ax_cfg_ty_gf_a0)},
6447 + {IWL_PCI_DEVICE(0x2725, 0x0310, iwlax210_2ax_cfg_ty_gf_a0)},
6448 + {IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
6449 + {IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
6450 +diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
6451 +index e753f43e0162f..e2368bfe3e468 100644
6452 +--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
6453 ++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
6454 +@@ -1234,13 +1234,6 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
6455 + if (skb->len < ETH_HLEN)
6456 + goto drop;
6457 +
6458 +- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
6459 +- if (!ctx)
6460 +- goto busy;
6461 +-
6462 +- memset(ctx->buf, 0, BULK_BUF_SIZE);
6463 +- buf = ctx->buf->data;
6464 +-
6465 + tx_control = 0;
6466 +
6467 + err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
6468 +@@ -1248,6 +1241,13 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
6469 + if (err)
6470 + goto drop;
6471 +
6472 ++ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
6473 ++ if (!ctx)
6474 ++ goto drop;
6475 ++
6476 ++ memset(ctx->buf, 0, BULK_BUF_SIZE);
6477 ++ buf = ctx->buf->data;
6478 ++
6479 + {
6480 + __le16 *tx_cntl = (__le16 *)buf;
6481 + *tx_cntl = cpu_to_le16(tx_control);
6482 +diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
6483 +index d14e55e3c9dad..5894566ec4805 100644
6484 +--- a/drivers/net/wireless/marvell/mwifiex/main.c
6485 ++++ b/drivers/net/wireless/marvell/mwifiex/main.c
6486 +@@ -1469,6 +1469,8 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
6487 + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
6488 + mwifiex_deauthenticate(priv, NULL);
6489 +
6490 ++ mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
6491 ++
6492 + mwifiex_uninit_sw(adapter);
6493 + adapter->is_up = false;
6494 +
6495 +diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
6496 +index 4824be0c6231e..2b8db3f73d00b 100644
6497 +--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
6498 ++++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
6499 +@@ -299,19 +299,19 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
6500 + sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR);
6501 + if (IS_ERR(sysctl_bar)) {
6502 + pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
6503 +- return ret;
6504 ++ return PTR_ERR(sysctl_bar);
6505 + }
6506 +
6507 + dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR);
6508 + if (IS_ERR(dmareg_bar)) {
6509 + pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
6510 +- return ret;
6511 ++ return PTR_ERR(dmareg_bar);
6512 + }
6513 +
6514 + epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR);
6515 + if (IS_ERR(epmem_bar)) {
6516 + pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
6517 +- return ret;
6518 ++ return PTR_ERR(epmem_bar);
6519 + }
6520 +
6521 + chipid = qtnf_chip_id_get(sysctl_bar);
6522 +diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
6523 +index 4b9e406b84612..a296f4e0d324a 100644
6524 +--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
6525 ++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
6526 +@@ -733,24 +733,24 @@ static int rsi_reset_card(struct rsi_hw *adapter)
6527 + if (ret < 0)
6528 + goto fail;
6529 + } else {
6530 +- if ((rsi_usb_master_reg_write(adapter,
6531 +- NWP_WWD_INTERRUPT_TIMER,
6532 +- NWP_WWD_INT_TIMER_CLKS,
6533 +- RSI_9116_REG_SIZE)) < 0) {
6534 ++ ret = rsi_usb_master_reg_write(adapter,
6535 ++ NWP_WWD_INTERRUPT_TIMER,
6536 ++ NWP_WWD_INT_TIMER_CLKS,
6537 ++ RSI_9116_REG_SIZE);
6538 ++ if (ret < 0)
6539 + goto fail;
6540 +- }
6541 +- if ((rsi_usb_master_reg_write(adapter,
6542 +- NWP_WWD_SYSTEM_RESET_TIMER,
6543 +- NWP_WWD_SYS_RESET_TIMER_CLKS,
6544 +- RSI_9116_REG_SIZE)) < 0) {
6545 ++ ret = rsi_usb_master_reg_write(adapter,
6546 ++ NWP_WWD_SYSTEM_RESET_TIMER,
6547 ++ NWP_WWD_SYS_RESET_TIMER_CLKS,
6548 ++ RSI_9116_REG_SIZE);
6549 ++ if (ret < 0)
6550 + goto fail;
6551 +- }
6552 +- if ((rsi_usb_master_reg_write(adapter,
6553 +- NWP_WWD_MODE_AND_RSTART,
6554 +- NWP_WWD_TIMER_DISABLE,
6555 +- RSI_9116_REG_SIZE)) < 0) {
6556 ++ ret = rsi_usb_master_reg_write(adapter,
6557 ++ NWP_WWD_MODE_AND_RSTART,
6558 ++ NWP_WWD_TIMER_DISABLE,
6559 ++ RSI_9116_REG_SIZE);
6560 ++ if (ret < 0)
6561 + goto fail;
6562 +- }
6563 + }
6564 +
6565 + rsi_dbg(INFO_ZONE, "Reset card done\n");
6566 +diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
6567 +index f7fe56affbcd2..326b1cc1d2bcb 100644
6568 +--- a/drivers/net/wireless/st/cw1200/main.c
6569 ++++ b/drivers/net/wireless/st/cw1200/main.c
6570 +@@ -381,6 +381,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
6571 + CW1200_LINK_ID_MAX,
6572 + cw1200_skb_dtor,
6573 + priv)) {
6574 ++ destroy_workqueue(priv->workqueue);
6575 + ieee80211_free_hw(hw);
6576 + return NULL;
6577 + }
6578 +@@ -392,6 +393,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
6579 + for (; i > 0; i--)
6580 + cw1200_queue_deinit(&priv->tx_queue[i - 1]);
6581 + cw1200_queue_stats_deinit(&priv->tx_queue_stats);
6582 ++ destroy_workqueue(priv->workqueue);
6583 + ieee80211_free_hw(hw);
6584 + return NULL;
6585 + }
6586 +diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
6587 +index f533b7372d598..9e61a6f294641 100644
6588 +--- a/drivers/net/xen-netback/xenbus.c
6589 ++++ b/drivers/net/xen-netback/xenbus.c
6590 +@@ -713,12 +713,14 @@ static int xen_register_credit_watch(struct xenbus_device *dev,
6591 + return -ENOMEM;
6592 + snprintf(node, maxlen, "%s/rate", dev->nodename);
6593 + vif->credit_watch.node = node;
6594 ++ vif->credit_watch.will_handle = NULL;
6595 + vif->credit_watch.callback = xen_net_rate_changed;
6596 + err = register_xenbus_watch(&vif->credit_watch);
6597 + if (err) {
6598 + pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
6599 + kfree(node);
6600 + vif->credit_watch.node = NULL;
6601 ++ vif->credit_watch.will_handle = NULL;
6602 + vif->credit_watch.callback = NULL;
6603 + }
6604 + return err;
6605 +@@ -765,6 +767,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
6606 + snprintf(node, maxlen, "%s/request-multicast-control",
6607 + dev->otherend);
6608 + vif->mcast_ctrl_watch.node = node;
6609 ++ vif->mcast_ctrl_watch.will_handle = NULL;
6610 + vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
6611 + err = register_xenbus_watch(&vif->mcast_ctrl_watch);
6612 + if (err) {
6613 +@@ -772,6 +775,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
6614 + vif->mcast_ctrl_watch.node);
6615 + kfree(node);
6616 + vif->mcast_ctrl_watch.node = NULL;
6617 ++ vif->mcast_ctrl_watch.will_handle = NULL;
6618 + vif->mcast_ctrl_watch.callback = NULL;
6619 + }
6620 + return err;
6621 +@@ -975,7 +979,7 @@ static void connect(struct backend_info *be)
6622 + xenvif_carrier_on(be->vif);
6623 +
6624 + unregister_hotplug_status_watch(be);
6625 +- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
6626 ++ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
6627 + hotplug_status_changed,
6628 + "%s/%s", dev->nodename, "hotplug-status");
6629 + if (!err)
6630 +diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
6631 +index be110d9cef022..310773a4ca66e 100644
6632 +--- a/drivers/nfc/s3fwrn5/firmware.c
6633 ++++ b/drivers/nfc/s3fwrn5/firmware.c
6634 +@@ -293,8 +293,10 @@ static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info)
6635 + if (ret < 0)
6636 + return ret;
6637 +
6638 +- if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE)
6639 ++ if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) {
6640 ++ release_firmware(fw->fw);
6641 + return -EINVAL;
6642 ++ }
6643 +
6644 + memcpy(fw->date, fw->fw->data + 0x00, 12);
6645 + fw->date[12] = '\0';
6646 +diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
6647 +index 47a4828b8b310..9251441fd8a35 100644
6648 +--- a/drivers/nvdimm/label.c
6649 ++++ b/drivers/nvdimm/label.c
6650 +@@ -980,6 +980,15 @@ static int __blk_label_update(struct nd_region *nd_region,
6651 + }
6652 + }
6653 +
6654 ++ /* release slots associated with any invalidated UUIDs */
6655 ++ mutex_lock(&nd_mapping->lock);
6656 ++ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
6657 ++ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
6658 ++ reap_victim(nd_mapping, label_ent);
6659 ++ list_move(&label_ent->list, &list);
6660 ++ }
6661 ++ mutex_unlock(&nd_mapping->lock);
6662 ++
6663 + /*
6664 + * Find the resource associated with the first label in the set
6665 + * per the v1.2 namespace specification.
6666 +@@ -999,8 +1008,10 @@ static int __blk_label_update(struct nd_region *nd_region,
6667 + if (is_old_resource(res, old_res_list, old_num_resources))
6668 + continue; /* carry-over */
6669 + slot = nd_label_alloc_slot(ndd);
6670 +- if (slot == UINT_MAX)
6671 ++ if (slot == UINT_MAX) {
6672 ++ rc = -ENXIO;
6673 + goto abort;
6674 ++ }
6675 + dev_dbg(ndd->dev, "allocated: %d\n", slot);
6676 +
6677 + nd_label = to_label(ndd, slot);
6678 +diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
6679 +index 933a4346ae5d6..c6b1c18165e5c 100644
6680 +--- a/drivers/pci/controller/pcie-iproc.c
6681 ++++ b/drivers/pci/controller/pcie-iproc.c
6682 +@@ -307,7 +307,7 @@ enum iproc_pcie_reg {
6683 + };
6684 +
6685 + /* iProc PCIe PAXB BCMA registers */
6686 +-static const u16 iproc_pcie_reg_paxb_bcma[] = {
6687 ++static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
6688 + [IPROC_PCIE_CLK_CTRL] = 0x000,
6689 + [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
6690 + [IPROC_PCIE_CFG_IND_DATA] = 0x124,
6691 +@@ -318,7 +318,7 @@ static const u16 iproc_pcie_reg_paxb_bcma[] = {
6692 + };
6693 +
6694 + /* iProc PCIe PAXB registers */
6695 +-static const u16 iproc_pcie_reg_paxb[] = {
6696 ++static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
6697 + [IPROC_PCIE_CLK_CTRL] = 0x000,
6698 + [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
6699 + [IPROC_PCIE_CFG_IND_DATA] = 0x124,
6700 +@@ -334,7 +334,7 @@ static const u16 iproc_pcie_reg_paxb[] = {
6701 + };
6702 +
6703 + /* iProc PCIe PAXB v2 registers */
6704 +-static const u16 iproc_pcie_reg_paxb_v2[] = {
6705 ++static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
6706 + [IPROC_PCIE_CLK_CTRL] = 0x000,
6707 + [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
6708 + [IPROC_PCIE_CFG_IND_DATA] = 0x124,
6709 +@@ -363,7 +363,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = {
6710 + };
6711 +
6712 + /* iProc PCIe PAXC v1 registers */
6713 +-static const u16 iproc_pcie_reg_paxc[] = {
6714 ++static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
6715 + [IPROC_PCIE_CLK_CTRL] = 0x000,
6716 + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
6717 + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
6718 +@@ -372,7 +372,7 @@ static const u16 iproc_pcie_reg_paxc[] = {
6719 + };
6720 +
6721 + /* iProc PCIe PAXC v2 registers */
6722 +-static const u16 iproc_pcie_reg_paxc_v2[] = {
6723 ++static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
6724 + [IPROC_PCIE_MSI_GIC_MODE] = 0x050,
6725 + [IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
6726 + [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
6727 +diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
6728 +index 3f40f951a6cdc..e30c2a78a88f5 100644
6729 +--- a/drivers/pci/pci-acpi.c
6730 ++++ b/drivers/pci/pci-acpi.c
6731 +@@ -1060,7 +1060,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
6732 + {
6733 + while (bus->parent) {
6734 + if (acpi_pm_device_can_wakeup(&bus->self->dev))
6735 +- return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);
6736 ++ return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
6737 +
6738 + bus = bus->parent;
6739 + }
6740 +@@ -1068,7 +1068,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
6741 + /* We have reached the root bus. */
6742 + if (bus->bridge) {
6743 + if (acpi_pm_device_can_wakeup(bus->bridge))
6744 +- return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
6745 ++ return acpi_pm_set_device_wakeup(bus->bridge, enable);
6746 + }
6747 + return 0;
6748 + }
6749 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
6750 +index b1b2c8ddbc927..89dece8a41321 100644
6751 +--- a/drivers/pci/pci.c
6752 ++++ b/drivers/pci/pci.c
6753 +@@ -6129,19 +6129,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6754 + while (*p) {
6755 + count = 0;
6756 + if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6757 +- p[count] == '@') {
6758 ++ p[count] == '@') {
6759 + p += count + 1;
6760 ++ if (align_order > 63) {
6761 ++ pr_err("PCI: Invalid requested alignment (order %d)\n",
6762 ++ align_order);
6763 ++ align_order = PAGE_SHIFT;
6764 ++ }
6765 + } else {
6766 +- align_order = -1;
6767 ++ align_order = PAGE_SHIFT;
6768 + }
6769 +
6770 + ret = pci_dev_str_match(dev, p, &p);
6771 + if (ret == 1) {
6772 + *resize = true;
6773 +- if (align_order == -1)
6774 +- align = PAGE_SIZE;
6775 +- else
6776 +- align = 1 << align_order;
6777 ++ align = 1ULL << align_order;
6778 + break;
6779 + } else if (ret < 0) {
6780 + pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6781 +diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
6782 +index 1f087746b7bb0..1e3ed6ec0a4af 100644
6783 +--- a/drivers/pci/slot.c
6784 ++++ b/drivers/pci/slot.c
6785 +@@ -308,6 +308,9 @@ placeholder:
6786 + goto err;
6787 + }
6788 +
6789 ++ INIT_LIST_HEAD(&slot->list);
6790 ++ list_add(&slot->list, &parent->slots);
6791 ++
6792 + err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
6793 + "%s", slot_name);
6794 + if (err) {
6795 +@@ -315,9 +318,6 @@ placeholder:
6796 + goto err;
6797 + }
6798 +
6799 +- INIT_LIST_HEAD(&slot->list);
6800 +- list_add(&slot->list, &parent->slots);
6801 +-
6802 + down_read(&pci_bus_sem);
6803 + list_for_each_entry(dev, &parent->devices, bus_list)
6804 + if (PCI_SLOT(dev->devfn) == slot_nr)
6805 +diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
6806 +index 5087b7c44d55b..cfb98bba7715b 100644
6807 +--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
6808 ++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
6809 +@@ -654,8 +654,10 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
6810 + */
6811 + pm_runtime_enable(dev);
6812 + phy_usb2_ops = of_device_get_match_data(dev);
6813 +- if (!phy_usb2_ops)
6814 +- return -EINVAL;
6815 ++ if (!phy_usb2_ops) {
6816 ++ ret = -EINVAL;
6817 ++ goto error;
6818 ++ }
6819 +
6820 + mutex_init(&channel->lock);
6821 + for (i = 0; i < NUM_OF_PHYS; i++) {
6822 +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
6823 +index 93b5654ff2828..22aca6d182c0c 100644
6824 +--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c
6825 ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c
6826 +@@ -277,14 +277,76 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function,
6827 + static bool aspeed_expr_is_gpio(const struct aspeed_sig_expr *expr)
6828 + {
6829 + /*
6830 +- * The signal type is GPIO if the signal name has "GPI" as a prefix.
6831 +- * strncmp (rather than strcmp) is used to implement the prefix
6832 +- * requirement.
6833 ++ * We need to differentiate between GPIO and non-GPIO signals to
6834 ++ * implement the gpio_request_enable() interface. For better or worse
6835 ++ * the ASPEED pinctrl driver uses the expression names to determine
6836 ++ * whether an expression will mux a pin for GPIO.
6837 + *
6838 +- * expr->signal might look like "GPIOB1" in the GPIO case.
6839 +- * expr->signal might look like "GPIT0" in the GPI case.
6840 ++ * Generally we have the following - A GPIO such as B1 has:
6841 ++ *
6842 ++ * - expr->signal set to "GPIOB1"
6843 ++ * - expr->function set to "GPIOB1"
6844 ++ *
6845 ++ * Using this fact we can determine whether the provided expression is
6846 ++ * a GPIO expression by testing the signal name for the string prefix
6847 ++ * "GPIO".
6848 ++ *
6849 ++ * However, some GPIOs are input-only, and the ASPEED datasheets name
6850 ++ * them differently. An input-only GPIO such as T0 has:
6851 ++ *
6852 ++ * - expr->signal set to "GPIT0"
6853 ++ * - expr->function set to "GPIT0"
6854 ++ *
6855 ++ * It's tempting to generalise the prefix test from "GPIO" to "GPI" to
6856 ++ * account for both GPIOs and GPIs, but in doing so we run aground on
6857 ++ * another feature:
6858 ++ *
6859 ++ * Some pins in the ASPEED BMC SoCs have a "pass-through" GPIO
6860 ++ * function where the input state of one pin is replicated as the
6861 ++ * output state of another (as if they were shorted together - a mux
6862 ++ * configuration that is typically enabled by hardware strapping).
6863 ++ * This feature allows the BMC to pass e.g. power button state through
6864 ++ * to the host while the BMC is yet to boot, but take control of the
6865 ++ * button state once the BMC has booted by muxing each pin as a
6866 ++ * separate, pin-specific GPIO.
6867 ++ *
6868 ++ * Conceptually this pass-through mode is a form of GPIO and is named
6869 ++ * as such in the datasheets, e.g. "GPID0". This naming similarity
6870 ++ * trips us up with the simple GPI-prefixed-signal-name scheme
6871 ++ * discussed above, as the pass-through configuration is not what we
6872 ++ * want when muxing a pin as GPIO for the GPIO subsystem.
6873 ++ *
6874 ++ * On e.g. the AST2400, a pass-through function "GPID0" is grouped on
6875 ++ * balls A18 and D16, where we have:
6876 ++ *
6877 ++ * For ball A18:
6878 ++ * - expr->signal set to "GPID0IN"
6879 ++ * - expr->function set to "GPID0"
6880 ++ *
6881 ++ * For ball D16:
6882 ++ * - expr->signal set to "GPID0OUT"
6883 ++ * - expr->function set to "GPID0"
6884 ++ *
6885 ++ * By contrast, the pin-specific GPIO expressions for the same pins are
6886 ++ * as follows:
6887 ++ *
6888 ++ * For ball A18:
6889 ++ * - expr->signal looks like "GPIOD0"
6890 ++ * - expr->function looks like "GPIOD0"
6891 ++ *
6892 ++ * For ball D16:
6893 ++ * - expr->signal looks like "GPIOD1"
6894 ++ * - expr->function looks like "GPIOD1"
6895 ++ *
6896 ++ * Testing both the signal _and_ function names gives us the means
6897 ++ * differentiate the pass-through GPIO pinmux configuration from the
6898 ++ * pin-specific configuration that the GPIO subsystem is after: An
6899 ++ * expression is a pin-specific (non-pass-through) GPIO configuration
6900 ++ * if the signal prefix is "GPI" and the signal name matches the
6901 ++ * function name.
6902 + */
6903 +- return strncmp(expr->signal, "GPI", 3) == 0;
6904 ++ return !strncmp(expr->signal, "GPI", 3) &&
6905 ++ !strcmp(expr->signal, expr->function);
6906 + }
6907 +
6908 + static bool aspeed_gpio_in_exprs(const struct aspeed_sig_expr **exprs)
6909 +diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
6910 +index 140c5ce9fbc11..0aaa20653536f 100644
6911 +--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
6912 ++++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
6913 +@@ -452,10 +452,11 @@ struct aspeed_sig_desc {
6914 + * evaluation of the descriptors.
6915 + *
6916 + * @signal: The signal name for the priority level on the pin. If the signal
6917 +- * type is GPIO, then the signal name must begin with the string
6918 +- * "GPIO", e.g. GPIOA0, GPIOT4 etc.
6919 ++ * type is GPIO, then the signal name must begin with the
6920 ++ * prefix "GPI", e.g. GPIOA0, GPIT0 etc.
6921 + * @function: The name of the function the signal participates in for the
6922 +- * associated expression
6923 ++ * associated expression. For pin-specific GPIO, the function
6924 ++ * name must match the signal name.
6925 + * @ndescs: The number of signal descriptors in the expression
6926 + * @descs: Pointer to an array of signal descriptors that comprise the
6927 + * function expression
6928 +diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
6929 +index 5a1174a8e2bac..d05f20ca90d7e 100644
6930 +--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
6931 ++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
6932 +@@ -1060,7 +1060,6 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
6933 + break;
6934 + case PIN_CONFIG_INPUT_DEBOUNCE:
6935 + debounce = readl(db_reg);
6936 +- debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
6937 +
6938 + if (arg)
6939 + conf |= BYT_DEBOUNCE_EN;
6940 +@@ -1069,24 +1068,31 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
6941 +
6942 + switch (arg) {
6943 + case 375:
6944 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
6945 + debounce |= BYT_DEBOUNCE_PULSE_375US;
6946 + break;
6947 + case 750:
6948 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
6949 + debounce |= BYT_DEBOUNCE_PULSE_750US;
6950 + break;
6951 + case 1500:
6952 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
6953 + debounce |= BYT_DEBOUNCE_PULSE_1500US;
6954 + break;
6955 + case 3000:
6956 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
6957 + debounce |= BYT_DEBOUNCE_PULSE_3MS;
6958 + break;
6959 + case 6000:
6960 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
6961 + debounce |= BYT_DEBOUNCE_PULSE_6MS;
6962 + break;
6963 + case 12000:
6964 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
6965 + debounce |= BYT_DEBOUNCE_PULSE_12MS;
6966 + break;
6967 + case 24000:
6968 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
6969 + debounce |= BYT_DEBOUNCE_PULSE_24MS;
6970 + break;
6971 + default:
6972 +diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
6973 +index 04ca8ae95df83..9e91d83b01388 100644
6974 +--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
6975 ++++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
6976 +@@ -741,6 +741,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
6977 + mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
6978 + bits |= BUFCFG_PU_EN;
6979 +
6980 ++ /* Set default strength value in case none is given */
6981 ++ if (arg == 1)
6982 ++ arg = 20000;
6983 ++
6984 + switch (arg) {
6985 + case 50000:
6986 + bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
6987 +@@ -761,6 +765,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
6988 + mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
6989 + bits |= BUFCFG_PD_EN;
6990 +
6991 ++ /* Set default strength value in case none is given */
6992 ++ if (arg == 1)
6993 ++ arg = 20000;
6994 ++
6995 + switch (arg) {
6996 + case 50000:
6997 + bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
6998 +diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
6999 +index 62c02b969327f..7521a924dffb0 100644
7000 +--- a/drivers/pinctrl/pinctrl-falcon.c
7001 ++++ b/drivers/pinctrl/pinctrl-falcon.c
7002 +@@ -431,24 +431,28 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
7003 +
7004 + /* load and remap the pad resources of the different banks */
7005 + for_each_compatible_node(np, NULL, "lantiq,pad-falcon") {
7006 +- struct platform_device *ppdev = of_find_device_by_node(np);
7007 + const __be32 *bank = of_get_property(np, "lantiq,bank", NULL);
7008 + struct resource res;
7009 ++ struct platform_device *ppdev;
7010 + u32 avail;
7011 + int pins;
7012 +
7013 + if (!of_device_is_available(np))
7014 + continue;
7015 +
7016 +- if (!ppdev) {
7017 +- dev_err(&pdev->dev, "failed to find pad pdev\n");
7018 +- continue;
7019 +- }
7020 + if (!bank || *bank >= PORTS)
7021 + continue;
7022 + if (of_address_to_resource(np, 0, &res))
7023 + continue;
7024 ++
7025 ++ ppdev = of_find_device_by_node(np);
7026 ++ if (!ppdev) {
7027 ++ dev_err(&pdev->dev, "failed to find pad pdev\n");
7028 ++ continue;
7029 ++ }
7030 ++
7031 + falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
7032 ++ put_device(&ppdev->dev);
7033 + if (IS_ERR(falcon_info.clk[*bank])) {
7034 + dev_err(&ppdev->dev, "failed to get clock\n");
7035 + of_node_put(np);
7036 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
7037 +index 0cbca30b75dcf..77783582080c7 100644
7038 +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
7039 ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
7040 +@@ -1130,20 +1130,22 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
7041 + if (bank == pctl->desc->irq_banks)
7042 + return;
7043 +
7044 ++ chained_irq_enter(chip, desc);
7045 ++
7046 + reg = sunxi_irq_status_reg_from_bank(pctl->desc, bank);
7047 + val = readl(pctl->membase + reg);
7048 +
7049 + if (val) {
7050 + int irqoffset;
7051 +
7052 +- chained_irq_enter(chip, desc);
7053 + for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) {
7054 + int pin_irq = irq_find_mapping(pctl->domain,
7055 + bank * IRQ_PER_BANK + irqoffset);
7056 + generic_handle_irq(pin_irq);
7057 + }
7058 +- chained_irq_exit(chip, desc);
7059 + }
7060 ++
7061 ++ chained_irq_exit(chip, desc);
7062 + }
7063 +
7064 + static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl,
7065 +diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
7066 +index a831bd5a5b2ff..5e4521b014280 100644
7067 +--- a/drivers/platform/chrome/cros_ec_spi.c
7068 ++++ b/drivers/platform/chrome/cros_ec_spi.c
7069 +@@ -739,7 +739,6 @@ static int cros_ec_spi_probe(struct spi_device *spi)
7070 + int err;
7071 +
7072 + spi->bits_per_word = 8;
7073 +- spi->mode = SPI_MODE_0;
7074 + spi->rt = true;
7075 + err = spi_setup(spi);
7076 + if (err < 0)
7077 +diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
7078 +index fe59b0ebff314..ceb8e701028df 100644
7079 +--- a/drivers/platform/x86/dell-smbios-base.c
7080 ++++ b/drivers/platform/x86/dell-smbios-base.c
7081 +@@ -594,6 +594,7 @@ static int __init dell_smbios_init(void)
7082 + if (wmi && smm) {
7083 + pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n",
7084 + wmi, smm);
7085 ++ ret = -ENODEV;
7086 + goto fail_create_group;
7087 + }
7088 +
7089 +diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
7090 +index 701d1ddda4b11..bc8b0098d4f32 100644
7091 +--- a/drivers/platform/x86/intel-vbtn.c
7092 ++++ b/drivers/platform/x86/intel-vbtn.c
7093 +@@ -203,6 +203,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
7094 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
7095 + },
7096 + },
7097 ++ {
7098 ++ .matches = {
7099 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
7100 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
7101 ++ },
7102 ++ },
7103 + {} /* Array terminator */
7104 + };
7105 +
7106 +diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
7107 +index 59b5b7eebb05a..54db334e52c13 100644
7108 +--- a/drivers/platform/x86/mlx-platform.c
7109 ++++ b/drivers/platform/x86/mlx-platform.c
7110 +@@ -234,15 +234,6 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
7111 + };
7112 +
7113 + /* Platform hotplug devices */
7114 +-static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
7115 +- {
7116 +- I2C_BOARD_INFO("24c02", 0x51),
7117 +- },
7118 +- {
7119 +- I2C_BOARD_INFO("24c02", 0x50),
7120 +- },
7121 +-};
7122 +-
7123 + static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
7124 + {
7125 + I2C_BOARD_INFO("dps460", 0x59),
7126 +@@ -273,15 +264,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
7127 + .label = "psu1",
7128 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
7129 + .mask = BIT(0),
7130 +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0],
7131 +- .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
7132 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
7133 + },
7134 + {
7135 + .label = "psu2",
7136 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
7137 + .mask = BIT(1),
7138 +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1],
7139 +- .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
7140 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
7141 + },
7142 + };
7143 +
7144 +@@ -348,7 +337,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
7145 + .aggr_mask = MLXPLAT_CPLD_AGGR_PSU_MASK_DEF,
7146 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
7147 + .mask = MLXPLAT_CPLD_PSU_MASK,
7148 +- .count = ARRAY_SIZE(mlxplat_mlxcpld_psu),
7149 ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_psu_items_data),
7150 + .inversed = 1,
7151 + .health = false,
7152 + },
7153 +@@ -357,7 +346,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
7154 + .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
7155 + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
7156 + .mask = MLXPLAT_CPLD_PWR_MASK,
7157 +- .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
7158 ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data),
7159 + .inversed = 0,
7160 + .health = false,
7161 + },
7162 +@@ -366,7 +355,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
7163 + .aggr_mask = MLXPLAT_CPLD_AGGR_FAN_MASK_DEF,
7164 + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
7165 + .mask = MLXPLAT_CPLD_FAN_MASK,
7166 +- .count = ARRAY_SIZE(mlxplat_mlxcpld_fan),
7167 ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data),
7168 + .inversed = 1,
7169 + .health = false,
7170 + },
7171 +@@ -444,15 +433,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_psu_items_data[] = {
7172 + .label = "psu1",
7173 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
7174 + .mask = BIT(0),
7175 +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0],
7176 +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
7177 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
7178 + },
7179 + {
7180 + .label = "psu2",
7181 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
7182 + .mask = BIT(1),
7183 +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1],
7184 +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
7185 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
7186 + },
7187 + };
7188 +
7189 +diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
7190 +index cf4c67b2d2359..7d09e49f04d3b 100644
7191 +--- a/drivers/power/supply/axp288_charger.c
7192 ++++ b/drivers/power/supply/axp288_charger.c
7193 +@@ -548,14 +548,15 @@ out:
7194 +
7195 + /*
7196 + * The HP Pavilion x2 10 series comes in a number of variants:
7197 +- * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D"
7198 +- * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E"
7199 +- * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4"
7200 ++ * Bay Trail SoC + AXP288 PMIC, Micro-USB, DMI_BOARD_NAME: "8021"
7201 ++ * Bay Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "815D"
7202 ++ * Cherry Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "813E"
7203 ++ * Cherry Trail SoC + TI PMIC, Type-C, DMI_BOARD_NAME: "827C" or "82F4"
7204 + *
7205 +- * The variants with the AXP288 PMIC are all kinds of special:
7206 ++ * The variants with the AXP288 + Type-C connector are all kinds of special:
7207 + *
7208 +- * 1. All variants use a Type-C connector which the AXP288 does not support, so
7209 +- * when using a Type-C charger it is not recognized. Unlike most AXP288 devices,
7210 ++ * 1. They use a Type-C connector which the AXP288 does not support, so when
7211 ++ * using a Type-C charger it is not recognized. Unlike most AXP288 devices,
7212 + * this model actually has mostly working ACPI AC / Battery code, the ACPI code
7213 + * "solves" this by simply setting the input_current_limit to 3A.
7214 + * There are still some issues with the ACPI code, so we use this native driver,
7215 +@@ -578,12 +579,17 @@ out:
7216 + */
7217 + static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = {
7218 + {
7219 +- /*
7220 +- * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry
7221 +- * Trail model has "HP", so we only match on product_name.
7222 +- */
7223 + .matches = {
7224 +- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
7225 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
7226 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
7227 ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "815D"),
7228 ++ },
7229 ++ },
7230 ++ {
7231 ++ .matches = {
7232 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "HP"),
7233 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
7234 ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "813E"),
7235 + },
7236 + },
7237 + {} /* Terminating entry */
7238 +diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
7239 +index 453d6332d43a7..1ae5d6d42c9e3 100644
7240 +--- a/drivers/power/supply/bq24190_charger.c
7241 ++++ b/drivers/power/supply/bq24190_charger.c
7242 +@@ -448,8 +448,10 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
7243 + return -EINVAL;
7244 +
7245 + ret = pm_runtime_get_sync(bdi->dev);
7246 +- if (ret < 0)
7247 ++ if (ret < 0) {
7248 ++ pm_runtime_put_noidle(bdi->dev);
7249 + return ret;
7250 ++ }
7251 +
7252 + ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
7253 + if (ret)
7254 +@@ -1075,8 +1077,10 @@ static int bq24190_charger_get_property(struct power_supply *psy,
7255 + dev_dbg(bdi->dev, "prop: %d\n", psp);
7256 +
7257 + ret = pm_runtime_get_sync(bdi->dev);
7258 +- if (ret < 0)
7259 ++ if (ret < 0) {
7260 ++ pm_runtime_put_noidle(bdi->dev);
7261 + return ret;
7262 ++ }
7263 +
7264 + switch (psp) {
7265 + case POWER_SUPPLY_PROP_CHARGE_TYPE:
7266 +@@ -1147,8 +1151,10 @@ static int bq24190_charger_set_property(struct power_supply *psy,
7267 + dev_dbg(bdi->dev, "prop: %d\n", psp);
7268 +
7269 + ret = pm_runtime_get_sync(bdi->dev);
7270 +- if (ret < 0)
7271 ++ if (ret < 0) {
7272 ++ pm_runtime_put_noidle(bdi->dev);
7273 + return ret;
7274 ++ }
7275 +
7276 + switch (psp) {
7277 + case POWER_SUPPLY_PROP_ONLINE:
7278 +@@ -1408,8 +1414,10 @@ static int bq24190_battery_get_property(struct power_supply *psy,
7279 + dev_dbg(bdi->dev, "prop: %d\n", psp);
7280 +
7281 + ret = pm_runtime_get_sync(bdi->dev);
7282 +- if (ret < 0)
7283 ++ if (ret < 0) {
7284 ++ pm_runtime_put_noidle(bdi->dev);
7285 + return ret;
7286 ++ }
7287 +
7288 + switch (psp) {
7289 + case POWER_SUPPLY_PROP_STATUS:
7290 +@@ -1454,8 +1462,10 @@ static int bq24190_battery_set_property(struct power_supply *psy,
7291 + dev_dbg(bdi->dev, "prop: %d\n", psp);
7292 +
7293 + ret = pm_runtime_get_sync(bdi->dev);
7294 +- if (ret < 0)
7295 ++ if (ret < 0) {
7296 ++ pm_runtime_put_noidle(bdi->dev);
7297 + return ret;
7298 ++ }
7299 +
7300 + switch (psp) {
7301 + case POWER_SUPPLY_PROP_ONLINE:
7302 +diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
7303 +index 333ba83006e48..a12a1ad9b5fe3 100644
7304 +--- a/drivers/ps3/ps3stor_lib.c
7305 ++++ b/drivers/ps3/ps3stor_lib.c
7306 +@@ -189,7 +189,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
7307 + dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
7308 + dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
7309 + dev->bounce_size, DMA_BIDIRECTIONAL);
7310 +- if (!dev->bounce_dma) {
7311 ++ if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) {
7312 + dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
7313 + __func__, __LINE__);
7314 + error = -ENODEV;
7315 +diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
7316 +index 7551253ada32b..bf3f14fb5f244 100644
7317 +--- a/drivers/pwm/pwm-lp3943.c
7318 ++++ b/drivers/pwm/pwm-lp3943.c
7319 +@@ -275,6 +275,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
7320 + lp3943_pwm->chip.dev = &pdev->dev;
7321 + lp3943_pwm->chip.ops = &lp3943_pwm_ops;
7322 + lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
7323 ++ lp3943_pwm->chip.base = -1;
7324 +
7325 + platform_set_drvdata(pdev, lp3943_pwm);
7326 +
7327 +diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c
7328 +index e2c21cc34a96a..3763ce5311ac2 100644
7329 +--- a/drivers/pwm/pwm-zx.c
7330 ++++ b/drivers/pwm/pwm-zx.c
7331 +@@ -238,6 +238,7 @@ static int zx_pwm_probe(struct platform_device *pdev)
7332 + ret = pwmchip_add(&zpc->chip);
7333 + if (ret < 0) {
7334 + dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
7335 ++ clk_disable_unprepare(zpc->pclk);
7336 + return ret;
7337 + }
7338 +
7339 +diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
7340 +index 7075f42b9fcf6..aefc351bfed59 100644
7341 +--- a/drivers/regulator/axp20x-regulator.c
7342 ++++ b/drivers/regulator/axp20x-regulator.c
7343 +@@ -596,7 +596,7 @@ static const struct regulator_desc axp22x_regulators[] = {
7344 + AXP22X_DLDO1_V_OUT, AXP22X_DLDO1_V_OUT_MASK,
7345 + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO1_MASK),
7346 + AXP_DESC(AXP22X, DLDO2, "dldo2", "dldoin", 700, 3300, 100,
7347 +- AXP22X_DLDO2_V_OUT, AXP22X_PWR_OUT_DLDO2_MASK,
7348 ++ AXP22X_DLDO2_V_OUT, AXP22X_DLDO2_V_OUT_MASK,
7349 + AXP22X_PWR_OUT_CTRL2, AXP22X_PWR_OUT_DLDO2_MASK),
7350 + AXP_DESC(AXP22X, DLDO3, "dldo3", "dldoin", 700, 3300, 100,
7351 + AXP22X_DLDO3_V_OUT, AXP22X_DLDO3_V_OUT_MASK,
7352 +diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
7353 +index e953886b2eb77..24e8b7e271773 100644
7354 +--- a/drivers/remoteproc/qcom_q6v5_adsp.c
7355 ++++ b/drivers/remoteproc/qcom_q6v5_adsp.c
7356 +@@ -184,8 +184,10 @@ static int adsp_start(struct rproc *rproc)
7357 +
7358 + dev_pm_genpd_set_performance_state(adsp->dev, INT_MAX);
7359 + ret = pm_runtime_get_sync(adsp->dev);
7360 +- if (ret)
7361 ++ if (ret) {
7362 ++ pm_runtime_put_noidle(adsp->dev);
7363 + goto disable_xo_clk;
7364 ++ }
7365 +
7366 + ret = clk_bulk_prepare_enable(adsp->num_clks, adsp->clks);
7367 + if (ret) {
7368 +@@ -345,15 +347,12 @@ static int adsp_init_mmio(struct qcom_adsp *adsp,
7369 + struct platform_device *pdev)
7370 + {
7371 + struct device_node *syscon;
7372 +- struct resource *res;
7373 + int ret;
7374 +
7375 +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
7376 +- adsp->qdsp6ss_base = devm_ioremap(&pdev->dev, res->start,
7377 +- resource_size(res));
7378 +- if (!adsp->qdsp6ss_base) {
7379 ++ adsp->qdsp6ss_base = devm_platform_ioremap_resource(pdev, 0);
7380 ++ if (IS_ERR(adsp->qdsp6ss_base)) {
7381 + dev_err(adsp->dev, "failed to map QDSP6SS registers\n");
7382 +- return -ENOMEM;
7383 ++ return PTR_ERR(adsp->qdsp6ss_base);
7384 + }
7385 +
7386 + syscon = of_parse_phandle(pdev->dev.of_node, "qcom,halt-regs", 0);
7387 +diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
7388 +index a67c55785b4de..5e54e6f5edb1a 100644
7389 +--- a/drivers/remoteproc/qcom_q6v5_mss.c
7390 ++++ b/drivers/remoteproc/qcom_q6v5_mss.c
7391 +@@ -331,8 +331,11 @@ static int q6v5_pds_enable(struct q6v5 *qproc, struct device **pds,
7392 + for (i = 0; i < pd_count; i++) {
7393 + dev_pm_genpd_set_performance_state(pds[i], INT_MAX);
7394 + ret = pm_runtime_get_sync(pds[i]);
7395 +- if (ret < 0)
7396 ++ if (ret < 0) {
7397 ++ pm_runtime_put_noidle(pds[i]);
7398 ++ dev_pm_genpd_set_performance_state(pds[i], 0);
7399 + goto unroll_pd_votes;
7400 ++ }
7401 + }
7402 +
7403 + return 0;
7404 +diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
7405 +index 1766496385fed..4fd6afe2228e7 100644
7406 +--- a/drivers/rtc/rtc-ep93xx.c
7407 ++++ b/drivers/rtc/rtc-ep93xx.c
7408 +@@ -33,7 +33,7 @@ struct ep93xx_rtc {
7409 + static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
7410 + unsigned short *delete)
7411 + {
7412 +- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
7413 ++ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev);
7414 + unsigned long comp;
7415 +
7416 + comp = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP);
7417 +@@ -51,7 +51,7 @@ static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
7418 +
7419 + static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
7420 + {
7421 +- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
7422 ++ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev);
7423 + unsigned long time;
7424 +
7425 + time = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA);
7426 +@@ -62,7 +62,7 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
7427 +
7428 + static int ep93xx_rtc_set_time(struct device *dev, struct rtc_time *tm)
7429 + {
7430 +- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
7431 ++ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev);
7432 + unsigned long secs = rtc_tm_to_time64(tm);
7433 +
7434 + writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD);
7435 +diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
7436 +index 02b069caffd57..d1d37a204264c 100644
7437 +--- a/drivers/rtc/rtc-pcf2127.c
7438 ++++ b/drivers/rtc/rtc-pcf2127.c
7439 +@@ -230,10 +230,8 @@ static int pcf2127_nvmem_read(void *priv, unsigned int offset,
7440 + if (ret)
7441 + return ret;
7442 +
7443 +- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD,
7444 +- val, bytes);
7445 +-
7446 +- return ret ?: bytes;
7447 ++ return regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD,
7448 ++ val, bytes);
7449 + }
7450 +
7451 + static int pcf2127_nvmem_write(void *priv, unsigned int offset,
7452 +@@ -248,10 +246,8 @@ static int pcf2127_nvmem_write(void *priv, unsigned int offset,
7453 + if (ret)
7454 + return ret;
7455 +
7456 +- ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD,
7457 +- val, bytes);
7458 +-
7459 +- return ret ?: bytes;
7460 ++ return regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD,
7461 ++ val, bytes);
7462 + }
7463 +
7464 + /* watchdog driver */
7465 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
7466 +index 99f86612f7751..dc78a523a69f2 100644
7467 +--- a/drivers/s390/block/dasd_alias.c
7468 ++++ b/drivers/s390/block/dasd_alias.c
7469 +@@ -256,7 +256,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
7470 + return;
7471 + device->discipline->get_uid(device, &uid);
7472 + spin_lock_irqsave(&lcu->lock, flags);
7473 +- list_del_init(&device->alias_list);
7474 + /* make sure that the workers don't use this device */
7475 + if (device == lcu->suc_data.device) {
7476 + spin_unlock_irqrestore(&lcu->lock, flags);
7477 +@@ -283,6 +282,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
7478 +
7479 + spin_lock_irqsave(&aliastree.lock, flags);
7480 + spin_lock(&lcu->lock);
7481 ++ list_del_init(&device->alias_list);
7482 + if (list_empty(&lcu->grouplist) &&
7483 + list_empty(&lcu->active_devices) &&
7484 + list_empty(&lcu->inactive_devices)) {
7485 +@@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device,
7486 + spin_unlock_irqrestore(&lcu->lock, flags);
7487 +
7488 + rc = dasd_sleep_on(cqr);
7489 +- if (rc && !suborder_not_supported(cqr)) {
7490 ++ if (!rc)
7491 ++ goto out;
7492 ++
7493 ++ if (suborder_not_supported(cqr)) {
7494 ++ /* suborder not supported or device unusable for IO */
7495 ++ rc = -EOPNOTSUPP;
7496 ++ } else {
7497 ++ /* IO failed but should be retried */
7498 + spin_lock_irqsave(&lcu->lock, flags);
7499 + lcu->flags |= NEED_UAC_UPDATE;
7500 + spin_unlock_irqrestore(&lcu->lock, flags);
7501 + }
7502 ++out:
7503 + dasd_sfree_request(cqr, cqr->memdev);
7504 + return rc;
7505 + }
7506 +@@ -503,6 +511,14 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
7507 + return rc;
7508 +
7509 + spin_lock_irqsave(&lcu->lock, flags);
7510 ++ /*
7511 ++ * there is another update needed skip the remaining handling
7512 ++ * the data might already be outdated
7513 ++ * but especially do not add the device to an LCU with pending
7514 ++ * update
7515 ++ */
7516 ++ if (lcu->flags & NEED_UAC_UPDATE)
7517 ++ goto out;
7518 + lcu->pav = NO_PAV;
7519 + for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
7520 + switch (lcu->uac->unit[i].ua_type) {
7521 +@@ -521,6 +537,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
7522 + alias_list) {
7523 + _add_device_to_lcu(lcu, device, refdev);
7524 + }
7525 ++out:
7526 + spin_unlock_irqrestore(&lcu->lock, flags);
7527 + return 0;
7528 + }
7529 +@@ -625,6 +642,7 @@ int dasd_alias_add_device(struct dasd_device *device)
7530 + }
7531 + if (lcu->flags & UPDATE_PENDING) {
7532 + list_move(&device->alias_list, &lcu->active_devices);
7533 ++ private->pavgroup = NULL;
7534 + _schedule_lcu_update(lcu, device);
7535 + }
7536 + spin_unlock_irqrestore(&lcu->lock, flags);
7537 +diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
7538 +index 983f9c9e08deb..23e9227e60fd7 100644
7539 +--- a/drivers/s390/cio/device.c
7540 ++++ b/drivers/s390/cio/device.c
7541 +@@ -1664,10 +1664,10 @@ void __init ccw_device_destroy_console(struct ccw_device *cdev)
7542 + struct io_subchannel_private *io_priv = to_io_private(sch);
7543 +
7544 + set_io_private(sch, NULL);
7545 +- put_device(&sch->dev);
7546 +- put_device(&cdev->dev);
7547 + dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
7548 + io_priv->dma_area, io_priv->dma_area_dma);
7549 ++ put_device(&sch->dev);
7550 ++ put_device(&cdev->dev);
7551 + kfree(io_priv);
7552 + }
7553 +
7554 +diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
7555 +index 702dc82c9501d..a0c0791abee69 100644
7556 +--- a/drivers/scsi/bnx2i/Kconfig
7557 ++++ b/drivers/scsi/bnx2i/Kconfig
7558 +@@ -4,6 +4,7 @@ config SCSI_BNX2_ISCSI
7559 + depends on NET
7560 + depends on PCI
7561 + depends on (IPV6 || IPV6=n)
7562 ++ depends on MMU
7563 + select SCSI_ISCSI_ATTRS
7564 + select NETDEVICES
7565 + select ETHERNET
7566 +diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
7567 +index 18584ab27c329..3a2618bcce67b 100644
7568 +--- a/drivers/scsi/fnic/fnic_main.c
7569 ++++ b/drivers/scsi/fnic/fnic_main.c
7570 +@@ -741,6 +741,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7571 + for (i = 0; i < FNIC_IO_LOCKS; i++)
7572 + spin_lock_init(&fnic->io_req_lock[i]);
7573 +
7574 ++ err = -ENOMEM;
7575 + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
7576 + if (!fnic->io_req_pool)
7577 + goto err_out_free_resources;
7578 +diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
7579 +index ae09bb863497d..ef75fd6c04834 100644
7580 +--- a/drivers/scsi/lpfc/lpfc_mem.c
7581 ++++ b/drivers/scsi/lpfc/lpfc_mem.c
7582 +@@ -593,8 +593,6 @@ lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
7583 + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
7584 + * pool along a non-DMA-mapped container for it.
7585 + *
7586 +- * Notes: Not interrupt-safe. Must be called with no locks held.
7587 +- *
7588 + * Returns:
7589 + * pointer to HBQ on success
7590 + * NULL on failure
7591 +@@ -604,7 +602,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
7592 + {
7593 + struct rqb_dmabuf *dma_buf;
7594 +
7595 +- dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
7596 ++ dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
7597 + if (!dma_buf)
7598 + return NULL;
7599 +
7600 +@@ -727,7 +725,6 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
7601 + drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
7602 + rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
7603 + if (rc < 0) {
7604 +- (rqbp->rqb_free_buffer)(phba, rqb_entry);
7605 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7606 + "6409 Cannot post to HRQ %d: %x %x %x "
7607 + "DRQ %x %x\n",
7608 +@@ -737,6 +734,7 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
7609 + rqb_entry->hrq->entry_count,
7610 + rqb_entry->drq->host_index,
7611 + rqb_entry->drq->hba_index);
7612 ++ (rqbp->rqb_free_buffer)(phba, rqb_entry);
7613 + } else {
7614 + list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
7615 + rqbp->buffer_count++;
7616 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
7617 +index e2877d2b3cc0d..b9857d7b224fb 100644
7618 +--- a/drivers/scsi/lpfc/lpfc_sli.c
7619 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
7620 +@@ -7102,12 +7102,16 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7621 + struct rqb_dmabuf *rqb_buffer;
7622 + LIST_HEAD(rqb_buf_list);
7623 +
7624 +- spin_lock_irqsave(&phba->hbalock, flags);
7625 + rqbp = hrq->rqbp;
7626 + for (i = 0; i < count; i++) {
7627 ++ spin_lock_irqsave(&phba->hbalock, flags);
7628 + /* IF RQ is already full, don't bother */
7629 +- if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
7630 ++ if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7631 ++ spin_unlock_irqrestore(&phba->hbalock, flags);
7632 + break;
7633 ++ }
7634 ++ spin_unlock_irqrestore(&phba->hbalock, flags);
7635 ++
7636 + rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7637 + if (!rqb_buffer)
7638 + break;
7639 +@@ -7116,6 +7120,8 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7640 + rqb_buffer->idx = idx;
7641 + list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7642 + }
7643 ++
7644 ++ spin_lock_irqsave(&phba->hbalock, flags);
7645 + while (!list_empty(&rqb_buf_list)) {
7646 + list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7647 + hbuf.list);
7648 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
7649 +index 2c2966a297c77..4a23dd8b7f9aa 100644
7650 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
7651 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
7652 +@@ -8038,7 +8038,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7653 + int error = 0, i;
7654 + void *sense = NULL;
7655 + dma_addr_t sense_handle;
7656 +- unsigned long *sense_ptr;
7657 ++ void *sense_ptr;
7658 + u32 opcode = 0;
7659 +
7660 + memset(kbuff_arr, 0, sizeof(kbuff_arr));
7661 +@@ -8160,6 +8160,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7662 + }
7663 +
7664 + if (ioc->sense_len) {
7665 ++ /* make sure the pointer is part of the frame */
7666 ++ if (ioc->sense_off >
7667 ++ (sizeof(union megasas_frame) - sizeof(__le64))) {
7668 ++ error = -EINVAL;
7669 ++ goto out;
7670 ++ }
7671 ++
7672 + sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
7673 + &sense_handle, GFP_KERNEL);
7674 + if (!sense) {
7675 +@@ -8167,12 +8174,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
7676 + goto out;
7677 + }
7678 +
7679 +- sense_ptr =
7680 +- (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
7681 ++ sense_ptr = (void *)cmd->frame + ioc->sense_off;
7682 + if (instance->consistent_mask_64bit)
7683 +- *sense_ptr = cpu_to_le64(sense_handle);
7684 ++ put_unaligned_le64(sense_handle, sense_ptr);
7685 + else
7686 +- *sense_ptr = cpu_to_le32(sense_handle);
7687 ++ put_unaligned_le32(sense_handle, sense_ptr);
7688 + }
7689 +
7690 + /*
7691 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
7692 +index 8be8c510fdf79..7532603aafb15 100644
7693 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
7694 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
7695 +@@ -6227,7 +6227,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
7696 +
7697 + r = _base_handshake_req_reply_wait(ioc,
7698 + sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
7699 +- sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
7700 ++ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
7701 +
7702 + if (r != 0) {
7703 + ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7704 +diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
7705 +index 3374f553c617a..8882ba33ca87c 100644
7706 +--- a/drivers/scsi/pm8001/pm8001_init.c
7707 ++++ b/drivers/scsi/pm8001/pm8001_init.c
7708 +@@ -1040,7 +1040,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
7709 +
7710 + pm8001_init_sas_add(pm8001_ha);
7711 + /* phy setting support for motherboard controller */
7712 +- if (pm8001_configure_phy_settings(pm8001_ha))
7713 ++ rc = pm8001_configure_phy_settings(pm8001_ha);
7714 ++ if (rc)
7715 + goto err_out_shost;
7716 +
7717 + pm8001_post_sas_ha_init(shost, chip);
7718 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
7719 +index acb930b8c6a64..35c96ea2653be 100644
7720 +--- a/drivers/scsi/qedi/qedi_main.c
7721 ++++ b/drivers/scsi/qedi/qedi_main.c
7722 +@@ -2630,7 +2630,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
7723 + QEDI_ERR(&qedi->dbg_ctx,
7724 + "Unable to start offload thread!\n");
7725 + rc = -ENODEV;
7726 +- goto free_cid_que;
7727 ++ goto free_tmf_thread;
7728 + }
7729 +
7730 + /* F/w needs 1st task context memory entry for performance */
7731 +@@ -2650,6 +2650,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
7732 +
7733 + return 0;
7734 +
7735 ++free_tmf_thread:
7736 ++ destroy_workqueue(qedi->tmf_thread);
7737 + free_cid_que:
7738 + qedi_release_cid_que(qedi);
7739 + free_uio:
7740 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
7741 +index 294d77c02cdf6..f169292448bd4 100644
7742 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c
7743 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
7744 +@@ -910,7 +910,8 @@ qla27xx_template_checksum(void *p, ulong size)
7745 + static inline int
7746 + qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
7747 + {
7748 +- return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
7749 ++ return qla27xx_template_checksum(tmp,
7750 ++ le32_to_cpu(tmp->template_size)) == 0;
7751 + }
7752 +
7753 + static inline int
7754 +@@ -926,7 +927,7 @@ qla27xx_execute_fwdt_template(struct scsi_qla_host *vha,
7755 + ulong len = 0;
7756 +
7757 + if (qla27xx_fwdt_template_valid(tmp)) {
7758 +- len = tmp->template_size;
7759 ++ len = le32_to_cpu(tmp->template_size);
7760 + tmp = memcpy(buf, tmp, len);
7761 + ql27xx_edit_template(vha, tmp);
7762 + qla27xx_walk_template(vha, tmp, buf, &len);
7763 +@@ -942,7 +943,7 @@ qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha, void *p)
7764 + ulong len = 0;
7765 +
7766 + if (qla27xx_fwdt_template_valid(tmp)) {
7767 +- len = tmp->template_size;
7768 ++ len = le32_to_cpu(tmp->template_size);
7769 + qla27xx_walk_template(vha, tmp, NULL, &len);
7770 + }
7771 +
7772 +@@ -954,7 +955,7 @@ qla27xx_fwdt_template_size(void *p)
7773 + {
7774 + struct qla27xx_fwdt_template *tmp = p;
7775 +
7776 +- return tmp->template_size;
7777 ++ return le32_to_cpu(tmp->template_size);
7778 + }
7779 +
7780 + int
7781 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
7782 +index d2a0014e8b21e..fb8ab3bc86c26 100644
7783 +--- a/drivers/scsi/qla2xxx/qla_tmpl.h
7784 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.h
7785 +@@ -13,7 +13,7 @@
7786 + struct __packed qla27xx_fwdt_template {
7787 + __le32 template_type;
7788 + __le32 entry_offset;
7789 +- uint32_t template_size;
7790 ++ __le32 template_size;
7791 + uint32_t count; /* borrow field for running/residual count */
7792 +
7793 + __le32 entry_count;
7794 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
7795 +index e6944e1cba2ba..b5867e1566f42 100644
7796 +--- a/drivers/scsi/scsi_lib.c
7797 ++++ b/drivers/scsi/scsi_lib.c
7798 +@@ -2930,6 +2930,78 @@ void sdev_enable_disk_events(struct scsi_device *sdev)
7799 + }
7800 + EXPORT_SYMBOL(sdev_enable_disk_events);
7801 +
7802 ++static unsigned char designator_prio(const unsigned char *d)
7803 ++{
7804 ++ if (d[1] & 0x30)
7805 ++ /* not associated with LUN */
7806 ++ return 0;
7807 ++
7808 ++ if (d[3] == 0)
7809 ++ /* invalid length */
7810 ++ return 0;
7811 ++
7812 ++ /*
7813 ++ * Order of preference for lun descriptor:
7814 ++ * - SCSI name string
7815 ++ * - NAA IEEE Registered Extended
7816 ++ * - EUI-64 based 16-byte
7817 ++ * - EUI-64 based 12-byte
7818 ++ * - NAA IEEE Registered
7819 ++ * - NAA IEEE Extended
7820 ++ * - EUI-64 based 8-byte
7821 ++ * - SCSI name string (truncated)
7822 ++ * - T10 Vendor ID
7823 ++ * as longer descriptors reduce the likelyhood
7824 ++ * of identification clashes.
7825 ++ */
7826 ++
7827 ++ switch (d[1] & 0xf) {
7828 ++ case 8:
7829 ++ /* SCSI name string, variable-length UTF-8 */
7830 ++ return 9;
7831 ++ case 3:
7832 ++ switch (d[4] >> 4) {
7833 ++ case 6:
7834 ++ /* NAA registered extended */
7835 ++ return 8;
7836 ++ case 5:
7837 ++ /* NAA registered */
7838 ++ return 5;
7839 ++ case 4:
7840 ++ /* NAA extended */
7841 ++ return 4;
7842 ++ case 3:
7843 ++ /* NAA locally assigned */
7844 ++ return 1;
7845 ++ default:
7846 ++ break;
7847 ++ }
7848 ++ break;
7849 ++ case 2:
7850 ++ switch (d[3]) {
7851 ++ case 16:
7852 ++ /* EUI64-based, 16 byte */
7853 ++ return 7;
7854 ++ case 12:
7855 ++ /* EUI64-based, 12 byte */
7856 ++ return 6;
7857 ++ case 8:
7858 ++ /* EUI64-based, 8 byte */
7859 ++ return 3;
7860 ++ default:
7861 ++ break;
7862 ++ }
7863 ++ break;
7864 ++ case 1:
7865 ++ /* T10 vendor ID */
7866 ++ return 1;
7867 ++ default:
7868 ++ break;
7869 ++ }
7870 ++
7871 ++ return 0;
7872 ++}
7873 ++
7874 + /**
7875 + * scsi_vpd_lun_id - return a unique device identification
7876 + * @sdev: SCSI device
7877 +@@ -2946,7 +3018,7 @@ EXPORT_SYMBOL(sdev_enable_disk_events);
7878 + */
7879 + int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
7880 + {
7881 +- u8 cur_id_type = 0xff;
7882 ++ u8 cur_id_prio = 0;
7883 + u8 cur_id_size = 0;
7884 + const unsigned char *d, *cur_id_str;
7885 + const struct scsi_vpd *vpd_pg83;
7886 +@@ -2959,20 +3031,6 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
7887 + return -ENXIO;
7888 + }
7889 +
7890 +- /*
7891 +- * Look for the correct descriptor.
7892 +- * Order of preference for lun descriptor:
7893 +- * - SCSI name string
7894 +- * - NAA IEEE Registered Extended
7895 +- * - EUI-64 based 16-byte
7896 +- * - EUI-64 based 12-byte
7897 +- * - NAA IEEE Registered
7898 +- * - NAA IEEE Extended
7899 +- * - T10 Vendor ID
7900 +- * as longer descriptors reduce the likelyhood
7901 +- * of identification clashes.
7902 +- */
7903 +-
7904 + /* The id string must be at least 20 bytes + terminating NULL byte */
7905 + if (id_len < 21) {
7906 + rcu_read_unlock();
7907 +@@ -2982,8 +3040,9 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
7908 + memset(id, 0, id_len);
7909 + d = vpd_pg83->data + 4;
7910 + while (d < vpd_pg83->data + vpd_pg83->len) {
7911 +- /* Skip designators not referring to the LUN */
7912 +- if ((d[1] & 0x30) != 0x00)
7913 ++ u8 prio = designator_prio(d);
7914 ++
7915 ++ if (prio == 0 || cur_id_prio > prio)
7916 + goto next_desig;
7917 +
7918 + switch (d[1] & 0xf) {
7919 +@@ -2991,28 +3050,19 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
7920 + /* T10 Vendor ID */
7921 + if (cur_id_size > d[3])
7922 + break;
7923 +- /* Prefer anything */
7924 +- if (cur_id_type > 0x01 && cur_id_type != 0xff)
7925 +- break;
7926 ++ cur_id_prio = prio;
7927 + cur_id_size = d[3];
7928 + if (cur_id_size + 4 > id_len)
7929 + cur_id_size = id_len - 4;
7930 + cur_id_str = d + 4;
7931 +- cur_id_type = d[1] & 0xf;
7932 + id_size = snprintf(id, id_len, "t10.%*pE",
7933 + cur_id_size, cur_id_str);
7934 + break;
7935 + case 0x2:
7936 + /* EUI-64 */
7937 +- if (cur_id_size > d[3])
7938 +- break;
7939 +- /* Prefer NAA IEEE Registered Extended */
7940 +- if (cur_id_type == 0x3 &&
7941 +- cur_id_size == d[3])
7942 +- break;
7943 ++ cur_id_prio = prio;
7944 + cur_id_size = d[3];
7945 + cur_id_str = d + 4;
7946 +- cur_id_type = d[1] & 0xf;
7947 + switch (cur_id_size) {
7948 + case 8:
7949 + id_size = snprintf(id, id_len,
7950 +@@ -3030,17 +3080,14 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
7951 + cur_id_str);
7952 + break;
7953 + default:
7954 +- cur_id_size = 0;
7955 + break;
7956 + }
7957 + break;
7958 + case 0x3:
7959 + /* NAA */
7960 +- if (cur_id_size > d[3])
7961 +- break;
7962 ++ cur_id_prio = prio;
7963 + cur_id_size = d[3];
7964 + cur_id_str = d + 4;
7965 +- cur_id_type = d[1] & 0xf;
7966 + switch (cur_id_size) {
7967 + case 8:
7968 + id_size = snprintf(id, id_len,
7969 +@@ -3053,26 +3100,25 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
7970 + cur_id_str);
7971 + break;
7972 + default:
7973 +- cur_id_size = 0;
7974 + break;
7975 + }
7976 + break;
7977 + case 0x8:
7978 + /* SCSI name string */
7979 +- if (cur_id_size + 4 > d[3])
7980 ++ if (cur_id_size > d[3])
7981 + break;
7982 + /* Prefer others for truncated descriptor */
7983 +- if (cur_id_size && d[3] > id_len)
7984 +- break;
7985 ++ if (d[3] > id_len) {
7986 ++ prio = 2;
7987 ++ if (cur_id_prio > prio)
7988 ++ break;
7989 ++ }
7990 ++ cur_id_prio = prio;
7991 + cur_id_size = id_size = d[3];
7992 + cur_id_str = d + 4;
7993 +- cur_id_type = d[1] & 0xf;
7994 + if (cur_id_size >= id_len)
7995 + cur_id_size = id_len - 1;
7996 + memcpy(id, cur_id_str, cur_id_size);
7997 +- /* Decrease priority for truncated descriptor */
7998 +- if (cur_id_size != id_size)
7999 +- cur_id_size = 6;
8000 + break;
8001 + default:
8002 + break;
8003 +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
8004 +index ce265bf7de868..b60541c3f72da 100644
8005 +--- a/drivers/slimbus/qcom-ngd-ctrl.c
8006 ++++ b/drivers/slimbus/qcom-ngd-ctrl.c
8007 +@@ -1201,6 +1201,9 @@ static int qcom_slim_ngd_runtime_resume(struct device *dev)
8008 + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
8009 + int ret = 0;
8010 +
8011 ++ if (!ctrl->qmi.handle)
8012 ++ return 0;
8013 ++
8014 + if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
8015 + ret = qcom_slim_ngd_power_up(ctrl);
8016 + if (ret) {
8017 +@@ -1497,6 +1500,9 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
8018 + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
8019 + int ret = 0;
8020 +
8021 ++ if (!ctrl->qmi.handle)
8022 ++ return 0;
8023 ++
8024 + ret = qcom_slim_qmi_power_request(ctrl, false);
8025 + if (ret && ret != -EBUSY)
8026 + dev_info(ctrl->dev, "slim resource not idle:%d\n", ret);
8027 +diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
8028 +index c655f5f92b124..d0329ad170d13 100644
8029 +--- a/drivers/soc/amlogic/meson-canvas.c
8030 ++++ b/drivers/soc/amlogic/meson-canvas.c
8031 +@@ -72,8 +72,10 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
8032 + * current state, this driver probe cannot return -EPROBE_DEFER
8033 + */
8034 + canvas = dev_get_drvdata(&canvas_pdev->dev);
8035 +- if (!canvas)
8036 ++ if (!canvas) {
8037 ++ put_device(&canvas_pdev->dev);
8038 + return ERR_PTR(-EINVAL);
8039 ++ }
8040 +
8041 + return canvas;
8042 + }
8043 +diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
8044 +index 503222d0d0da1..75f25f08245fd 100644
8045 +--- a/drivers/soc/mediatek/mtk-scpsys.c
8046 ++++ b/drivers/soc/mediatek/mtk-scpsys.c
8047 +@@ -446,6 +446,7 @@ static void mtk_register_power_domains(struct platform_device *pdev,
8048 + for (i = 0; i < num; i++) {
8049 + struct scp_domain *scpd = &scp->domains[i];
8050 + struct generic_pm_domain *genpd = &scpd->genpd;
8051 ++ bool on;
8052 +
8053 + /*
8054 + * Initially turn on all domains to make the domains usable
8055 +@@ -453,9 +454,9 @@ static void mtk_register_power_domains(struct platform_device *pdev,
8056 + * software. The unused domains will be switched off during
8057 + * late_init time.
8058 + */
8059 +- genpd->power_on(genpd);
8060 ++ on = !WARN_ON(genpd->power_on(genpd) < 0);
8061 +
8062 +- pm_genpd_init(genpd, NULL, false);
8063 ++ pm_genpd_init(genpd, NULL, !on);
8064 + }
8065 +
8066 + /*
8067 +diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
8068 +index 7d622ea1274eb..2e66098e82975 100644
8069 +--- a/drivers/soc/qcom/qcom-geni-se.c
8070 ++++ b/drivers/soc/qcom/qcom-geni-se.c
8071 +@@ -282,10 +282,23 @@ static void geni_se_select_fifo_mode(struct geni_se *se)
8072 +
8073 + static void geni_se_select_dma_mode(struct geni_se *se)
8074 + {
8075 ++ u32 proto = geni_se_read_proto(se);
8076 + u32 val;
8077 +
8078 + geni_se_irq_clear(se);
8079 +
8080 ++ val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
8081 ++ if (proto != GENI_SE_UART) {
8082 ++ val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
8083 ++ val &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
8084 ++ }
8085 ++ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
8086 ++
8087 ++ val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
8088 ++ if (proto != GENI_SE_UART)
8089 ++ val &= ~S_CMD_DONE_EN;
8090 ++ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
8091 ++
8092 + val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
8093 + val |= GENI_DMA_MODE_EN;
8094 + writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
8095 +@@ -644,7 +657,7 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
8096 + writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L);
8097 + writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H);
8098 + writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR);
8099 +- writel_relaxed(len, se->base + SE_DMA_TX_LEN);
8100 ++ writel(len, se->base + SE_DMA_TX_LEN);
8101 + return 0;
8102 + }
8103 + EXPORT_SYMBOL(geni_se_tx_dma_prep);
8104 +@@ -681,7 +694,7 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
8105 + writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H);
8106 + /* RX does not have EOT buffer type bit. So just reset RX_ATTR */
8107 + writel_relaxed(0, se->base + SE_DMA_RX_ATTR);
8108 +- writel_relaxed(len, se->base + SE_DMA_RX_LEN);
8109 ++ writel(len, se->base + SE_DMA_RX_LEN);
8110 + return 0;
8111 + }
8112 + EXPORT_SYMBOL(geni_se_rx_dma_prep);
8113 +diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
8114 +index c7300d54e4447..42e0b8f647aef 100644
8115 +--- a/drivers/soc/qcom/smp2p.c
8116 ++++ b/drivers/soc/qcom/smp2p.c
8117 +@@ -318,15 +318,16 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
8118 + static int smp2p_update_bits(void *data, u32 mask, u32 value)
8119 + {
8120 + struct smp2p_entry *entry = data;
8121 ++ unsigned long flags;
8122 + u32 orig;
8123 + u32 val;
8124 +
8125 +- spin_lock(&entry->lock);
8126 ++ spin_lock_irqsave(&entry->lock, flags);
8127 + val = orig = readl(entry->value);
8128 + val &= ~mask;
8129 + val |= value;
8130 + writel(val, entry->value);
8131 +- spin_unlock(&entry->lock);
8132 ++ spin_unlock_irqrestore(&entry->lock, flags);
8133 +
8134 + if (val != orig)
8135 + qcom_smp2p_kick(entry->smp2p);
8136 +diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c
8137 +index 54b616ad4a62a..beb1c7211c3d6 100644
8138 +--- a/drivers/soc/renesas/rmobile-sysc.c
8139 ++++ b/drivers/soc/renesas/rmobile-sysc.c
8140 +@@ -327,6 +327,7 @@ static int __init rmobile_init_pm_domains(void)
8141 +
8142 + pmd = of_get_child_by_name(np, "pm-domains");
8143 + if (!pmd) {
8144 ++ iounmap(base);
8145 + pr_warn("%pOF lacks pm-domains node\n", np);
8146 + continue;
8147 + }
8148 +diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
8149 +index 70d3f6e1aa33d..8050742237b76 100644
8150 +--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
8151 ++++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
8152 +@@ -94,7 +94,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num)
8153 + unsigned int i;
8154 +
8155 + for (i = 0; i < num; i++)
8156 +- if (value < speedos[num])
8157 ++ if (value < speedos[i])
8158 + return i;
8159 +
8160 + return -EINVAL;
8161 +diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
8162 +index 6285cd8efb21b..981a9014c9c4e 100644
8163 +--- a/drivers/soc/ti/knav_dma.c
8164 ++++ b/drivers/soc/ti/knav_dma.c
8165 +@@ -759,8 +759,9 @@ static int knav_dma_probe(struct platform_device *pdev)
8166 + pm_runtime_enable(kdev->dev);
8167 + ret = pm_runtime_get_sync(kdev->dev);
8168 + if (ret < 0) {
8169 ++ pm_runtime_put_noidle(kdev->dev);
8170 + dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
8171 +- return ret;
8172 ++ goto err_pm_disable;
8173 + }
8174 +
8175 + /* Initialise all packet dmas */
8176 +@@ -774,7 +775,8 @@ static int knav_dma_probe(struct platform_device *pdev)
8177 +
8178 + if (list_empty(&kdev->list)) {
8179 + dev_err(dev, "no valid dma instance\n");
8180 +- return -ENODEV;
8181 ++ ret = -ENODEV;
8182 ++ goto err_put_sync;
8183 + }
8184 +
8185 + debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
8186 +@@ -782,6 +784,13 @@ static int knav_dma_probe(struct platform_device *pdev)
8187 +
8188 + device_ready = true;
8189 + return ret;
8190 ++
8191 ++err_put_sync:
8192 ++ pm_runtime_put_sync(kdev->dev);
8193 ++err_pm_disable:
8194 ++ pm_runtime_disable(kdev->dev);
8195 ++
8196 ++ return ret;
8197 + }
8198 +
8199 + static int knav_dma_remove(struct platform_device *pdev)
8200 +diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
8201 +index 1ccc9064e1eb5..b8210479ec997 100644
8202 +--- a/drivers/soc/ti/knav_qmss_queue.c
8203 ++++ b/drivers/soc/ti/knav_qmss_queue.c
8204 +@@ -1791,6 +1791,7 @@ static int knav_queue_probe(struct platform_device *pdev)
8205 + pm_runtime_enable(&pdev->dev);
8206 + ret = pm_runtime_get_sync(&pdev->dev);
8207 + if (ret < 0) {
8208 ++ pm_runtime_put_noidle(&pdev->dev);
8209 + dev_err(dev, "Failed to enable QMSS\n");
8210 + return ret;
8211 + }
8212 +@@ -1858,9 +1859,10 @@ static int knav_queue_probe(struct platform_device *pdev)
8213 + if (ret)
8214 + goto err;
8215 +
8216 +- regions = of_get_child_by_name(node, "descriptor-regions");
8217 ++ regions = of_get_child_by_name(node, "descriptor-regions");
8218 + if (!regions) {
8219 + dev_err(dev, "descriptor-regions not specified\n");
8220 ++ ret = -ENODEV;
8221 + goto err;
8222 + }
8223 + ret = knav_queue_setup_regions(kdev, regions);
8224 +diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
8225 +index 13def7f78b9e9..5fd929e023e18 100644
8226 +--- a/drivers/spi/atmel-quadspi.c
8227 ++++ b/drivers/spi/atmel-quadspi.c
8228 +@@ -284,10 +284,14 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
8229 + if (dummy_cycles)
8230 + ifr |= QSPI_IFR_NBDUM(dummy_cycles);
8231 +
8232 +- /* Set data enable */
8233 +- if (op->data.nbytes)
8234 ++ /* Set data enable and data transfer type. */
8235 ++ if (op->data.nbytes) {
8236 + ifr |= QSPI_IFR_DATAEN;
8237 +
8238 ++ if (op->addr.nbytes)
8239 ++ ifr |= QSPI_IFR_TFRTYP_MEM;
8240 ++ }
8241 ++
8242 + /*
8243 + * If the QSPI controller is set in regular SPI mode, set it in
8244 + * Serial Memory Mode (SMM).
8245 +@@ -312,7 +316,7 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
8246 + writel_relaxed(icr, aq->regs + QSPI_WICR);
8247 + writel_relaxed(ifr, aq->regs + QSPI_IFR);
8248 + } else {
8249 +- if (op->data.dir == SPI_MEM_DATA_OUT)
8250 ++ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
8251 + ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
8252 +
8253 + /* Set QSPI Instruction Frame registers */
8254 +@@ -454,7 +458,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
8255 + struct resource *res;
8256 + int irq, err = 0;
8257 +
8258 +- ctrl = spi_alloc_master(&pdev->dev, sizeof(*aq));
8259 ++ ctrl = devm_spi_alloc_master(&pdev->dev, sizeof(*aq));
8260 + if (!ctrl)
8261 + return -ENOMEM;
8262 +
8263 +@@ -476,8 +480,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
8264 + aq->regs = devm_ioremap_resource(&pdev->dev, res);
8265 + if (IS_ERR(aq->regs)) {
8266 + dev_err(&pdev->dev, "missing registers\n");
8267 +- err = PTR_ERR(aq->regs);
8268 +- goto exit;
8269 ++ return PTR_ERR(aq->regs);
8270 + }
8271 +
8272 + /* Map the AHB memory */
8273 +@@ -485,8 +488,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
8274 + aq->mem = devm_ioremap_resource(&pdev->dev, res);
8275 + if (IS_ERR(aq->mem)) {
8276 + dev_err(&pdev->dev, "missing AHB memory\n");
8277 +- err = PTR_ERR(aq->mem);
8278 +- goto exit;
8279 ++ return PTR_ERR(aq->mem);
8280 + }
8281 +
8282 + aq->mmap_size = resource_size(res);
8283 +@@ -498,22 +500,21 @@ static int atmel_qspi_probe(struct platform_device *pdev)
8284 +
8285 + if (IS_ERR(aq->pclk)) {
8286 + dev_err(&pdev->dev, "missing peripheral clock\n");
8287 +- err = PTR_ERR(aq->pclk);
8288 +- goto exit;
8289 ++ return PTR_ERR(aq->pclk);
8290 + }
8291 +
8292 + /* Enable the peripheral clock */
8293 + err = clk_prepare_enable(aq->pclk);
8294 + if (err) {
8295 + dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
8296 +- goto exit;
8297 ++ return err;
8298 + }
8299 +
8300 + aq->caps = of_device_get_match_data(&pdev->dev);
8301 + if (!aq->caps) {
8302 + dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
8303 + err = -EINVAL;
8304 +- goto exit;
8305 ++ goto disable_pclk;
8306 + }
8307 +
8308 + if (aq->caps->has_qspick) {
8309 +@@ -557,8 +558,6 @@ disable_qspick:
8310 + clk_disable_unprepare(aq->qspick);
8311 + disable_pclk:
8312 + clk_disable_unprepare(aq->pclk);
8313 +-exit:
8314 +- spi_controller_put(ctrl);
8315 +
8316 + return err;
8317 + }
8318 +diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
8319 +index 36f7eb8ab2df1..509476a2d79bb 100644
8320 +--- a/drivers/spi/spi-bcm63xx-hsspi.c
8321 ++++ b/drivers/spi/spi-bcm63xx-hsspi.c
8322 +@@ -483,8 +483,10 @@ static int bcm63xx_hsspi_resume(struct device *dev)
8323 +
8324 + if (bs->pll_clk) {
8325 + ret = clk_prepare_enable(bs->pll_clk);
8326 +- if (ret)
8327 ++ if (ret) {
8328 ++ clk_disable_unprepare(bs->clk);
8329 + return ret;
8330 ++ }
8331 + }
8332 +
8333 + spi_master_resume(master);
8334 +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
8335 +index f71c497393a6f..5260183a58a87 100644
8336 +--- a/drivers/spi/spi-davinci.c
8337 ++++ b/drivers/spi/spi-davinci.c
8338 +@@ -1040,13 +1040,13 @@ static int davinci_spi_remove(struct platform_device *pdev)
8339 + spi_bitbang_stop(&dspi->bitbang);
8340 +
8341 + clk_disable_unprepare(dspi->clk);
8342 +- spi_master_put(master);
8343 +
8344 + if (dspi->dma_rx) {
8345 + dma_release_channel(dspi->dma_rx);
8346 + dma_release_channel(dspi->dma_tx);
8347 + }
8348 +
8349 ++ spi_master_put(master);
8350 + return 0;
8351 + }
8352 +
8353 +diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
8354 +index be7c6ba730728..18a93a2854d80 100644
8355 +--- a/drivers/spi/spi-fsl-spi.c
8356 ++++ b/drivers/spi/spi-fsl-spi.c
8357 +@@ -717,10 +717,11 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
8358 + type = fsl_spi_get_type(&ofdev->dev);
8359 + if (type == TYPE_FSL) {
8360 + struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
8361 ++ bool spisel_boot = false;
8362 + #if IS_ENABLED(CONFIG_FSL_SOC)
8363 + struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
8364 +- bool spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
8365 +
8366 ++ spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
8367 + if (spisel_boot) {
8368 + pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
8369 + if (!pinfo->immr_spi_cs) {
8370 +@@ -737,10 +738,14 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
8371 + * supported on the GRLIB variant.
8372 + */
8373 + ret = gpiod_count(dev, "cs");
8374 +- if (ret <= 0)
8375 ++ if (ret < 0)
8376 ++ ret = 0;
8377 ++ if (ret == 0 && !spisel_boot) {
8378 + pdata->max_chipselect = 1;
8379 +- else
8380 ++ } else {
8381 ++ pdata->max_chipselect = ret + spisel_boot;
8382 + pdata->cs_control = fsl_spi_cs_control;
8383 ++ }
8384 + }
8385 +
8386 + ret = of_address_to_resource(np, 0, &mem);
8387 +diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
8388 +index f9c5bbb747142..e7dc1fad4a87c 100644
8389 +--- a/drivers/spi/spi-gpio.c
8390 ++++ b/drivers/spi/spi-gpio.c
8391 +@@ -350,11 +350,6 @@ static int spi_gpio_probe_pdata(struct platform_device *pdev,
8392 + return 0;
8393 + }
8394 +
8395 +-static void spi_gpio_put(void *data)
8396 +-{
8397 +- spi_master_put(data);
8398 +-}
8399 +-
8400 + static int spi_gpio_probe(struct platform_device *pdev)
8401 + {
8402 + int status;
8403 +@@ -366,16 +361,10 @@ static int spi_gpio_probe(struct platform_device *pdev)
8404 +
8405 + of_id = of_match_device(spi_gpio_dt_ids, &pdev->dev);
8406 +
8407 +- master = spi_alloc_master(dev, sizeof(*spi_gpio));
8408 ++ master = devm_spi_alloc_master(dev, sizeof(*spi_gpio));
8409 + if (!master)
8410 + return -ENOMEM;
8411 +
8412 +- status = devm_add_action_or_reset(&pdev->dev, spi_gpio_put, master);
8413 +- if (status) {
8414 +- spi_master_put(master);
8415 +- return status;
8416 +- }
8417 +-
8418 + if (of_id)
8419 + status = spi_gpio_probe_dt(pdev, master);
8420 + else
8421 +@@ -435,7 +424,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
8422 + if (status)
8423 + return status;
8424 +
8425 +- return devm_spi_register_master(&pdev->dev, spi_master_get(master));
8426 ++ return devm_spi_register_master(&pdev->dev, master);
8427 + }
8428 +
8429 + MODULE_ALIAS("platform:" DRIVER_NAME);
8430 +diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
8431 +index f4a8f470aecc2..e9ef80983b791 100644
8432 +--- a/drivers/spi/spi-img-spfi.c
8433 ++++ b/drivers/spi/spi-img-spfi.c
8434 +@@ -771,8 +771,10 @@ static int img_spfi_resume(struct device *dev)
8435 + int ret;
8436 +
8437 + ret = pm_runtime_get_sync(dev);
8438 +- if (ret)
8439 ++ if (ret) {
8440 ++ pm_runtime_put_noidle(dev);
8441 + return ret;
8442 ++ }
8443 + spfi_reset(spfi);
8444 + pm_runtime_put(dev);
8445 +
8446 +diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
8447 +index de0ba3e5449fa..33115bcfc787e 100644
8448 +--- a/drivers/spi/spi-mem.c
8449 ++++ b/drivers/spi/spi-mem.c
8450 +@@ -237,6 +237,7 @@ static int spi_mem_access_start(struct spi_mem *mem)
8451 +
8452 + ret = pm_runtime_get_sync(ctlr->dev.parent);
8453 + if (ret < 0) {
8454 ++ pm_runtime_put_noidle(ctlr->dev.parent);
8455 + dev_err(&ctlr->dev, "Failed to power device: %d\n",
8456 + ret);
8457 + return ret;
8458 +diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
8459 +index 2c3b7a2a1ec77..b4b9b7309b5e9 100644
8460 +--- a/drivers/spi/spi-mt7621.c
8461 ++++ b/drivers/spi/spi-mt7621.c
8462 +@@ -350,9 +350,10 @@ static int mt7621_spi_probe(struct platform_device *pdev)
8463 + if (status)
8464 + return status;
8465 +
8466 +- master = spi_alloc_master(&pdev->dev, sizeof(*rs));
8467 ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rs));
8468 + if (!master) {
8469 + dev_info(&pdev->dev, "master allocation failed\n");
8470 ++ clk_disable_unprepare(clk);
8471 + return -ENOMEM;
8472 + }
8473 +
8474 +@@ -377,10 +378,15 @@ static int mt7621_spi_probe(struct platform_device *pdev)
8475 + ret = device_reset(&pdev->dev);
8476 + if (ret) {
8477 + dev_err(&pdev->dev, "SPI reset failed!\n");
8478 ++ clk_disable_unprepare(clk);
8479 + return ret;
8480 + }
8481 +
8482 +- return devm_spi_register_controller(&pdev->dev, master);
8483 ++ ret = spi_register_controller(master);
8484 ++ if (ret)
8485 ++ clk_disable_unprepare(clk);
8486 ++
8487 ++ return ret;
8488 + }
8489 +
8490 + static int mt7621_spi_remove(struct platform_device *pdev)
8491 +@@ -391,6 +397,7 @@ static int mt7621_spi_remove(struct platform_device *pdev)
8492 + master = dev_get_drvdata(&pdev->dev);
8493 + rs = spi_controller_get_devdata(master);
8494 +
8495 ++ spi_unregister_controller(master);
8496 + clk_disable_unprepare(rs->clk);
8497 +
8498 + return 0;
8499 +diff --git a/drivers/spi/spi-mxic.c b/drivers/spi/spi-mxic.c
8500 +index f48563c09b97c..eba706d5671e2 100644
8501 +--- a/drivers/spi/spi-mxic.c
8502 ++++ b/drivers/spi/spi-mxic.c
8503 +@@ -528,7 +528,7 @@ static int mxic_spi_probe(struct platform_device *pdev)
8504 + struct mxic_spi *mxic;
8505 + int ret;
8506 +
8507 +- master = spi_alloc_master(&pdev->dev, sizeof(struct mxic_spi));
8508 ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct mxic_spi));
8509 + if (!master)
8510 + return -ENOMEM;
8511 +
8512 +@@ -573,15 +573,9 @@ static int mxic_spi_probe(struct platform_device *pdev)
8513 + ret = spi_register_master(master);
8514 + if (ret) {
8515 + dev_err(&pdev->dev, "spi_register_master failed\n");
8516 +- goto err_put_master;
8517 ++ pm_runtime_disable(&pdev->dev);
8518 + }
8519 +
8520 +- return 0;
8521 +-
8522 +-err_put_master:
8523 +- spi_master_put(master);
8524 +- pm_runtime_disable(&pdev->dev);
8525 +-
8526 + return ret;
8527 + }
8528 +
8529 +diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
8530 +index 996c1c8a9c719..34856c9ad931a 100644
8531 +--- a/drivers/spi/spi-mxs.c
8532 ++++ b/drivers/spi/spi-mxs.c
8533 +@@ -608,6 +608,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
8534 +
8535 + ret = pm_runtime_get_sync(ssp->dev);
8536 + if (ret < 0) {
8537 ++ pm_runtime_put_noidle(ssp->dev);
8538 + dev_err(ssp->dev, "runtime_get_sync failed\n");
8539 + goto out_pm_runtime_disable;
8540 + }
8541 +diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
8542 +index 69f517ec59c68..8272bde5d706f 100644
8543 +--- a/drivers/spi/spi-pic32.c
8544 ++++ b/drivers/spi/spi-pic32.c
8545 +@@ -825,6 +825,7 @@ static int pic32_spi_probe(struct platform_device *pdev)
8546 + return 0;
8547 +
8548 + err_bailout:
8549 ++ pic32_spi_dma_unprep(pic32s);
8550 + clk_disable_unprepare(pic32s->clk);
8551 + err_master:
8552 + spi_master_put(master);
8553 +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
8554 +index 7f4285e2ae682..f5a10a94f156d 100644
8555 +--- a/drivers/spi/spi-pxa2xx.c
8556 ++++ b/drivers/spi/spi-pxa2xx.c
8557 +@@ -1675,9 +1675,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
8558 + }
8559 +
8560 + if (platform_info->is_slave)
8561 +- controller = spi_alloc_slave(dev, sizeof(struct driver_data));
8562 ++ controller = devm_spi_alloc_slave(dev, sizeof(*drv_data));
8563 + else
8564 +- controller = spi_alloc_master(dev, sizeof(struct driver_data));
8565 ++ controller = devm_spi_alloc_master(dev, sizeof(*drv_data));
8566 +
8567 + if (!controller) {
8568 + dev_err(&pdev->dev, "cannot alloc spi_controller\n");
8569 +@@ -1900,7 +1900,6 @@ out_error_dma_irq_alloc:
8570 + free_irq(ssp->irq, drv_data);
8571 +
8572 + out_error_controller_alloc:
8573 +- spi_controller_put(controller);
8574 + pxa_ssp_free(ssp);
8575 + return status;
8576 + }
8577 +diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
8578 +index 4c9620e0d18cc..1ad3f11a22b22 100644
8579 +--- a/drivers/spi/spi-rb4xx.c
8580 ++++ b/drivers/spi/spi-rb4xx.c
8581 +@@ -142,7 +142,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
8582 + if (IS_ERR(spi_base))
8583 + return PTR_ERR(spi_base);
8584 +
8585 +- master = spi_alloc_master(&pdev->dev, sizeof(*rbspi));
8586 ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rbspi));
8587 + if (!master)
8588 + return -ENOMEM;
8589 +
8590 +diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
8591 +index 11acddc833041..ced365efc5f07 100644
8592 +--- a/drivers/spi/spi-sc18is602.c
8593 ++++ b/drivers/spi/spi-sc18is602.c
8594 +@@ -239,13 +239,12 @@ static int sc18is602_probe(struct i2c_client *client,
8595 + struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
8596 + struct sc18is602 *hw;
8597 + struct spi_master *master;
8598 +- int error;
8599 +
8600 + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
8601 + I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
8602 + return -EINVAL;
8603 +
8604 +- master = spi_alloc_master(dev, sizeof(struct sc18is602));
8605 ++ master = devm_spi_alloc_master(dev, sizeof(struct sc18is602));
8606 + if (!master)
8607 + return -ENOMEM;
8608 +
8609 +@@ -299,15 +298,7 @@ static int sc18is602_probe(struct i2c_client *client,
8610 + master->min_speed_hz = hw->freq / 128;
8611 + master->max_speed_hz = hw->freq / 4;
8612 +
8613 +- error = devm_spi_register_master(dev, master);
8614 +- if (error)
8615 +- goto error_reg;
8616 +-
8617 +- return 0;
8618 +-
8619 +-error_reg:
8620 +- spi_master_put(master);
8621 +- return error;
8622 ++ return devm_spi_register_master(dev, master);
8623 + }
8624 +
8625 + static const struct i2c_device_id sc18is602_id[] = {
8626 +diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
8627 +index 20bdae5fdf3b8..15123a8f41e1e 100644
8628 +--- a/drivers/spi/spi-sh.c
8629 ++++ b/drivers/spi/spi-sh.c
8630 +@@ -440,7 +440,7 @@ static int spi_sh_probe(struct platform_device *pdev)
8631 + if (irq < 0)
8632 + return irq;
8633 +
8634 +- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
8635 ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
8636 + if (master == NULL) {
8637 + dev_err(&pdev->dev, "spi_alloc_master error.\n");
8638 + return -ENOMEM;
8639 +@@ -458,16 +458,14 @@ static int spi_sh_probe(struct platform_device *pdev)
8640 + break;
8641 + default:
8642 + dev_err(&pdev->dev, "No support width\n");
8643 +- ret = -ENODEV;
8644 +- goto error1;
8645 ++ return -ENODEV;
8646 + }
8647 + ss->irq = irq;
8648 + ss->master = master;
8649 + ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
8650 + if (ss->addr == NULL) {
8651 + dev_err(&pdev->dev, "ioremap error.\n");
8652 +- ret = -ENOMEM;
8653 +- goto error1;
8654 ++ return -ENOMEM;
8655 + }
8656 + INIT_LIST_HEAD(&ss->queue);
8657 + spin_lock_init(&ss->lock);
8658 +@@ -477,7 +475,7 @@ static int spi_sh_probe(struct platform_device *pdev)
8659 + ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
8660 + if (ret < 0) {
8661 + dev_err(&pdev->dev, "request_irq error\n");
8662 +- goto error1;
8663 ++ return ret;
8664 + }
8665 +
8666 + master->num_chipselect = 2;
8667 +@@ -496,9 +494,6 @@ static int spi_sh_probe(struct platform_device *pdev)
8668 +
8669 + error3:
8670 + free_irq(irq, ss);
8671 +- error1:
8672 +- spi_master_put(master);
8673 +-
8674 + return ret;
8675 + }
8676 +
8677 +diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
8678 +index e60aff9903951..c2bdf19ccdd26 100644
8679 +--- a/drivers/spi/spi-sprd.c
8680 ++++ b/drivers/spi/spi-sprd.c
8681 +@@ -1008,6 +1008,7 @@ static int sprd_spi_remove(struct platform_device *pdev)
8682 +
8683 + ret = pm_runtime_get_sync(ss->dev);
8684 + if (ret < 0) {
8685 ++ pm_runtime_put_noidle(ss->dev);
8686 + dev_err(ss->dev, "failed to resume SPI controller\n");
8687 + return ret;
8688 + }
8689 +diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
8690 +index 77d26d64541a5..6c44dda9ee8c5 100644
8691 +--- a/drivers/spi/spi-st-ssc4.c
8692 ++++ b/drivers/spi/spi-st-ssc4.c
8693 +@@ -375,13 +375,14 @@ static int spi_st_probe(struct platform_device *pdev)
8694 + ret = devm_spi_register_master(&pdev->dev, master);
8695 + if (ret) {
8696 + dev_err(&pdev->dev, "Failed to register master\n");
8697 +- goto clk_disable;
8698 ++ goto rpm_disable;
8699 + }
8700 +
8701 + return 0;
8702 +
8703 +-clk_disable:
8704 ++rpm_disable:
8705 + pm_runtime_disable(&pdev->dev);
8706 ++clk_disable:
8707 + clk_disable_unprepare(spi_st->clk);
8708 + put_master:
8709 + spi_master_put(master);
8710 +diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
8711 +index 9d8ceb63f7db1..ed20ad2950885 100644
8712 +--- a/drivers/spi/spi-stm32.c
8713 ++++ b/drivers/spi/spi-stm32.c
8714 +@@ -2055,6 +2055,7 @@ static int stm32_spi_resume(struct device *dev)
8715 +
8716 + ret = pm_runtime_get_sync(dev);
8717 + if (ret < 0) {
8718 ++ pm_runtime_put_noidle(dev);
8719 + dev_err(dev, "Unable to power device:%d\n", ret);
8720 + return ret;
8721 + }
8722 +diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
8723 +index ae17c99cce037..5ab5119e2f1b0 100644
8724 +--- a/drivers/spi/spi-synquacer.c
8725 ++++ b/drivers/spi/spi-synquacer.c
8726 +@@ -658,7 +658,8 @@ static int synquacer_spi_probe(struct platform_device *pdev)
8727 +
8728 + if (!master->max_speed_hz) {
8729 + dev_err(&pdev->dev, "missing clock source\n");
8730 +- return -EINVAL;
8731 ++ ret = -EINVAL;
8732 ++ goto disable_clk;
8733 + }
8734 + master->min_speed_hz = master->max_speed_hz / 254;
8735 +
8736 +@@ -671,7 +672,7 @@ static int synquacer_spi_probe(struct platform_device *pdev)
8737 + rx_irq = platform_get_irq(pdev, 0);
8738 + if (rx_irq <= 0) {
8739 + ret = rx_irq;
8740 +- goto put_spi;
8741 ++ goto disable_clk;
8742 + }
8743 + snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx",
8744 + dev_name(&pdev->dev));
8745 +@@ -679,13 +680,13 @@ static int synquacer_spi_probe(struct platform_device *pdev)
8746 + 0, sspi->rx_irq_name, sspi);
8747 + if (ret) {
8748 + dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret);
8749 +- goto put_spi;
8750 ++ goto disable_clk;
8751 + }
8752 +
8753 + tx_irq = platform_get_irq(pdev, 1);
8754 + if (tx_irq <= 0) {
8755 + ret = tx_irq;
8756 +- goto put_spi;
8757 ++ goto disable_clk;
8758 + }
8759 + snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx",
8760 + dev_name(&pdev->dev));
8761 +@@ -693,7 +694,7 @@ static int synquacer_spi_probe(struct platform_device *pdev)
8762 + 0, sspi->tx_irq_name, sspi);
8763 + if (ret) {
8764 + dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret);
8765 +- goto put_spi;
8766 ++ goto disable_clk;
8767 + }
8768 +
8769 + master->dev.of_node = np;
8770 +@@ -711,7 +712,7 @@ static int synquacer_spi_probe(struct platform_device *pdev)
8771 +
8772 + ret = synquacer_spi_enable(master);
8773 + if (ret)
8774 +- goto fail_enable;
8775 ++ goto disable_clk;
8776 +
8777 + pm_runtime_set_active(sspi->dev);
8778 + pm_runtime_enable(sspi->dev);
8779 +@@ -724,7 +725,7 @@ static int synquacer_spi_probe(struct platform_device *pdev)
8780 +
8781 + disable_pm:
8782 + pm_runtime_disable(sspi->dev);
8783 +-fail_enable:
8784 ++disable_clk:
8785 + clk_disable_unprepare(sspi->clk);
8786 + put_spi:
8787 + spi_master_put(master);
8788 +diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
8789 +index 39374c2edcf3b..594905bf89aa8 100644
8790 +--- a/drivers/spi/spi-tegra114.c
8791 ++++ b/drivers/spi/spi-tegra114.c
8792 +@@ -954,6 +954,7 @@ static int tegra_spi_setup(struct spi_device *spi)
8793 +
8794 + ret = pm_runtime_get_sync(tspi->dev);
8795 + if (ret < 0) {
8796 ++ pm_runtime_put_noidle(tspi->dev);
8797 + dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
8798 + if (cdata)
8799 + tegra_spi_cleanup(spi);
8800 +@@ -1472,6 +1473,7 @@ static int tegra_spi_resume(struct device *dev)
8801 +
8802 + ret = pm_runtime_get_sync(dev);
8803 + if (ret < 0) {
8804 ++ pm_runtime_put_noidle(dev);
8805 + dev_err(dev, "pm runtime failed, e = %d\n", ret);
8806 + return ret;
8807 + }
8808 +diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
8809 +index a841a7250d14b..ecb620169753a 100644
8810 +--- a/drivers/spi/spi-tegra20-sflash.c
8811 ++++ b/drivers/spi/spi-tegra20-sflash.c
8812 +@@ -551,6 +551,7 @@ static int tegra_sflash_resume(struct device *dev)
8813 +
8814 + ret = pm_runtime_get_sync(dev);
8815 + if (ret < 0) {
8816 ++ pm_runtime_put_noidle(dev);
8817 + dev_err(dev, "pm runtime failed, e = %d\n", ret);
8818 + return ret;
8819 + }
8820 +diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
8821 +index 374a2a32edcd3..2a1905c43a0b7 100644
8822 +--- a/drivers/spi/spi-tegra20-slink.c
8823 ++++ b/drivers/spi/spi-tegra20-slink.c
8824 +@@ -756,6 +756,7 @@ static int tegra_slink_setup(struct spi_device *spi)
8825 +
8826 + ret = pm_runtime_get_sync(tspi->dev);
8827 + if (ret < 0) {
8828 ++ pm_runtime_put_noidle(tspi->dev);
8829 + dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
8830 + return ret;
8831 + }
8832 +@@ -1192,6 +1193,7 @@ static int tegra_slink_resume(struct device *dev)
8833 +
8834 + ret = pm_runtime_get_sync(dev);
8835 + if (ret < 0) {
8836 ++ pm_runtime_put_noidle(dev);
8837 + dev_err(dev, "pm runtime failed, e = %d\n", ret);
8838 + return ret;
8839 + }
8840 +diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
8841 +index 66dcb61285392..cad2abcbd9c78 100644
8842 +--- a/drivers/spi/spi-ti-qspi.c
8843 ++++ b/drivers/spi/spi-ti-qspi.c
8844 +@@ -176,6 +176,7 @@ static int ti_qspi_setup(struct spi_device *spi)
8845 +
8846 + ret = pm_runtime_get_sync(qspi->dev);
8847 + if (ret < 0) {
8848 ++ pm_runtime_put_noidle(qspi->dev);
8849 + dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
8850 + return ret;
8851 + }
8852 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
8853 +index 4c96c7c9e335e..e1205d72be523 100644
8854 +--- a/drivers/spi/spi.c
8855 ++++ b/drivers/spi/spi.c
8856 +@@ -405,9 +405,11 @@ static int spi_drv_probe(struct device *dev)
8857 + if (ret)
8858 + return ret;
8859 +
8860 +- ret = sdrv->probe(spi);
8861 +- if (ret)
8862 +- dev_pm_domain_detach(dev, true);
8863 ++ if (sdrv->probe) {
8864 ++ ret = sdrv->probe(spi);
8865 ++ if (ret)
8866 ++ dev_pm_domain_detach(dev, true);
8867 ++ }
8868 +
8869 + return ret;
8870 + }
8871 +@@ -415,9 +417,10 @@ static int spi_drv_probe(struct device *dev)
8872 + static int spi_drv_remove(struct device *dev)
8873 + {
8874 + const struct spi_driver *sdrv = to_spi_driver(dev->driver);
8875 +- int ret;
8876 ++ int ret = 0;
8877 +
8878 +- ret = sdrv->remove(to_spi_device(dev));
8879 ++ if (sdrv->remove)
8880 ++ ret = sdrv->remove(to_spi_device(dev));
8881 + dev_pm_domain_detach(dev, true);
8882 +
8883 + return ret;
8884 +@@ -442,10 +445,8 @@ int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
8885 + {
8886 + sdrv->driver.owner = owner;
8887 + sdrv->driver.bus = &spi_bus_type;
8888 +- if (sdrv->probe)
8889 +- sdrv->driver.probe = spi_drv_probe;
8890 +- if (sdrv->remove)
8891 +- sdrv->driver.remove = spi_drv_remove;
8892 ++ sdrv->driver.probe = spi_drv_probe;
8893 ++ sdrv->driver.remove = spi_drv_remove;
8894 + if (sdrv->shutdown)
8895 + sdrv->driver.shutdown = spi_drv_shutdown;
8896 + return driver_register(&sdrv->driver);
8897 +diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
8898 +index ea430237efa7f..9da8dd748078d 100644
8899 +--- a/drivers/staging/comedi/drivers/mf6x4.c
8900 ++++ b/drivers/staging/comedi/drivers/mf6x4.c
8901 +@@ -112,8 +112,9 @@ static int mf6x4_ai_eoc(struct comedi_device *dev,
8902 + struct mf6x4_private *devpriv = dev->private;
8903 + unsigned int status;
8904 +
8905 ++ /* EOLC goes low at end of conversion. */
8906 + status = ioread32(devpriv->gpioc_reg);
8907 +- if (status & MF6X4_GPIOC_EOLC)
8908 ++ if ((status & MF6X4_GPIOC_EOLC) == 0)
8909 + return 0;
8910 + return -EBUSY;
8911 + }
8912 +diff --git a/drivers/staging/gasket/gasket_interrupt.c b/drivers/staging/gasket/gasket_interrupt.c
8913 +index 2d6195f7300e9..864342acfd86e 100644
8914 +--- a/drivers/staging/gasket/gasket_interrupt.c
8915 ++++ b/drivers/staging/gasket/gasket_interrupt.c
8916 +@@ -487,14 +487,16 @@ int gasket_interrupt_system_status(struct gasket_dev *gasket_dev)
8917 + int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data,
8918 + int interrupt, int event_fd)
8919 + {
8920 +- struct eventfd_ctx *ctx = eventfd_ctx_fdget(event_fd);
8921 +-
8922 +- if (IS_ERR(ctx))
8923 +- return PTR_ERR(ctx);
8924 ++ struct eventfd_ctx *ctx;
8925 +
8926 + if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
8927 + return -EINVAL;
8928 +
8929 ++ ctx = eventfd_ctx_fdget(event_fd);
8930 ++
8931 ++ if (IS_ERR(ctx))
8932 ++ return PTR_ERR(ctx);
8933 ++
8934 + interrupt_data->eventfd_ctxs[interrupt] = ctx;
8935 + return 0;
8936 + }
8937 +@@ -505,6 +507,9 @@ int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data,
8938 + if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
8939 + return -EINVAL;
8940 +
8941 +- interrupt_data->eventfd_ctxs[interrupt] = NULL;
8942 ++ if (interrupt_data->eventfd_ctxs[interrupt]) {
8943 ++ eventfd_ctx_put(interrupt_data->eventfd_ctxs[interrupt]);
8944 ++ interrupt_data->eventfd_ctxs[interrupt] = NULL;
8945 ++ }
8946 + return 0;
8947 + }
8948 +diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
8949 +index 08746c85dea6d..3259bf02ba25e 100644
8950 +--- a/drivers/staging/greybus/audio_codec.c
8951 ++++ b/drivers/staging/greybus/audio_codec.c
8952 +@@ -489,6 +489,7 @@ static int gbcodec_hw_params(struct snd_pcm_substream *substream,
8953 + if (ret) {
8954 + dev_err_ratelimited(dai->dev, "%d: Error during set_config\n",
8955 + ret);
8956 ++ gb_pm_runtime_put_noidle(bundle);
8957 + mutex_unlock(&codec->lock);
8958 + return ret;
8959 + }
8960 +@@ -565,6 +566,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
8961 + break;
8962 + }
8963 + if (ret) {
8964 ++ gb_pm_runtime_put_noidle(bundle);
8965 + mutex_unlock(&codec->lock);
8966 + dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n",
8967 + ret);
8968 +diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
8969 +index dccb4ea29d379..45ac48d031805 100644
8970 +--- a/drivers/staging/speakup/speakup_dectlk.c
8971 ++++ b/drivers/staging/speakup/speakup_dectlk.c
8972 +@@ -37,7 +37,7 @@ static unsigned char get_index(struct spk_synth *synth);
8973 + static int in_escape;
8974 + static int is_flushing;
8975 +
8976 +-static spinlock_t flush_lock;
8977 ++static DEFINE_SPINLOCK(flush_lock);
8978 + static DECLARE_WAIT_QUEUE_HEAD(flush);
8979 +
8980 + static struct var_t vars[] = {
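
The speakup hunk above replaces a bare "static spinlock_t flush_lock;" with DEFINE_SPINLOCK(): a static spinlock_t is not implicitly initialized and must be set up either statically or via spin_lock_init() before first use. A tiny sketch of both forms; the lock names are made up:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);           /* statically initialized */

static spinlock_t example_runtime_lock;         /* or initialized at runtime: */

static void example_locks_init(void)
{
        spin_lock_init(&example_runtime_lock);
}
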
8981 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
8982 +index 7c27827857363..ad262349703bf 100644
8983 +--- a/drivers/tty/serial/serial_core.c
8984 ++++ b/drivers/tty/serial/serial_core.c
8985 +@@ -1465,6 +1465,10 @@ static void uart_set_ldisc(struct tty_struct *tty)
8986 + {
8987 + struct uart_state *state = tty->driver_data;
8988 + struct uart_port *uport;
8989 ++ struct tty_port *port = &state->port;
8990 ++
8991 ++ if (!tty_port_initialized(port))
8992 ++ return;
8993 +
8994 + mutex_lock(&state->port.mutex);
8995 + uport = uart_port_check(state);
8996 +diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
8997 +index df8812c306407..c08bcce04276e 100644
8998 +--- a/drivers/usb/chipidea/ci_hdrc_imx.c
8999 ++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
9000 +@@ -57,7 +57,8 @@ static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = {
9001 +
9002 + static const struct ci_hdrc_imx_platform_flag imx6ul_usb_data = {
9003 + .flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
9004 +- CI_HDRC_TURN_VBUS_EARLY_ON,
9005 ++ CI_HDRC_TURN_VBUS_EARLY_ON |
9006 ++ CI_HDRC_DISABLE_DEVICE_STREAMING,
9007 + };
9008 +
9009 + static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
9010 +diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
9011 +index 9fc98de836249..add0f7ead55cc 100644
9012 +--- a/drivers/usb/gadget/function/f_acm.c
9013 ++++ b/drivers/usb/gadget/function/f_acm.c
9014 +@@ -684,7 +684,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
9015 + acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress;
9016 +
9017 + status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
9018 +- acm_ss_function, NULL);
9019 ++ acm_ss_function, acm_ss_function);
9020 + if (status)
9021 + goto fail;
9022 +
9023 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
9024 +index d2cfb8ff9ca8a..df003bc67da07 100644
9025 +--- a/drivers/usb/gadget/function/f_fs.c
9026 ++++ b/drivers/usb/gadget/function/f_fs.c
9027 +@@ -1332,6 +1332,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
9028 +
9029 + switch (epfile->ffs->gadget->speed) {
9030 + case USB_SPEED_SUPER:
9031 ++ case USB_SPEED_SUPER_PLUS:
9032 + desc_idx = 2;
9033 + break;
9034 + case USB_SPEED_HIGH:
9035 +@@ -3193,7 +3194,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
9036 + }
9037 +
9038 + if (likely(super)) {
9039 +- func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
9040 ++ func->function.ss_descriptors = func->function.ssp_descriptors =
9041 ++ vla_ptr(vlabuf, d, ss_descs);
9042 + ss_len = ffs_do_descs(ffs->ss_descs_count,
9043 + vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
9044 + d_raw_descs__sz - fs_len - hs_len,
9045 +@@ -3603,6 +3605,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
9046 + func->function.fs_descriptors = NULL;
9047 + func->function.hs_descriptors = NULL;
9048 + func->function.ss_descriptors = NULL;
9049 ++ func->function.ssp_descriptors = NULL;
9050 + func->interfaces_nums = NULL;
9051 +
9052 + ffs_event_add(ffs, FUNCTIONFS_UNBIND);
9053 +diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
9054 +index b2b5b0689667b..0e083a53da534 100644
9055 +--- a/drivers/usb/gadget/function/f_midi.c
9056 ++++ b/drivers/usb/gadget/function/f_midi.c
9057 +@@ -1048,6 +1048,12 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
9058 + f->ss_descriptors = usb_copy_descriptors(midi_function);
9059 + if (!f->ss_descriptors)
9060 + goto fail_f_midi;
9061 ++
9062 ++ if (gadget_is_superspeed_plus(c->cdev->gadget)) {
9063 ++ f->ssp_descriptors = usb_copy_descriptors(midi_function);
9064 ++ if (!f->ssp_descriptors)
9065 ++ goto fail_f_midi;
9066 ++ }
9067 + }
9068 +
9069 + kfree(midi_function);
9070 +diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
9071 +index 0d8e4a364ca6e..cc1ff5b7b60c4 100644
9072 +--- a/drivers/usb/gadget/function/f_rndis.c
9073 ++++ b/drivers/usb/gadget/function/f_rndis.c
9074 +@@ -87,8 +87,10 @@ static inline struct f_rndis *func_to_rndis(struct usb_function *f)
9075 + /* peak (theoretical) bulk transfer rate in bits-per-second */
9076 + static unsigned int bitrate(struct usb_gadget *g)
9077 + {
9078 ++ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
9079 ++ return 4250000000U;
9080 + if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
9081 +- return 13 * 1024 * 8 * 1000 * 8;
9082 ++ return 3750000000U;
9083 + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
9084 + return 13 * 512 * 8 * 1000 * 8;
9085 + else
9086 +diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
9087 +index fc125b3d06e7d..03122dc332eda 100644
9088 +--- a/drivers/usb/host/ehci-omap.c
9089 ++++ b/drivers/usb/host/ehci-omap.c
9090 +@@ -220,6 +220,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
9091 +
9092 + err_pm_runtime:
9093 + pm_runtime_put_sync(dev);
9094 ++ pm_runtime_disable(dev);
9095 +
9096 + err_phy:
9097 + for (i = 0; i < omap->nports; i++) {
9098 +diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
9099 +index 8819f502b6a68..903abdf30b5a0 100644
9100 +--- a/drivers/usb/host/max3421-hcd.c
9101 ++++ b/drivers/usb/host/max3421-hcd.c
9102 +@@ -1847,7 +1847,7 @@ max3421_probe(struct spi_device *spi)
9103 + struct max3421_hcd *max3421_hcd;
9104 + struct usb_hcd *hcd = NULL;
9105 + struct max3421_hcd_platform_data *pdata = NULL;
9106 +- int retval = -ENOMEM;
9107 ++ int retval;
9108 +
9109 + if (spi_setup(spi) < 0) {
9110 + dev_err(&spi->dev, "Unable to setup SPI bus");
9111 +@@ -1889,6 +1889,7 @@ max3421_probe(struct spi_device *spi)
9112 + goto error;
9113 + }
9114 +
9115 ++ retval = -ENOMEM;
9116 + hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
9117 + dev_name(&spi->dev));
9118 + if (!hcd) {
9119 +diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
9120 +index e67242e437edc..65985247fc00f 100644
9121 +--- a/drivers/usb/host/oxu210hp-hcd.c
9122 ++++ b/drivers/usb/host/oxu210hp-hcd.c
9123 +@@ -4149,8 +4149,10 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev,
9124 + oxu->is_otg = otg;
9125 +
9126 + ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
9127 +- if (ret < 0)
9128 ++ if (ret < 0) {
9129 ++ usb_put_hcd(hcd);
9130 + return ERR_PTR(ret);
9131 ++ }
9132 +
9133 + device_wakeup_enable(hcd->self.controller);
9134 + return hcd;
9135 +diff --git a/drivers/usb/mtu3/mtu3_debugfs.c b/drivers/usb/mtu3/mtu3_debugfs.c
9136 +index c96e5dab0a480..25b9635b60bb7 100644
9137 +--- a/drivers/usb/mtu3/mtu3_debugfs.c
9138 ++++ b/drivers/usb/mtu3/mtu3_debugfs.c
9139 +@@ -127,7 +127,7 @@ static void mtu3_debugfs_regset(struct mtu3 *mtu, void __iomem *base,
9140 + struct debugfs_regset32 *regset;
9141 + struct mtu3_regset *mregs;
9142 +
9143 +- mregs = devm_kzalloc(mtu->dev, sizeof(*regset), GFP_KERNEL);
9144 ++ mregs = devm_kzalloc(mtu->dev, sizeof(*mregs), GFP_KERNEL);
9145 + if (!mregs)
9146 + return;
9147 +
9148 +diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
9149 +index 578ebdd865205..e0ef758eade50 100644
9150 +--- a/drivers/usb/serial/digi_acceleport.c
9151 ++++ b/drivers/usb/serial/digi_acceleport.c
9152 +@@ -19,7 +19,6 @@
9153 + #include <linux/tty_flip.h>
9154 + #include <linux/module.h>
9155 + #include <linux/spinlock.h>
9156 +-#include <linux/workqueue.h>
9157 + #include <linux/uaccess.h>
9158 + #include <linux/usb.h>
9159 + #include <linux/wait.h>
9160 +@@ -198,14 +197,12 @@ struct digi_port {
9161 + int dp_throttle_restart;
9162 + wait_queue_head_t dp_flush_wait;
9163 + wait_queue_head_t dp_close_wait; /* wait queue for close */
9164 +- struct work_struct dp_wakeup_work;
9165 + struct usb_serial_port *dp_port;
9166 + };
9167 +
9168 +
9169 + /* Local Function Declarations */
9170 +
9171 +-static void digi_wakeup_write_lock(struct work_struct *work);
9172 + static int digi_write_oob_command(struct usb_serial_port *port,
9173 + unsigned char *buf, int count, int interruptible);
9174 + static int digi_write_inb_command(struct usb_serial_port *port,
9175 +@@ -356,26 +353,6 @@ __releases(lock)
9176 + return timeout;
9177 + }
9178 +
9179 +-
9180 +-/*
9181 +- * Digi Wakeup Write
9182 +- *
9183 +- * Wake up port, line discipline, and tty processes sleeping
9184 +- * on writes.
9185 +- */
9186 +-
9187 +-static void digi_wakeup_write_lock(struct work_struct *work)
9188 +-{
9189 +- struct digi_port *priv =
9190 +- container_of(work, struct digi_port, dp_wakeup_work);
9191 +- struct usb_serial_port *port = priv->dp_port;
9192 +- unsigned long flags;
9193 +-
9194 +- spin_lock_irqsave(&priv->dp_port_lock, flags);
9195 +- tty_port_tty_wakeup(&port->port);
9196 +- spin_unlock_irqrestore(&priv->dp_port_lock, flags);
9197 +-}
9198 +-
9199 + /*
9200 + * Digi Write OOB Command
9201 + *
9202 +@@ -986,6 +963,7 @@ static void digi_write_bulk_callback(struct urb *urb)
9203 + unsigned long flags;
9204 + int ret = 0;
9205 + int status = urb->status;
9206 ++ bool wakeup;
9207 +
9208 + /* port and serial sanity check */
9209 + if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
9210 +@@ -1012,6 +990,7 @@ static void digi_write_bulk_callback(struct urb *urb)
9211 + }
9212 +
9213 + /* try to send any buffered data on this port */
9214 ++ wakeup = true;
9215 + spin_lock_irqsave(&priv->dp_port_lock, flags);
9216 + priv->dp_write_urb_in_use = 0;
9217 + if (priv->dp_out_buf_len > 0) {
9218 +@@ -1027,19 +1006,18 @@ static void digi_write_bulk_callback(struct urb *urb)
9219 + if (ret == 0) {
9220 + priv->dp_write_urb_in_use = 1;
9221 + priv->dp_out_buf_len = 0;
9222 ++ wakeup = false;
9223 + }
9224 + }
9225 +- /* wake up processes sleeping on writes immediately */
9226 +- tty_port_tty_wakeup(&port->port);
9227 +- /* also queue up a wakeup at scheduler time, in case we */
9228 +- /* lost the race in write_chan(). */
9229 +- schedule_work(&priv->dp_wakeup_work);
9230 +-
9231 + spin_unlock_irqrestore(&priv->dp_port_lock, flags);
9232 ++
9233 + if (ret && ret != -EPERM)
9234 + dev_err_console(port,
9235 + "%s: usb_submit_urb failed, ret=%d, port=%d\n",
9236 + __func__, ret, priv->dp_port_num);
9237 ++
9238 ++ if (wakeup)
9239 ++ tty_port_tty_wakeup(&port->port);
9240 + }
9241 +
9242 + static int digi_write_room(struct tty_struct *tty)
9243 +@@ -1239,7 +1217,6 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
9244 + init_waitqueue_head(&priv->dp_transmit_idle_wait);
9245 + init_waitqueue_head(&priv->dp_flush_wait);
9246 + init_waitqueue_head(&priv->dp_close_wait);
9247 +- INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
9248 + priv->dp_port = port;
9249 +
9250 + init_waitqueue_head(&port->write_wait);
9251 +@@ -1508,13 +1485,14 @@ static int digi_read_oob_callback(struct urb *urb)
9252 + rts = C_CRTSCTS(tty);
9253 +
9254 + if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
9255 ++ bool wakeup = false;
9256 ++
9257 + spin_lock_irqsave(&priv->dp_port_lock, flags);
9258 + /* convert from digi flags to termiox flags */
9259 + if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
9260 + priv->dp_modem_signals |= TIOCM_CTS;
9261 +- /* port must be open to use tty struct */
9262 + if (rts)
9263 +- tty_port_tty_wakeup(&port->port);
9264 ++ wakeup = true;
9265 + } else {
9266 + priv->dp_modem_signals &= ~TIOCM_CTS;
9267 + /* port must be open to use tty struct */
9268 +@@ -1533,6 +1511,9 @@ static int digi_read_oob_callback(struct urb *urb)
9269 + priv->dp_modem_signals &= ~TIOCM_CD;
9270 +
9271 + spin_unlock_irqrestore(&priv->dp_port_lock, flags);
9272 ++
9273 ++ if (wakeup)
9274 ++ tty_port_tty_wakeup(&port->port);
9275 + } else if (opcode == DIGI_CMD_TRANSMIT_IDLE) {
9276 + spin_lock_irqsave(&priv->dp_port_lock, flags);
9277 + priv->dp_transmit_idle = 1;
9278 +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
9279 +index bf988f77d4006..fab6aa8a676aa 100644
9280 +--- a/drivers/usb/serial/keyspan_pda.c
9281 ++++ b/drivers/usb/serial/keyspan_pda.c
9282 +@@ -40,11 +40,12 @@
9283 + #define DRIVER_AUTHOR "Brian Warner <warner@××××××.com>"
9284 + #define DRIVER_DESC "USB Keyspan PDA Converter driver"
9285 +
9286 ++#define KEYSPAN_TX_THRESHOLD 16
9287 ++
9288 + struct keyspan_pda_private {
9289 + int tx_room;
9290 + int tx_throttled;
9291 +- struct work_struct wakeup_work;
9292 +- struct work_struct unthrottle_work;
9293 ++ struct work_struct unthrottle_work;
9294 + struct usb_serial *serial;
9295 + struct usb_serial_port *port;
9296 + };
9297 +@@ -97,15 +98,6 @@ static const struct usb_device_id id_table_fake_xircom[] = {
9298 + };
9299 + #endif
9300 +
9301 +-static void keyspan_pda_wakeup_write(struct work_struct *work)
9302 +-{
9303 +- struct keyspan_pda_private *priv =
9304 +- container_of(work, struct keyspan_pda_private, wakeup_work);
9305 +- struct usb_serial_port *port = priv->port;
9306 +-
9307 +- tty_port_tty_wakeup(&port->port);
9308 +-}
9309 +-
9310 + static void keyspan_pda_request_unthrottle(struct work_struct *work)
9311 + {
9312 + struct keyspan_pda_private *priv =
9313 +@@ -120,7 +112,7 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work)
9314 + 7, /* request_unthrottle */
9315 + USB_TYPE_VENDOR | USB_RECIP_INTERFACE
9316 + | USB_DIR_OUT,
9317 +- 16, /* value: threshold */
9318 ++ KEYSPAN_TX_THRESHOLD,
9319 + 0, /* index */
9320 + NULL,
9321 + 0,
9322 +@@ -139,6 +131,8 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
9323 + int retval;
9324 + int status = urb->status;
9325 + struct keyspan_pda_private *priv;
9326 ++ unsigned long flags;
9327 ++
9328 + priv = usb_get_serial_port_data(port);
9329 +
9330 + switch (status) {
9331 +@@ -172,18 +166,21 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
9332 + break;
9333 + case 1:
9334 + /* status interrupt */
9335 +- if (len < 3) {
9336 ++ if (len < 2) {
9337 + dev_warn(&port->dev, "short interrupt message received\n");
9338 + break;
9339 + }
9340 +- dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]);
9341 ++ dev_dbg(&port->dev, "rx int, d1=%d\n", data[1]);
9342 + switch (data[1]) {
9343 + case 1: /* modemline change */
9344 + break;
9345 + case 2: /* tx unthrottle interrupt */
9346 ++ spin_lock_irqsave(&port->lock, flags);
9347 + priv->tx_throttled = 0;
9348 ++ priv->tx_room = max(priv->tx_room, KEYSPAN_TX_THRESHOLD);
9349 ++ spin_unlock_irqrestore(&port->lock, flags);
9350 + /* queue up a wakeup at scheduler time */
9351 +- schedule_work(&priv->wakeup_work);
9352 ++ usb_serial_port_softint(port);
9353 + break;
9354 + default:
9355 + break;
9356 +@@ -443,6 +440,7 @@ static int keyspan_pda_write(struct tty_struct *tty,
9357 + int request_unthrottle = 0;
9358 + int rc = 0;
9359 + struct keyspan_pda_private *priv;
9360 ++ unsigned long flags;
9361 +
9362 + priv = usb_get_serial_port_data(port);
9363 + /* guess how much room is left in the device's ring buffer, and if we
9364 +@@ -462,13 +460,13 @@ static int keyspan_pda_write(struct tty_struct *tty,
9365 + the TX urb is in-flight (wait until it completes)
9366 + the device is full (wait until it says there is room)
9367 + */
9368 +- spin_lock_bh(&port->lock);
9369 ++ spin_lock_irqsave(&port->lock, flags);
9370 + if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) {
9371 +- spin_unlock_bh(&port->lock);
9372 ++ spin_unlock_irqrestore(&port->lock, flags);
9373 + return 0;
9374 + }
9375 + clear_bit(0, &port->write_urbs_free);
9376 +- spin_unlock_bh(&port->lock);
9377 ++ spin_unlock_irqrestore(&port->lock, flags);
9378 +
9379 + /* At this point the URB is in our control, nobody else can submit it
9380 + again (the only sudden transition was the one from EINPROGRESS to
9381 +@@ -514,7 +512,8 @@ static int keyspan_pda_write(struct tty_struct *tty,
9382 + goto exit;
9383 + }
9384 + }
9385 +- if (count > priv->tx_room) {
9386 ++
9387 ++ if (count >= priv->tx_room) {
9388 + /* we're about to completely fill the Tx buffer, so
9389 + we'll be throttled afterwards. */
9390 + count = priv->tx_room;
9391 +@@ -547,7 +546,7 @@ static int keyspan_pda_write(struct tty_struct *tty,
9392 +
9393 + rc = count;
9394 + exit:
9395 +- if (rc < 0)
9396 ++ if (rc <= 0)
9397 + set_bit(0, &port->write_urbs_free);
9398 + return rc;
9399 + }
9400 +@@ -562,21 +561,24 @@ static void keyspan_pda_write_bulk_callback(struct urb *urb)
9401 + priv = usb_get_serial_port_data(port);
9402 +
9403 + /* queue up a wakeup at scheduler time */
9404 +- schedule_work(&priv->wakeup_work);
9405 ++ usb_serial_port_softint(port);
9406 + }
9407 +
9408 +
9409 + static int keyspan_pda_write_room(struct tty_struct *tty)
9410 + {
9411 + struct usb_serial_port *port = tty->driver_data;
9412 +- struct keyspan_pda_private *priv;
9413 +- priv = usb_get_serial_port_data(port);
9414 +- /* used by n_tty.c for processing of tabs and such. Giving it our
9415 +- conservative guess is probably good enough, but needs testing by
9416 +- running a console through the device. */
9417 +- return priv->tx_room;
9418 +-}
9419 ++ struct keyspan_pda_private *priv = usb_get_serial_port_data(port);
9420 ++ unsigned long flags;
9421 ++ int room = 0;
9422 +
9423 ++ spin_lock_irqsave(&port->lock, flags);
9424 ++ if (test_bit(0, &port->write_urbs_free) && !priv->tx_throttled)
9425 ++ room = priv->tx_room;
9426 ++ spin_unlock_irqrestore(&port->lock, flags);
9427 ++
9428 ++ return room;
9429 ++}
9430 +
9431 + static int keyspan_pda_chars_in_buffer(struct tty_struct *tty)
9432 + {
9433 +@@ -656,8 +658,12 @@ error:
9434 + }
9435 + static void keyspan_pda_close(struct usb_serial_port *port)
9436 + {
9437 ++ struct keyspan_pda_private *priv = usb_get_serial_port_data(port);
9438 ++
9439 + usb_kill_urb(port->write_urb);
9440 + usb_kill_urb(port->interrupt_in_urb);
9441 ++
9442 ++ cancel_work_sync(&priv->unthrottle_work);
9443 + }
9444 +
9445 +
9446 +@@ -715,7 +721,6 @@ static int keyspan_pda_port_probe(struct usb_serial_port *port)
9447 + if (!priv)
9448 + return -ENOMEM;
9449 +
9450 +- INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
9451 + INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
9452 + priv->serial = port->serial;
9453 + priv->port = port;
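
In the keyspan_pda hunks above, tx_room and tx_throttled are now updated from URB completion context, so readers such as write_room() take the port lock with the irqsave variants and only report room when a write could actually proceed. A simplified sketch of that accounting, using a hypothetical structure rather than the real driver types:

#include <linux/spinlock.h>

struct example_port {
        spinlock_t lock;
        int tx_room;
        int tx_throttled;
};

/* Report buffer room only when a write could make progress right now. */
static int example_write_room(struct example_port *p)
{
        unsigned long flags;
        int room = 0;

        spin_lock_irqsave(&p->lock, flags);
        if (!p->tx_throttled)
                room = p->tx_room;
        spin_unlock_irqrestore(&p->lock, flags);

        return room;
}
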
9454 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
9455 +index 2ec4eeacebc76..55b2879f27bdc 100644
9456 +--- a/drivers/usb/serial/mos7720.c
9457 ++++ b/drivers/usb/serial/mos7720.c
9458 +@@ -638,6 +638,8 @@ static void parport_mos7715_restore_state(struct parport *pp,
9459 + spin_unlock(&release_lock);
9460 + return;
9461 + }
9462 ++ mos_parport->shadowDCR = s->u.pc.ctr;
9463 ++ mos_parport->shadowECR = s->u.pc.ecr;
9464 + write_parport_reg_nonblock(mos_parport, MOS7720_DCR,
9465 + mos_parport->shadowDCR);
9466 + write_parport_reg_nonblock(mos_parport, MOS7720_ECR,
9467 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
9468 +index c2ef238653002..531744049e7f0 100644
9469 +--- a/drivers/usb/serial/option.c
9470 ++++ b/drivers/usb/serial/option.c
9471 +@@ -563,6 +563,9 @@ static void option_instat_callback(struct urb *urb);
9472 +
9473 + /* Device flags */
9474 +
9475 ++/* Highest interface number which can be used with NCTRL() and RSVD() */
9476 ++#define FLAG_IFNUM_MAX 7
9477 ++
9478 + /* Interface does not support modem-control requests */
9479 + #define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)
9480 +
9481 +@@ -2101,6 +2104,14 @@ static struct usb_serial_driver * const serial_drivers[] = {
9482 +
9483 + module_usb_serial_driver(serial_drivers, option_ids);
9484 +
9485 ++static bool iface_is_reserved(unsigned long device_flags, u8 ifnum)
9486 ++{
9487 ++ if (ifnum > FLAG_IFNUM_MAX)
9488 ++ return false;
9489 ++
9490 ++ return device_flags & RSVD(ifnum);
9491 ++}
9492 ++
9493 + static int option_probe(struct usb_serial *serial,
9494 + const struct usb_device_id *id)
9495 + {
9496 +@@ -2117,7 +2128,7 @@ static int option_probe(struct usb_serial *serial,
9497 + * the same class/subclass/protocol as the serial interfaces. Look at
9498 + * the Windows driver .INF files for reserved interface numbers.
9499 + */
9500 +- if (device_flags & RSVD(iface_desc->bInterfaceNumber))
9501 ++ if (iface_is_reserved(device_flags, iface_desc->bInterfaceNumber))
9502 + return -ENODEV;
9503 +
9504 + /*
9505 +@@ -2133,6 +2144,14 @@ static int option_probe(struct usb_serial *serial,
9506 + return 0;
9507 + }
9508 +
9509 ++static bool iface_no_modem_control(unsigned long device_flags, u8 ifnum)
9510 ++{
9511 ++ if (ifnum > FLAG_IFNUM_MAX)
9512 ++ return false;
9513 ++
9514 ++ return device_flags & NCTRL(ifnum);
9515 ++}
9516 ++
9517 + static int option_attach(struct usb_serial *serial)
9518 + {
9519 + struct usb_interface_descriptor *iface_desc;
9520 +@@ -2148,7 +2167,7 @@ static int option_attach(struct usb_serial *serial)
9521 +
9522 + iface_desc = &serial->interface->cur_altsetting->desc;
9523 +
9524 +- if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
9525 ++ if (!iface_no_modem_control(device_flags, iface_desc->bInterfaceNumber))
9526 + data->use_send_setup = 1;
9527 +
9528 + if (device_flags & ZLP)
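
The option.c hunks above add bounds checks because the RSVD() and NCTRL() device flags only encode interfaces 0 through 7 as single bits, so larger interface numbers must not be looked up in the bitmap at all (and very large shift counts would be undefined behaviour). A small sketch of the guard, with an illustrative macro and function name:

#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_IFNUM_MAX       7       /* highest interface encodable in the bitmap */

static bool example_iface_flagged(unsigned long flags_bitmap, u8 ifnum)
{
        if (ifnum > EXAMPLE_IFNUM_MAX)
                return false;

        return flags_bitmap & BIT(ifnum);
}
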
9529 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
9530 +index 443a35dde7f52..632653cd70e3b 100644
9531 +--- a/drivers/vfio/pci/vfio_pci.c
9532 ++++ b/drivers/vfio/pci/vfio_pci.c
9533 +@@ -1451,8 +1451,8 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
9534 +
9535 + mutex_unlock(&vdev->vma_lock);
9536 +
9537 +- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
9538 +- vma->vm_end - vma->vm_start, vma->vm_page_prot))
9539 ++ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
9540 ++ vma->vm_end - vma->vm_start, vma->vm_page_prot))
9541 + ret = VM_FAULT_SIGBUS;
9542 +
9543 + up_out:
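
The vfio_pci hunk above switches the BAR fault handler from remap_pfn_range() to io_remap_pfn_range(), which takes the same arguments but lets architectures apply whatever extra attributes MMIO mappings require. A short sketch of mapping a device window into a user VMA; the function name and pfn argument are illustrative:

#include <linux/mm.h>

static int example_mmap_mmio(struct vm_area_struct *vma, unsigned long pfn)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return io_remap_pfn_range(vma, vma->vm_start, pfn,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
}
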
9544 +diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
9545 +index 3f5f8198a6bb1..08f17839c2fe2 100644
9546 +--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
9547 ++++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
9548 +@@ -231,7 +231,7 @@ int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
9549 + return -EINVAL;
9550 +
9551 + if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
9552 +- return -EINVAL;
9553 ++ return -ENODEV;
9554 +
9555 + mem_node = of_find_node_by_phandle(mem_phandle);
9556 + if (!mem_node)
9557 +@@ -393,7 +393,7 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
9558 + int ret;
9559 + struct vfio_pci_npu2_data *data;
9560 + struct device_node *nvlink_dn;
9561 +- u32 nvlink_index = 0;
9562 ++ u32 nvlink_index = 0, mem_phandle = 0;
9563 + struct pci_dev *npdev = vdev->pdev;
9564 + struct device_node *npu_node = pci_device_to_OF_node(npdev);
9565 + struct pci_controller *hose = pci_bus_to_host(npdev->bus);
9566 +@@ -408,6 +408,9 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
9567 + if (!pnv_pci_get_gpu_dev(vdev->pdev))
9568 + return -ENODEV;
9569 +
9570 ++ if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
9571 ++ return -ENODEV;
9572 ++
9573 + /*
9574 + * NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
9575 + * so we can allocate one register per link, using nvlink index as
9576 +diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
9577 +index 5ff8e0320d95b..cf2bfff2efbf1 100644
9578 +--- a/drivers/video/fbdev/atmel_lcdfb.c
9579 ++++ b/drivers/video/fbdev/atmel_lcdfb.c
9580 +@@ -987,8 +987,8 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
9581 + }
9582 +
9583 + INIT_LIST_HEAD(&pdata->pwr_gpios);
9584 +- ret = -ENOMEM;
9585 + for (i = 0; i < gpiod_count(dev, "atmel,power-control"); i++) {
9586 ++ ret = -ENOMEM;
9587 + gpiod = devm_gpiod_get_index(dev, "atmel,power-control",
9588 + i, GPIOD_ASIS);
9589 + if (IS_ERR(gpiod))
9590 +diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
9591 +index 4f7c73e6052f6..97e8a195e18f5 100644
9592 +--- a/drivers/virtio/virtio_ring.c
9593 ++++ b/drivers/virtio/virtio_ring.c
9594 +@@ -1608,7 +1608,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
9595 + vq->num_added = 0;
9596 + vq->packed_ring = true;
9597 + vq->use_dma_api = vring_use_dma_api(vdev);
9598 +- list_add_tail(&vq->vq.list, &vdev->vqs);
9599 + #ifdef DEBUG
9600 + vq->in_use = false;
9601 + vq->last_add_time_valid = false;
9602 +@@ -1669,6 +1668,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
9603 + cpu_to_le16(vq->packed.event_flags_shadow);
9604 + }
9605 +
9606 ++ list_add_tail(&vq->vq.list, &vdev->vqs);
9607 + return &vq->vq;
9608 +
9609 + err_desc_extra:
9610 +@@ -1676,9 +1676,9 @@ err_desc_extra:
9611 + err_desc_state:
9612 + kfree(vq);
9613 + err_vq:
9614 +- vring_free_queue(vdev, event_size_in_bytes, device, ring_dma_addr);
9615 ++ vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
9616 + err_device:
9617 +- vring_free_queue(vdev, event_size_in_bytes, driver, ring_dma_addr);
9618 ++ vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
9619 + err_driver:
9620 + vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
9621 + err_ring:
9622 +@@ -2085,7 +2085,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
9623 + vq->last_used_idx = 0;
9624 + vq->num_added = 0;
9625 + vq->use_dma_api = vring_use_dma_api(vdev);
9626 +- list_add_tail(&vq->vq.list, &vdev->vqs);
9627 + #ifdef DEBUG
9628 + vq->in_use = false;
9629 + vq->last_add_time_valid = false;
9630 +@@ -2127,6 +2126,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
9631 + memset(vq->split.desc_state, 0, vring.num *
9632 + sizeof(struct vring_desc_state_split));
9633 +
9634 ++ list_add_tail(&vq->vq.list, &vdev->vqs);
9635 + return &vq->vq;
9636 + }
9637 + EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
9638 +diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
9639 +index e2745f6861960..1aa42e879e633 100644
9640 +--- a/drivers/watchdog/Kconfig
9641 ++++ b/drivers/watchdog/Kconfig
9642 +@@ -374,6 +374,7 @@ config ARM_SBSA_WATCHDOG
9643 + config ARMADA_37XX_WATCHDOG
9644 + tristate "Armada 37xx watchdog"
9645 + depends on ARCH_MVEBU || COMPILE_TEST
9646 ++ depends on HAS_IOMEM
9647 + select MFD_SYSCON
9648 + select WATCHDOG_CORE
9649 + help
9650 +@@ -617,7 +618,7 @@ config SUNXI_WATCHDOG
9651 +
9652 + config COH901327_WATCHDOG
9653 + bool "ST-Ericsson COH 901 327 watchdog"
9654 +- depends on ARCH_U300 || (ARM && COMPILE_TEST)
9655 ++ depends on ARCH_U300 || (ARM && COMMON_CLK && COMPILE_TEST)
9656 + default y if MACH_U300
9657 + select WATCHDOG_CORE
9658 + help
9659 +@@ -774,6 +775,7 @@ config MOXART_WDT
9660 +
9661 + config SIRFSOC_WATCHDOG
9662 + tristate "SiRFSOC watchdog"
9663 ++ depends on HAS_IOMEM
9664 + depends on ARCH_SIRF || COMPILE_TEST
9665 + select WATCHDOG_CORE
9666 + default y
9667 +diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
9668 +index eb47fe5ed2805..ea8a6abd64ecb 100644
9669 +--- a/drivers/watchdog/qcom-wdt.c
9670 ++++ b/drivers/watchdog/qcom-wdt.c
9671 +@@ -143,7 +143,7 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
9672 + */
9673 + wmb();
9674 +
9675 +- msleep(150);
9676 ++ mdelay(150);
9677 + return 0;
9678 + }
9679 +
9680 +diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
9681 +index 65cb55f3916fc..b9b1daa9e2a4c 100644
9682 +--- a/drivers/watchdog/sprd_wdt.c
9683 ++++ b/drivers/watchdog/sprd_wdt.c
9684 +@@ -108,18 +108,6 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
9685 + u32 tmr_step = timeout * SPRD_WDT_CNT_STEP;
9686 + u32 prtmr_step = pretimeout * SPRD_WDT_CNT_STEP;
9687 +
9688 +- sprd_wdt_unlock(wdt->base);
9689 +- writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
9690 +- SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH);
9691 +- writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK),
9692 +- wdt->base + SPRD_WDT_LOAD_LOW);
9693 +- writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
9694 +- SPRD_WDT_LOW_VALUE_MASK,
9695 +- wdt->base + SPRD_WDT_IRQ_LOAD_HIGH);
9696 +- writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK,
9697 +- wdt->base + SPRD_WDT_IRQ_LOAD_LOW);
9698 +- sprd_wdt_lock(wdt->base);
9699 +-
9700 + /*
9701 + * Waiting the load value operation done,
9702 + * it needs two or three RTC clock cycles.
9703 +@@ -134,6 +122,19 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
9704 +
9705 + if (delay_cnt >= SPRD_WDT_LOAD_TIMEOUT)
9706 + return -EBUSY;
9707 ++
9708 ++ sprd_wdt_unlock(wdt->base);
9709 ++ writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
9710 ++ SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH);
9711 ++ writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK),
9712 ++ wdt->base + SPRD_WDT_LOAD_LOW);
9713 ++ writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
9714 ++ SPRD_WDT_LOW_VALUE_MASK,
9715 ++ wdt->base + SPRD_WDT_IRQ_LOAD_HIGH);
9716 ++ writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK,
9717 ++ wdt->base + SPRD_WDT_IRQ_LOAD_LOW);
9718 ++ sprd_wdt_lock(wdt->base);
9719 ++
9720 + return 0;
9721 + }
9722 +
9723 +@@ -345,15 +346,10 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
9724 + if (ret)
9725 + return ret;
9726 +
9727 +- if (watchdog_active(&wdt->wdd)) {
9728 ++ if (watchdog_active(&wdt->wdd))
9729 + ret = sprd_wdt_start(&wdt->wdd);
9730 +- if (ret) {
9731 +- sprd_wdt_disable(wdt);
9732 +- return ret;
9733 +- }
9734 +- }
9735 +
9736 +- return 0;
9737 ++ return ret;
9738 + }
9739 +
9740 + static const struct dev_pm_ops sprd_wdt_pm_ops = {
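
The sprd_wdt hunks above reorder the load sequence so the driver first waits for the previous load operation to complete and only then writes the new timeout values. A generic sketch of that poll-then-write idea; the register offsets and busy bit below are hypothetical, not the real layout, and the real driver polls by hand rather than via readl_poll_timeout():

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define EXAMPLE_WDT_STS         0x10            /* hypothetical status register */
#define EXAMPLE_WDT_LOAD        0x14            /* hypothetical load register */
#define EXAMPLE_WDT_LD_BUSY     BIT(0)          /* hypothetical "load busy" bit */

static int example_wdt_load(void __iomem *base, u32 value)
{
        u32 sts;
        int ret;

        /* wait for the previous load to finish before starting a new one */
        ret = readl_poll_timeout(base + EXAMPLE_WDT_STS, sts,
                                 !(sts & EXAMPLE_WDT_LD_BUSY), 10, 1000);
        if (ret)
                return ret;

        writel_relaxed(value, base + EXAMPLE_WDT_LOAD);
        return 0;
}
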
9741 +diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
9742 +index 861daf4f37b28..faa46a666f4c5 100644
9743 +--- a/drivers/watchdog/watchdog_core.c
9744 ++++ b/drivers/watchdog/watchdog_core.c
9745 +@@ -255,15 +255,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
9746 + }
9747 +
9748 + if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
9749 +- wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
9750 +-
9751 +- ret = register_reboot_notifier(&wdd->reboot_nb);
9752 +- if (ret) {
9753 +- pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
9754 +- wdd->id, ret);
9755 +- watchdog_dev_unregister(wdd);
9756 +- ida_simple_remove(&watchdog_ida, id);
9757 +- return ret;
9758 ++ if (!wdd->ops->stop)
9759 ++ pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id);
9760 ++ else {
9761 ++ wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
9762 ++
9763 ++ ret = register_reboot_notifier(&wdd->reboot_nb);
9764 ++ if (ret) {
9765 ++ pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
9766 ++ wdd->id, ret);
9767 ++ watchdog_dev_unregister(wdd);
9768 ++ ida_simple_remove(&watchdog_ida, id);
9769 ++ return ret;
9770 ++ }
9771 + }
9772 + }
9773 +
9774 +diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
9775 +index e7a6702359655..7fb65836230ac 100644
9776 +--- a/drivers/xen/xen-pciback/xenbus.c
9777 ++++ b/drivers/xen/xen-pciback/xenbus.c
9778 +@@ -688,7 +688,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
9779 +
9780 + /* watch the backend node for backend configuration information */
9781 + err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
9782 +- xen_pcibk_be_watch);
9783 ++ NULL, xen_pcibk_be_watch);
9784 + if (err)
9785 + goto out;
9786 +
9787 +diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
9788 +index d75a2385b37c7..88516a8a9f932 100644
9789 +--- a/drivers/xen/xenbus/xenbus.h
9790 ++++ b/drivers/xen/xenbus/xenbus.h
9791 +@@ -44,6 +44,8 @@ struct xen_bus_type {
9792 + int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
9793 + int (*probe)(struct xen_bus_type *bus, const char *type,
9794 + const char *dir);
9795 ++ bool (*otherend_will_handle)(struct xenbus_watch *watch,
9796 ++ const char *path, const char *token);
9797 + void (*otherend_changed)(struct xenbus_watch *watch, const char *path,
9798 + const char *token);
9799 + struct bus_type bus;
9800 +diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
9801 +index f38bdaea0ef11..81eddb8529ffc 100644
9802 +--- a/drivers/xen/xenbus/xenbus_client.c
9803 ++++ b/drivers/xen/xenbus/xenbus_client.c
9804 +@@ -114,18 +114,22 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
9805 + */
9806 + int xenbus_watch_path(struct xenbus_device *dev, const char *path,
9807 + struct xenbus_watch *watch,
9808 ++ bool (*will_handle)(struct xenbus_watch *,
9809 ++ const char *, const char *),
9810 + void (*callback)(struct xenbus_watch *,
9811 + const char *, const char *))
9812 + {
9813 + int err;
9814 +
9815 + watch->node = path;
9816 ++ watch->will_handle = will_handle;
9817 + watch->callback = callback;
9818 +
9819 + err = register_xenbus_watch(watch);
9820 +
9821 + if (err) {
9822 + watch->node = NULL;
9823 ++ watch->will_handle = NULL;
9824 + watch->callback = NULL;
9825 + xenbus_dev_fatal(dev, err, "adding watch on %s", path);
9826 + }
9827 +@@ -152,6 +156,8 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
9828 + */
9829 + int xenbus_watch_pathfmt(struct xenbus_device *dev,
9830 + struct xenbus_watch *watch,
9831 ++ bool (*will_handle)(struct xenbus_watch *,
9832 ++ const char *, const char *),
9833 + void (*callback)(struct xenbus_watch *,
9834 + const char *, const char *),
9835 + const char *pathfmt, ...)
9836 +@@ -168,7 +174,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev,
9837 + xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
9838 + return -ENOMEM;
9839 + }
9840 +- err = xenbus_watch_path(dev, path, watch, callback);
9841 ++ err = xenbus_watch_path(dev, path, watch, will_handle, callback);
9842 +
9843 + if (err)
9844 + kfree(path);
9845 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
9846 +index 5b471889d7237..e6d0903459e11 100644
9847 +--- a/drivers/xen/xenbus/xenbus_probe.c
9848 ++++ b/drivers/xen/xenbus/xenbus_probe.c
9849 +@@ -136,6 +136,7 @@ static int watch_otherend(struct xenbus_device *dev)
9850 + container_of(dev->dev.bus, struct xen_bus_type, bus);
9851 +
9852 + return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
9853 ++ bus->otherend_will_handle,
9854 + bus->otherend_changed,
9855 + "%s/%s", dev->otherend, "state");
9856 + }
9857 +diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
9858 +index b0bed4faf44cc..4bb603051d5b6 100644
9859 +--- a/drivers/xen/xenbus/xenbus_probe_backend.c
9860 ++++ b/drivers/xen/xenbus/xenbus_probe_backend.c
9861 +@@ -180,6 +180,12 @@ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
9862 + return err;
9863 + }
9864 +
9865 ++static bool frontend_will_handle(struct xenbus_watch *watch,
9866 ++ const char *path, const char *token)
9867 ++{
9868 ++ return watch->nr_pending == 0;
9869 ++}
9870 ++
9871 + static void frontend_changed(struct xenbus_watch *watch,
9872 + const char *path, const char *token)
9873 + {
9874 +@@ -191,6 +197,7 @@ static struct xen_bus_type xenbus_backend = {
9875 + .levels = 3, /* backend/type/<frontend>/<id> */
9876 + .get_bus_id = backend_bus_id,
9877 + .probe = xenbus_probe_backend,
9878 ++ .otherend_will_handle = frontend_will_handle,
9879 + .otherend_changed = frontend_changed,
9880 + .bus = {
9881 + .name = "xen-backend",
9882 +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
9883 +index 3a06eb699f333..12e02eb01f599 100644
9884 +--- a/drivers/xen/xenbus/xenbus_xs.c
9885 ++++ b/drivers/xen/xenbus/xenbus_xs.c
9886 +@@ -705,9 +705,13 @@ int xs_watch_msg(struct xs_watch_event *event)
9887 +
9888 + spin_lock(&watches_lock);
9889 + event->handle = find_watch(event->token);
9890 +- if (event->handle != NULL) {
9891 ++ if (event->handle != NULL &&
9892 ++ (!event->handle->will_handle ||
9893 ++ event->handle->will_handle(event->handle,
9894 ++ event->path, event->token))) {
9895 + spin_lock(&watch_events_lock);
9896 + list_add_tail(&event->list, &watch_events);
9897 ++ event->handle->nr_pending++;
9898 + wake_up(&watch_events_waitq);
9899 + spin_unlock(&watch_events_lock);
9900 + } else
9901 +@@ -765,6 +769,8 @@ int register_xenbus_watch(struct xenbus_watch *watch)
9902 +
9903 + sprintf(token, "%lX", (long)watch);
9904 +
9905 ++ watch->nr_pending = 0;
9906 ++
9907 + down_read(&xs_watch_rwsem);
9908 +
9909 + spin_lock(&watches_lock);
9910 +@@ -814,11 +820,14 @@ void unregister_xenbus_watch(struct xenbus_watch *watch)
9911 +
9912 + /* Cancel pending watch events. */
9913 + spin_lock(&watch_events_lock);
9914 +- list_for_each_entry_safe(event, tmp, &watch_events, list) {
9915 +- if (event->handle != watch)
9916 +- continue;
9917 +- list_del(&event->list);
9918 +- kfree(event);
9919 ++ if (watch->nr_pending) {
9920 ++ list_for_each_entry_safe(event, tmp, &watch_events, list) {
9921 ++ if (event->handle != watch)
9922 ++ continue;
9923 ++ list_del(&event->list);
9924 ++ kfree(event);
9925 ++ }
9926 ++ watch->nr_pending = 0;
9927 + }
9928 + spin_unlock(&watch_events_lock);
9929 +
9930 +@@ -865,7 +874,6 @@ void xs_suspend_cancel(void)
9931 +
9932 + static int xenwatch_thread(void *unused)
9933 + {
9934 +- struct list_head *ent;
9935 + struct xs_watch_event *event;
9936 +
9937 + xenwatch_pid = current->pid;
9938 +@@ -880,13 +888,15 @@ static int xenwatch_thread(void *unused)
9939 + mutex_lock(&xenwatch_mutex);
9940 +
9941 + spin_lock(&watch_events_lock);
9942 +- ent = watch_events.next;
9943 +- if (ent != &watch_events)
9944 +- list_del(ent);
9945 ++ event = list_first_entry_or_null(&watch_events,
9946 ++ struct xs_watch_event, list);
9947 ++ if (event) {
9948 ++ list_del(&event->list);
9949 ++ event->handle->nr_pending--;
9950 ++ }
9951 + spin_unlock(&watch_events_lock);
9952 +
9953 +- if (ent != &watch_events) {
9954 +- event = list_entry(ent, struct xs_watch_event, list);
9955 ++ if (event) {
9956 + event->handle->callback(event->handle, event->path,
9957 + event->token);
9958 + kfree(event);
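
The xenbus changes above give each watch an optional will_handle() callback and an nr_pending counter, so a backend can refuse to queue another event for a watch that already has one outstanding, and unregister can skip scanning the event list when nothing is pending. A much-simplified sketch of the queueing guard, with invented names and none of the real locking:

#include <linux/types.h>

struct example_watch {
        unsigned int nr_pending;
        bool (*will_handle)(struct example_watch *w);
};

/* Returns true if the event was queued, false if the watch declined it. */
static bool example_queue_event(struct example_watch *w)
{
        if (w->will_handle && !w->will_handle(w))
                return false;

        w->nr_pending++;
        /* ... link the event into the work list and wake the handler ... */
        return true;
}
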
9959 +diff --git a/fs/afs/super.c b/fs/afs/super.c
9960 +index 7f8a9b3137bff..eb04dcc543289 100644
9961 +--- a/fs/afs/super.c
9962 ++++ b/fs/afs/super.c
9963 +@@ -236,6 +236,9 @@ static int afs_parse_source(struct fs_context *fc, struct fs_parameter *param)
9964 +
9965 + _enter(",%s", name);
9966 +
9967 ++ if (fc->source)
9968 ++ return invalf(fc, "kAFS: Multiple sources not supported");
9969 ++
9970 + if (!name) {
9971 + printk(KERN_ERR "kAFS: no volume name specified\n");
9972 + return -EINVAL;
9973 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
9974 +index c6d9e8c07c236..c0dd839e99b79 100644
9975 +--- a/fs/btrfs/extent-tree.c
9976 ++++ b/fs/btrfs/extent-tree.c
9977 +@@ -32,6 +32,7 @@
9978 + #include "block-rsv.h"
9979 + #include "delalloc-space.h"
9980 + #include "block-group.h"
9981 ++#include "rcu-string.h"
9982 +
9983 + #undef SCRAMBLE_DELAYED_REFS
9984 +
9985 +@@ -2838,10 +2839,10 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
9986 + len = cache->key.objectid + cache->key.offset - start;
9987 + len = min(len, end + 1 - start);
9988 +
9989 +- if (start < cache->last_byte_to_unpin) {
9990 +- len = min(len, cache->last_byte_to_unpin - start);
9991 +- if (return_free_space)
9992 +- btrfs_add_free_space(cache, start, len);
9993 ++ if (start < cache->last_byte_to_unpin && return_free_space) {
9994 ++ u64 add_len = min(len, cache->last_byte_to_unpin - start);
9995 ++
9996 ++ btrfs_add_free_space(cache, start, add_len);
9997 + }
9998 +
9999 + start += len;
10000 +@@ -5618,6 +5619,19 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed)
10001 + &start, &end,
10002 + CHUNK_TRIMMED | CHUNK_ALLOCATED);
10003 +
10004 ++ /* Check if there are any CHUNK_* bits left */
10005 ++ if (start > device->total_bytes) {
10006 ++ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
10007 ++ btrfs_warn_in_rcu(fs_info,
10008 ++"ignoring attempt to trim beyond device size: offset %llu length %llu device %s device size %llu",
10009 ++ start, end - start + 1,
10010 ++ rcu_str_deref(device->name),
10011 ++ device->total_bytes);
10012 ++ mutex_unlock(&fs_info->chunk_mutex);
10013 ++ ret = 0;
10014 ++ break;
10015 ++ }
10016 ++
10017 + /* Ensure we skip the reserved area in the first 1M */
10018 + start = max_t(u64, start, SZ_1M);
10019 +
10020 +diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
10021 +index bc858c8cef0a6..fcf1807cc8dd7 100644
10022 +--- a/fs/btrfs/extent_io.h
10023 ++++ b/fs/btrfs/extent_io.h
10024 +@@ -35,6 +35,8 @@
10025 + */
10026 + #define CHUNK_ALLOCATED EXTENT_DIRTY
10027 + #define CHUNK_TRIMMED EXTENT_DEFRAG
10028 ++#define CHUNK_STATE_MASK (CHUNK_ALLOCATED | \
10029 ++ CHUNK_TRIMMED)
10030 +
10031 + /*
10032 + * flags for bio submission. The high bits indicate the compression
10033 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
10034 +index 457f8f858a3f0..67ffbe92944c6 100644
10035 +--- a/fs/btrfs/volumes.c
10036 ++++ b/fs/btrfs/volumes.c
10037 +@@ -4908,6 +4908,10 @@ again:
10038 + }
10039 +
10040 + mutex_lock(&fs_info->chunk_mutex);
10041 ++ /* Clear all state bits beyond the shrunk device size */
10042 ++ clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
10043 ++ CHUNK_STATE_MASK);
10044 ++
10045 + btrfs_device_set_disk_total_bytes(device, new_size);
10046 + if (list_empty(&device->post_commit_list))
10047 + list_add_tail(&device->post_commit_list,
10048 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
10049 +index af563d73d252c..22833fa5bb589 100644
10050 +--- a/fs/ceph/caps.c
10051 ++++ b/fs/ceph/caps.c
10052 +@@ -1052,12 +1052,19 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
10053 + {
10054 + struct ceph_mds_session *session = cap->session;
10055 + struct ceph_inode_info *ci = cap->ci;
10056 +- struct ceph_mds_client *mdsc =
10057 +- ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
10058 ++ struct ceph_mds_client *mdsc;
10059 + int removed = 0;
10060 +
10061 ++ /* 'ci' being NULL means the remove have already occurred */
10062 ++ if (!ci) {
10063 ++ dout("%s: cap inode is NULL\n", __func__);
10064 ++ return;
10065 ++ }
10066 ++
10067 + dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
10068 +
10069 ++ mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc;
10070 ++
10071 + /* remove from inode's cap rbtree, and clear auth cap */
10072 + rb_erase(&cap->ci_node, &ci->i_caps);
10073 + if (ci->i_auth_cap == cap)
10074 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
10075 +index 6211f8b731a97..30025cc5d4ae7 100644
10076 +--- a/fs/cifs/smb2ops.c
10077 ++++ b/fs/cifs/smb2ops.c
10078 +@@ -478,7 +478,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
10079 + goto out;
10080 + }
10081 +
10082 +- if (bytes_left || p->Next)
10083 ++ /* Azure rounds the buffer size up 8, to a 16 byte boundary */
10084 ++ if ((bytes_left > 8) || p->Next)
10085 + cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
10086 +
10087 +
10088 +diff --git a/fs/erofs/data.c b/fs/erofs/data.c
10089 +index fc3a8d8064f84..b22a08ac53a23 100644
10090 +--- a/fs/erofs/data.c
10091 ++++ b/fs/erofs/data.c
10092 +@@ -323,27 +323,12 @@ static int erofs_raw_access_readpages(struct file *filp,
10093 + return 0;
10094 + }
10095 +
10096 +-static int erofs_get_block(struct inode *inode, sector_t iblock,
10097 +- struct buffer_head *bh, int create)
10098 +-{
10099 +- struct erofs_map_blocks map = {
10100 +- .m_la = iblock << 9,
10101 +- };
10102 +- int err;
10103 +-
10104 +- err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
10105 +- if (err)
10106 +- return err;
10107 +-
10108 +- if (map.m_flags & EROFS_MAP_MAPPED)
10109 +- bh->b_blocknr = erofs_blknr(map.m_pa);
10110 +-
10111 +- return err;
10112 +-}
10113 +-
10114 + static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
10115 + {
10116 + struct inode *inode = mapping->host;
10117 ++ struct erofs_map_blocks map = {
10118 ++ .m_la = blknr_to_addr(block),
10119 ++ };
10120 +
10121 + if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
10122 + erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;
10123 +@@ -352,7 +337,10 @@ static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
10124 + return 0;
10125 + }
10126 +
10127 +- return generic_block_bmap(mapping, block, erofs_get_block);
10128 ++ if (!erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW))
10129 ++ return erofs_blknr(map.m_pa);
10130 ++
10131 ++ return 0;
10132 + }
10133 +
10134 + /* for uncompressed (aligned) files and raw access for other files */
10135 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
10136 +index cbd028a31daff..3bac525f0439d 100644
10137 +--- a/fs/ext4/inode.c
10138 ++++ b/fs/ext4/inode.c
10139 +@@ -203,6 +203,7 @@ void ext4_evict_inode(struct inode *inode)
10140 + */
10141 + int extra_credits = 6;
10142 + struct ext4_xattr_inode_array *ea_inode_array = NULL;
10143 ++ bool freeze_protected = false;
10144 +
10145 + trace_ext4_evict_inode(inode);
10146 +
10147 +@@ -250,9 +251,14 @@ void ext4_evict_inode(struct inode *inode)
10148 +
10149 + /*
10150 + * Protect us against freezing - iput() caller didn't have to have any
10151 +- * protection against it
10152 ++ * protection against it. When we are in a running transaction though,
10153 ++ * we are already protected against freezing and we cannot grab further
10154 ++ * protection due to lock ordering constraints.
10155 + */
10156 +- sb_start_intwrite(inode->i_sb);
10157 ++ if (!ext4_journal_current_handle()) {
10158 ++ sb_start_intwrite(inode->i_sb);
10159 ++ freeze_protected = true;
10160 ++ }
10161 +
10162 + if (!IS_NOQUOTA(inode))
10163 + extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
10164 +@@ -271,7 +277,8 @@ void ext4_evict_inode(struct inode *inode)
10165 + * cleaned up.
10166 + */
10167 + ext4_orphan_del(NULL, inode);
10168 +- sb_end_intwrite(inode->i_sb);
10169 ++ if (freeze_protected)
10170 ++ sb_end_intwrite(inode->i_sb);
10171 + goto no_delete;
10172 + }
10173 +
10174 +@@ -312,7 +319,8 @@ void ext4_evict_inode(struct inode *inode)
10175 + stop_handle:
10176 + ext4_journal_stop(handle);
10177 + ext4_orphan_del(NULL, inode);
10178 +- sb_end_intwrite(inode->i_sb);
10179 ++ if (freeze_protected)
10180 ++ sb_end_intwrite(inode->i_sb);
10181 + ext4_xattr_inode_array_free(ea_inode_array);
10182 + goto no_delete;
10183 + }
10184 +@@ -341,7 +349,8 @@ stop_handle:
10185 + else
10186 + ext4_free_inode(handle, inode);
10187 + ext4_journal_stop(handle);
10188 +- sb_end_intwrite(inode->i_sb);
10189 ++ if (freeze_protected)
10190 ++ sb_end_intwrite(inode->i_sb);
10191 + ext4_xattr_inode_array_free(ea_inode_array);
10192 + return;
10193 + no_delete:
10194 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
10195 +index e5d43d2ee474d..cd69510f29472 100644
10196 +--- a/fs/ext4/mballoc.c
10197 ++++ b/fs/ext4/mballoc.c
10198 +@@ -4691,6 +4691,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
10199 + ext4_group_first_block_no(sb, group) +
10200 + EXT4_C2B(sbi, cluster),
10201 + "Block already on to-be-freed list");
10202 ++ kmem_cache_free(ext4_free_data_cachep, new_entry);
10203 + return 0;
10204 + }
10205 + }
10206 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
10207 +index 2a4a382f28fed..3ac2a4b32375d 100644
10208 +--- a/fs/f2fs/node.c
10209 ++++ b/fs/f2fs/node.c
10210 +@@ -109,7 +109,7 @@ static void clear_node_page_dirty(struct page *page)
10211 +
10212 + static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
10213 + {
10214 +- return f2fs_get_meta_page(sbi, current_nat_addr(sbi, nid));
10215 ++ return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
10216 + }
10217 +
10218 + static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
10219 +diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
10220 +index bccfc40b3a74a..d19483fa1fe89 100644
10221 +--- a/fs/jffs2/readinode.c
10222 ++++ b/fs/jffs2/readinode.c
10223 +@@ -672,6 +672,22 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r
10224 + jffs2_free_full_dirent(fd);
10225 + return -EIO;
10226 + }
10227 ++
10228 ++#ifdef CONFIG_JFFS2_SUMMARY
10229 ++ /*
10230 ++ * we use CONFIG_JFFS2_SUMMARY because without it, we
10231 ++ * have checked it while mounting
10232 ++ */
10233 ++ crc = crc32(0, fd->name, rd->nsize);
10234 ++ if (unlikely(crc != je32_to_cpu(rd->name_crc))) {
10235 ++ JFFS2_NOTICE("name CRC failed on dirent node at"
10236 ++ "%#08x: read %#08x,calculated %#08x\n",
10237 ++ ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
10238 ++ jffs2_mark_node_obsolete(c, ref);
10239 ++ jffs2_free_full_dirent(fd);
10240 ++ return 0;
10241 ++ }
10242 ++#endif
10243 + }
10244 +
10245 + fd->nhash = full_name_hash(NULL, fd->name, rd->nsize);
10246 +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
10247 +index 0e6406c4f3621..60636b2e35ea4 100644
10248 +--- a/fs/jffs2/super.c
10249 ++++ b/fs/jffs2/super.c
10250 +@@ -221,11 +221,28 @@ static int jffs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
10251 + return 0;
10252 + }
10253 +
10254 ++static inline void jffs2_update_mount_opts(struct fs_context *fc)
10255 ++{
10256 ++ struct jffs2_sb_info *new_c = fc->s_fs_info;
10257 ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(fc->root->d_sb);
10258 ++
10259 ++ mutex_lock(&c->alloc_sem);
10260 ++ if (new_c->mount_opts.override_compr) {
10261 ++ c->mount_opts.override_compr = new_c->mount_opts.override_compr;
10262 ++ c->mount_opts.compr = new_c->mount_opts.compr;
10263 ++ }
10264 ++ if (new_c->mount_opts.rp_size)
10265 ++ c->mount_opts.rp_size = new_c->mount_opts.rp_size;
10266 ++ mutex_unlock(&c->alloc_sem);
10267 ++}
10268 ++
10269 + static int jffs2_reconfigure(struct fs_context *fc)
10270 + {
10271 + struct super_block *sb = fc->root->d_sb;
10272 +
10273 + sync_filesystem(sb);
10274 ++ jffs2_update_mount_opts(fc);
10275 ++
10276 + return jffs2_do_remount_fs(sb, fc);
10277 + }
10278 +
10279 +diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
10280 +index 29891fad3f095..aa03a904d5ab2 100644
10281 +--- a/fs/jfs/jfs_dmap.h
10282 ++++ b/fs/jfs/jfs_dmap.h
10283 +@@ -183,7 +183,7 @@ typedef union dmtree {
10284 + #define dmt_leafidx t1.leafidx
10285 + #define dmt_height t1.height
10286 + #define dmt_budmin t1.budmin
10287 +-#define dmt_stree t1.stree
10288 ++#define dmt_stree t2.stree
10289 +
10290 + /*
10291 + * on-disk aggregate disk allocation map descriptor.
10292 +diff --git a/fs/lockd/host.c b/fs/lockd/host.c
10293 +index 7d46fafdbbe5a..584c03e11844e 100644
10294 +--- a/fs/lockd/host.c
10295 ++++ b/fs/lockd/host.c
10296 +@@ -439,12 +439,7 @@ nlm_bind_host(struct nlm_host *host)
10297 + * RPC rebind is required
10298 + */
10299 + if ((clnt = host->h_rpcclnt) != NULL) {
10300 +- if (time_after_eq(jiffies, host->h_nextrebind)) {
10301 +- rpc_force_rebind(clnt);
10302 +- host->h_nextrebind = jiffies + NLM_HOST_REBIND;
10303 +- dprintk("lockd: next rebind in %lu jiffies\n",
10304 +- host->h_nextrebind - jiffies);
10305 +- }
10306 ++ nlm_rebind_host(host);
10307 + } else {
10308 + unsigned long increment = nlmsvc_timeout;
10309 + struct rpc_timeout timeparms = {
10310 +@@ -493,13 +488,20 @@ nlm_bind_host(struct nlm_host *host)
10311 + return clnt;
10312 + }
10313 +
10314 +-/*
10315 +- * Force a portmap lookup of the remote lockd port
10316 ++/**
10317 ++ * nlm_rebind_host - If needed, force a portmap lookup of the peer's lockd port
10318 ++ * @host: NLM host handle for peer
10319 ++ *
10320 ++ * This is not needed when using a connection-oriented protocol, such as TCP.
10321 ++ * The existing autobind mechanism is sufficient to force a rebind when
10322 ++ * required, e.g. on connection state transitions.
10323 + */
10324 + void
10325 + nlm_rebind_host(struct nlm_host *host)
10326 + {
10327 +- dprintk("lockd: rebind host %s\n", host->h_name);
10328 ++ if (host->h_proto != IPPROTO_UDP)
10329 ++ return;
10330 ++
10331 + if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
10332 + rpc_force_rebind(host->h_rpcclnt);
10333 + host->h_nextrebind = jiffies + NLM_HOST_REBIND;
10334 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
10335 +index 6de41f7412808..53604cc090ca5 100644
10336 +--- a/fs/nfs/inode.c
10337 ++++ b/fs/nfs/inode.c
10338 +@@ -2151,7 +2151,7 @@ static int nfsiod_start(void)
10339 + {
10340 + struct workqueue_struct *wq;
10341 + dprintk("RPC: creating workqueue nfsiod\n");
10342 +- wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0);
10343 ++ wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
10344 + if (wq == NULL)
10345 + return -ENOMEM;
10346 + nfsiod_workqueue = wq;
10347 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
10348 +index ddc900df461c8..8598eba3fc234 100644
10349 +--- a/fs/nfs/nfs4proc.c
10350 ++++ b/fs/nfs/nfs4proc.c
10351 +@@ -4899,12 +4899,12 @@ static int _nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred,
10352 + u64 cookie, struct page **pages, unsigned int count, bool plus)
10353 + {
10354 + struct inode *dir = d_inode(dentry);
10355 ++ struct nfs_server *server = NFS_SERVER(dir);
10356 + struct nfs4_readdir_arg args = {
10357 + .fh = NFS_FH(dir),
10358 + .pages = pages,
10359 + .pgbase = 0,
10360 + .count = count,
10361 +- .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
10362 + .plus = plus,
10363 + };
10364 + struct nfs4_readdir_res res;
10365 +@@ -4919,9 +4919,15 @@ static int _nfs4_proc_readdir(struct dentry *dentry, const struct cred *cred,
10366 + dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
10367 + dentry,
10368 + (unsigned long long)cookie);
10369 ++ if (!(server->caps & NFS_CAP_SECURITY_LABEL))
10370 ++ args.bitmask = server->attr_bitmask_nl;
10371 ++ else
10372 ++ args.bitmask = server->attr_bitmask;
10373 ++
10374 + nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
10375 + res.pgbase = args.pgbase;
10376 +- status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
10377 ++ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
10378 ++ &res.seq_res, 0);
10379 + if (status >= 0) {
10380 + memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
10381 + status += args.pgbase;
10382 +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
10383 +index 677751bc3a334..9a022a4fb9643 100644
10384 +--- a/fs/nfs/nfs4xdr.c
10385 ++++ b/fs/nfs/nfs4xdr.c
10386 +@@ -3012,15 +3012,19 @@ static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req,
10387 + struct compound_hdr hdr = {
10388 + .minorversion = nfs4_xdr_minorversion(&args->seq_args),
10389 + };
10390 ++ uint32_t replen;
10391 +
10392 + encode_compound_hdr(xdr, req, &hdr);
10393 + encode_sequence(xdr, &args->seq_args, &hdr);
10394 ++
10395 ++ replen = hdr.replen + op_decode_hdr_maxsz;
10396 ++
10397 + encode_getdeviceinfo(xdr, args, &hdr);
10398 +
10399 +- /* set up reply kvec. Subtract notification bitmap max size (2)
10400 +- * so that notification bitmap is put in xdr_buf tail */
10401 ++ /* set up reply kvec. device_addr4 opaque data is read into the
10402 ++ * pages */
10403 + rpc_prepare_reply_pages(req, args->pdev->pages, args->pdev->pgbase,
10404 +- args->pdev->pglen, hdr.replen - 2);
10405 ++ args->pdev->pglen, replen + 2 + 1);
10406 + encode_nops(&hdr);
10407 + }
10408 +
10409 +diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
10410 +index b73d9dd37f73c..26f2a50eceac9 100644
10411 +--- a/fs/nfs_common/grace.c
10412 ++++ b/fs/nfs_common/grace.c
10413 +@@ -69,10 +69,14 @@ __state_in_grace(struct net *net, bool open)
10414 + if (!open)
10415 + return !list_empty(grace_list);
10416 +
10417 ++ spin_lock(&grace_lock);
10418 + list_for_each_entry(lm, grace_list, list) {
10419 +- if (lm->block_opens)
10420 ++ if (lm->block_opens) {
10421 ++ spin_unlock(&grace_lock);
10422 + return true;
10423 ++ }
10424 + }
10425 ++ spin_unlock(&grace_lock);
10426 + return false;
10427 + }
10428 +
10429 +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
10430 +index e8bee8ff30c59..155a4e43b24ee 100644
10431 +--- a/fs/nfsd/nfssvc.c
10432 ++++ b/fs/nfsd/nfssvc.c
10433 +@@ -516,8 +516,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
10434 + return;
10435 +
10436 + nfsd_shutdown_net(net);
10437 +- printk(KERN_WARNING "nfsd: last server has exited, flushing export "
10438 +- "cache\n");
10439 ++ pr_info("nfsd: last server has exited, flushing export cache\n");
10440 + nfsd_export_flush(net);
10441 + }
10442 +
10443 +diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
10444 +index 276c27fb99280..36dce17b01016 100644
10445 +--- a/fs/quota/quota_v2.c
10446 ++++ b/fs/quota/quota_v2.c
10447 +@@ -159,6 +159,25 @@ static int v2_read_file_info(struct super_block *sb, int type)
10448 + qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk);
10449 + qinfo->dqi_ops = &v2r1_qtree_ops;
10450 + }
10451 ++ ret = -EUCLEAN;
10452 ++ /* Some sanity checks of the read headers... */
10453 ++ if ((loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits >
10454 ++ i_size_read(sb_dqopt(sb)->files[type])) {
10455 ++ quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
10456 ++ (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
10457 ++ i_size_read(sb_dqopt(sb)->files[type]));
10458 ++ goto out;
10459 ++ }
10460 ++ if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
10461 ++ quota_error(sb, "Free block number too big (%u >= %u).",
10462 ++ qinfo->dqi_free_blk, qinfo->dqi_blocks);
10463 ++ goto out;
10464 ++ }
10465 ++ if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
10466 ++ quota_error(sb, "Block with free entry too big (%u >= %u).",
10467 ++ qinfo->dqi_free_entry, qinfo->dqi_blocks);
10468 ++ goto out;
10469 ++ }
10470 + ret = 0;
10471 + out:
10472 + up_read(&dqopt->dqio_sem);
10473 +diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
10474 +index f985a3fbbb36a..b10418b5fb719 100644
10475 +--- a/fs/ubifs/auth.c
10476 ++++ b/fs/ubifs/auth.c
10477 +@@ -352,8 +352,10 @@ int ubifs_init_authentication(struct ubifs_info *c)
10478 + c->authenticated = true;
10479 +
10480 + c->log_hash = ubifs_hash_get_desc(c);
10481 +- if (IS_ERR(c->log_hash))
10482 ++ if (IS_ERR(c->log_hash)) {
10483 ++ err = PTR_ERR(c->log_hash);
10484 + goto out_free_hmac;
10485 ++ }
10486 +
10487 + err = 0;
10488 +
10489 +diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
10490 +index 7e4bfaf2871fa..eae9cf5a57b05 100644
10491 +--- a/fs/ubifs/io.c
10492 ++++ b/fs/ubifs/io.c
10493 +@@ -319,7 +319,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
10494 + {
10495 + uint32_t crc;
10496 +
10497 +- ubifs_assert(c, pad >= 0 && !(pad & 7));
10498 ++ ubifs_assert(c, pad >= 0);
10499 +
10500 + if (pad >= UBIFS_PAD_NODE_SZ) {
10501 + struct ubifs_ch *ch = buf;
10502 +@@ -764,6 +764,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
10503 + * write-buffer.
10504 + */
10505 + memcpy(wbuf->buf + wbuf->used, buf, len);
10506 ++ if (aligned_len > len) {
10507 ++ ubifs_assert(c, aligned_len - len < 8);
10508 ++ ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
10509 ++ }
10510 +
10511 + if (aligned_len == wbuf->avail) {
10512 + dbg_io("flush jhead %s wbuf to LEB %d:%d",
10513 +@@ -856,13 +860,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
10514 + }
10515 +
10516 + spin_lock(&wbuf->lock);
10517 +- if (aligned_len)
10518 ++ if (aligned_len) {
10519 + /*
10520 + * And now we have what's left and what does not take whole
10521 + * max. write unit, so write it to the write-buffer and we are
10522 + * done.
10523 + */
10524 + memcpy(wbuf->buf, buf + written, len);
10525 ++ if (aligned_len > len) {
10526 ++ ubifs_assert(c, aligned_len - len < 8);
10527 ++ ubifs_pad(c, wbuf->buf + len, aligned_len - len);
10528 ++ }
10529 ++ }
10530 +
10531 + if (c->leb_size - wbuf->offs >= c->max_write_size)
10532 + wbuf->size = c->max_write_size;
10533 +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
10534 +index 3f6fddeb75199..defed629073bf 100644
10535 +--- a/include/acpi/acpi_bus.h
10536 ++++ b/include/acpi/acpi_bus.h
10537 +@@ -614,7 +614,6 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
10538 + bool acpi_pm_device_can_wakeup(struct device *dev);
10539 + int acpi_pm_device_sleep_state(struct device *, int *, int);
10540 + int acpi_pm_set_device_wakeup(struct device *dev, bool enable);
10541 +-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable);
10542 + #else
10543 + static inline void acpi_pm_wakeup_event(struct device *dev)
10544 + {
10545 +@@ -645,10 +644,6 @@ static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
10546 + {
10547 + return -ENODEV;
10548 + }
10549 +-static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
10550 +-{
10551 +- return -ENODEV;
10552 +-}
10553 + #endif
10554 +
10555 + #ifdef CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT
10556 +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
10557 +index 1b261c51b3a3a..f5c21b7d29748 100644
10558 +--- a/include/linux/netfilter/x_tables.h
10559 ++++ b/include/linux/netfilter/x_tables.h
10560 +@@ -227,7 +227,7 @@ struct xt_table {
10561 + unsigned int valid_hooks;
10562 +
10563 + /* Man behind the curtain... */
10564 +- struct xt_table_info *private;
10565 ++ struct xt_table_info __rcu *private;
10566 +
10567 + /* Set this to THIS_MODULE if you are a module, otherwise NULL */
10568 + struct module *me;
10569 +@@ -448,6 +448,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
10570 +
10571 + struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
10572 +
10573 ++struct xt_table_info
10574 ++*xt_table_get_private_protected(const struct xt_table *table);
10575 ++
10576 + #ifdef CONFIG_COMPAT
10577 + #include <net/compat.h>
10578 +
10579 +diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
10580 +index fe61e3b9a9ca2..7145795b4b9da 100644
10581 +--- a/include/linux/pm_runtime.h
10582 ++++ b/include/linux/pm_runtime.h
10583 +@@ -224,6 +224,27 @@ static inline int pm_runtime_get_sync(struct device *dev)
10584 + return __pm_runtime_resume(dev, RPM_GET_PUT);
10585 + }
10586 +
10587 ++/**
10588 ++ * pm_runtime_resume_and_get - Bump up usage counter of a device and resume it.
10589 ++ * @dev: Target device.
10590 ++ *
10591 ++ * Resume @dev synchronously and if that is successful, increment its runtime
10592 ++ * PM usage counter. Return 0 if the runtime PM usage counter of @dev has been
10593 ++ * incremented or a negative error code otherwise.
10594 ++ */
10595 ++static inline int pm_runtime_resume_and_get(struct device *dev)
10596 ++{
10597 ++ int ret;
10598 ++
10599 ++ ret = __pm_runtime_resume(dev, RPM_GET_PUT);
10600 ++ if (ret < 0) {
10601 ++ pm_runtime_put_noidle(dev);
10602 ++ return ret;
10603 ++ }
10604 ++
10605 ++ return 0;
10606 ++}
10607 ++
10608 + static inline int pm_runtime_put(struct device *dev)
10609 + {
10610 + return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC);
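
The kernel-doc above describes the new pm_runtime_resume_and_get() helper; below is a minimal usage sketch (the function name and the surrounding error handling are illustrative assumptions, not part of this patch):

#include <linux/pm_runtime.h>

static int example_start_hw(struct device *dev)
{
	int ret;

	/*
	 * Resume the device and take a usage-count reference in one call.
	 * On failure the helper has already dropped the counter again, so
	 * this error path does not need pm_runtime_put_noidle().
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access the hardware ... */

	pm_runtime_put(dev);
	return 0;
}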
10611 +diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
10612 +index 13eafebf3549a..b83a3f944f287 100644
10613 +--- a/include/linux/prefetch.h
10614 ++++ b/include/linux/prefetch.h
10615 +@@ -15,6 +15,7 @@
10616 + #include <asm/processor.h>
10617 + #include <asm/cache.h>
10618 +
10619 ++struct page;
10620 + /*
10621 + prefetch(x) attempts to pre-emptively get the memory pointed to
10622 + by address "x" into the CPU L1 cache.
10623 +@@ -62,4 +63,11 @@ static inline void prefetch_range(void *addr, size_t len)
10624 + #endif
10625 + }
10626 +
10627 ++static inline void prefetch_page_address(struct page *page)
10628 ++{
10629 ++#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL)
10630 ++ prefetch(page);
10631 ++#endif
10632 ++}
10633 ++
10634 + #endif
10635 +diff --git a/include/linux/security.h b/include/linux/security.h
10636 +index fd022768e91df..df90399a8af98 100644
10637 +--- a/include/linux/security.h
10638 ++++ b/include/linux/security.h
10639 +@@ -852,7 +852,7 @@ static inline int security_inode_killpriv(struct dentry *dentry)
10640 +
10641 + static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
10642 + {
10643 +- return -EOPNOTSUPP;
10644 ++ return cap_inode_getsecurity(inode, name, buffer, alloc);
10645 + }
10646 +
10647 + static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
10648 +diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
10649 +index aa5deb041c25d..7cc952282e8be 100644
10650 +--- a/include/linux/seq_buf.h
10651 ++++ b/include/linux/seq_buf.h
10652 +@@ -30,7 +30,7 @@ static inline void seq_buf_clear(struct seq_buf *s)
10653 + }
10654 +
10655 + static inline void
10656 +-seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
10657 ++seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
10658 + {
10659 + s->buffer = buf;
10660 + s->size = size;
10661 +diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
10662 +index d783e15ba898c..d7ef5b97174ce 100644
10663 +--- a/include/linux/sunrpc/xprt.h
10664 ++++ b/include/linux/sunrpc/xprt.h
10665 +@@ -330,6 +330,7 @@ struct xprt_class {
10666 + struct rpc_xprt * (*setup)(struct xprt_create *);
10667 + struct module *owner;
10668 + char name[32];
10669 ++ const char * netid[];
10670 + };
10671 +
10672 + /*
10673 +diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
10674 +index 6609b39a72326..6db257466af68 100644
10675 +--- a/include/linux/trace_seq.h
10676 ++++ b/include/linux/trace_seq.h
10677 +@@ -12,7 +12,7 @@
10678 + */
10679 +
10680 + struct trace_seq {
10681 +- unsigned char buffer[PAGE_SIZE];
10682 ++ char buffer[PAGE_SIZE];
10683 + struct seq_buf seq;
10684 + int full;
10685 + };
10686 +@@ -51,7 +51,7 @@ static inline int trace_seq_used(struct trace_seq *s)
10687 + * that is about to be written to and then return the result
10688 + * of that write.
10689 + */
10690 +-static inline unsigned char *
10691 ++static inline char *
10692 + trace_seq_buffer_ptr(struct trace_seq *s)
10693 + {
10694 + return s->buffer + seq_buf_used(&s->seq);
10695 +diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
10696 +index 45f88f0248c4e..c072ed1418113 100644
10697 +--- a/include/media/v4l2-mediabus.h
10698 ++++ b/include/media/v4l2-mediabus.h
10699 +@@ -78,6 +78,7 @@
10700 + * @V4L2_MBUS_CCP2: CCP2 (Compact Camera Port 2)
10701 + * @V4L2_MBUS_CSI2_DPHY: MIPI CSI-2 serial interface, with D-PHY
10702 + * @V4L2_MBUS_CSI2_CPHY: MIPI CSI-2 serial interface, with C-PHY
10703 ++ * @V4L2_MBUS_INVALID: invalid bus type (keep as last)
10704 + */
10705 + enum v4l2_mbus_type {
10706 + V4L2_MBUS_UNKNOWN,
10707 +@@ -87,6 +88,7 @@ enum v4l2_mbus_type {
10708 + V4L2_MBUS_CCP2,
10709 + V4L2_MBUS_CSI2_DPHY,
10710 + V4L2_MBUS_CSI2_CPHY,
10711 ++ V4L2_MBUS_INVALID,
10712 + };
10713 +
10714 + /**
10715 +diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
10716 +index a576bcbba2fcc..f694f08ad635b 100644
10717 +--- a/include/net/netfilter/nf_tables.h
10718 ++++ b/include/net/netfilter/nf_tables.h
10719 +@@ -1462,4 +1462,10 @@ void nft_chain_filter_fini(void);
10720 +
10721 + void __init nft_chain_route_init(void);
10722 + void nft_chain_route_fini(void);
10723 ++
10724 ++void nf_tables_trans_destroy_flush_work(void);
10725 ++
10726 ++int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result);
10727 ++__be64 nf_jiffies64_to_msecs(u64 input);
10728 ++
10729 + #endif /* _NET_NF_TABLES_H */
10730 +diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
10731 +index 2832134e53971..731780804c2fd 100644
10732 +--- a/include/uapi/linux/android/binder.h
10733 ++++ b/include/uapi/linux/android/binder.h
10734 +@@ -248,6 +248,7 @@ enum transaction_flags {
10735 + TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
10736 + TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
10737 + TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
10738 ++ TF_CLEAR_BUF = 0x20, /* clear buffer on txn complete */
10739 + };
10740 +
10741 + struct binder_transaction_data {
10742 +diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
10743 +index bc2bcdec377b4..7690507714231 100644
10744 +--- a/include/uapi/linux/if_alg.h
10745 ++++ b/include/uapi/linux/if_alg.h
10746 +@@ -24,6 +24,22 @@ struct sockaddr_alg {
10747 + __u8 salg_name[64];
10748 + };
10749 +
10750 ++/*
10751 ++ * Linux v4.12 and later removed the 64-byte limit on salg_name[]; it's now an
10752 ++ * arbitrary-length field. We had to keep the original struct above for source
10753 ++ * compatibility with existing userspace programs, though. Use the new struct
10754 ++ * below if support for very long algorithm names is needed. To do this,
10755 ++ * allocate 'sizeof(struct sockaddr_alg_new) + strlen(algname) + 1' bytes, and
10756 ++ * copy algname (including the null terminator) into salg_name.
10757 ++ */
10758 ++struct sockaddr_alg_new {
10759 ++ __u16 salg_family;
10760 ++ __u8 salg_type[14];
10761 ++ __u32 salg_feat;
10762 ++ __u32 salg_mask;
10763 ++ __u8 salg_name[];
10764 ++};
10765 ++
10766 + struct af_alg_iv {
10767 + __u32 ivlen;
10768 + __u8 iv[0];
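
As a rough userspace illustration of the allocation recipe in the comment above (the algorithm type, helper name and error handling are placeholders; a uapi header that already provides struct sockaddr_alg_new is assumed):

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

static int example_bind_alg(const char *algname)
{
	/* room for the header plus the arbitrarily long name and its NUL */
	size_t len = sizeof(struct sockaddr_alg_new) + strlen(algname) + 1;
	struct sockaddr_alg_new *sa = calloc(1, len);
	int fd;

	if (!sa)
		return -1;
	sa->salg_family = AF_ALG;
	strcpy((char *)sa->salg_type, "hash");	/* example type */
	strcpy((char *)sa->salg_name, algname);	/* copies the NUL too */

	fd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (fd < 0) {
		free(sa);
		return -1;
	}
	if (bind(fd, (struct sockaddr *)sa, len) < 0) {
		close(fd);
		free(sa);
		return -1;
	}
	free(sa);
	return fd;
}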
10769 +diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
10770 +index 869c816d5f8c3..eba01ab5a55e0 100644
10771 +--- a/include/xen/xenbus.h
10772 ++++ b/include/xen/xenbus.h
10773 +@@ -59,6 +59,15 @@ struct xenbus_watch
10774 + /* Path being watched. */
10775 + const char *node;
10776 +
10777 ++ unsigned int nr_pending;
10778 ++
10779 ++ /*
10780 ++ * Called just before enqueuing a new event while a spinlock is held.
10781 ++ * The event will be discarded if this callback returns false.
10782 ++ */
10783 ++ bool (*will_handle)(struct xenbus_watch *,
10784 ++ const char *path, const char *token);
10785 ++
10786 + /* Callback (executed in a process context with no locks held). */
10787 + void (*callback)(struct xenbus_watch *,
10788 + const char *path, const char *token);
10789 +@@ -192,10 +201,14 @@ void xenbus_probe(struct work_struct *);
10790 +
10791 + int xenbus_watch_path(struct xenbus_device *dev, const char *path,
10792 + struct xenbus_watch *watch,
10793 ++ bool (*will_handle)(struct xenbus_watch *,
10794 ++ const char *, const char *),
10795 + void (*callback)(struct xenbus_watch *,
10796 + const char *, const char *));
10797 +-__printf(4, 5)
10798 ++__printf(5, 6)
10799 + int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
10800 ++ bool (*will_handle)(struct xenbus_watch *,
10801 ++ const char *, const char *),
10802 + void (*callback)(struct xenbus_watch *,
10803 + const char *, const char *),
10804 + const char *pathfmt, ...);
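
A hypothetical sketch of how a backend might use the new will_handle hook (the callback name and the pending-event cap are invented for illustration; only the xenbus_watch_path() signature reflects this patch):

#include <xen/xenbus.h>

/* Runs under a spinlock, so it must stay cheap and must not sleep. */
static bool example_will_handle(struct xenbus_watch *watch,
				const char *path, const char *token)
{
	/* Discard further events once too many are already queued. */
	return watch->nr_pending < 32;	/* 32 is an arbitrary example cap */
}

static int example_setup_watch(struct xenbus_device *dev,
			       struct xenbus_watch *watch, const char *path,
			       void (*callback)(struct xenbus_watch *,
						const char *, const char *))
{
	return xenbus_watch_path(dev, path, watch,
				 example_will_handle, callback);
}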
10805 +diff --git a/init/initramfs.c b/init/initramfs.c
10806 +index 5feee4f616d55..00a32799a38b0 100644
10807 +--- a/init/initramfs.c
10808 ++++ b/init/initramfs.c
10809 +@@ -527,7 +527,7 @@ extern unsigned long __initramfs_size;
10810 + #include <linux/initrd.h>
10811 + #include <linux/kexec.h>
10812 +
10813 +-void __weak free_initrd_mem(unsigned long start, unsigned long end)
10814 ++void __weak __init free_initrd_mem(unsigned long start, unsigned long end)
10815 + {
10816 + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
10817 + "initrd");
10818 +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
10819 +index c87ee6412b36a..bab6a934862e3 100644
10820 +--- a/kernel/cgroup/cpuset.c
10821 ++++ b/kernel/cgroup/cpuset.c
10822 +@@ -981,25 +981,48 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
10823 + */
10824 + static void rebuild_sched_domains_locked(void)
10825 + {
10826 ++ struct cgroup_subsys_state *pos_css;
10827 + struct sched_domain_attr *attr;
10828 + cpumask_var_t *doms;
10829 ++ struct cpuset *cs;
10830 + int ndoms;
10831 +
10832 + lockdep_assert_cpus_held();
10833 + percpu_rwsem_assert_held(&cpuset_rwsem);
10834 +
10835 + /*
10836 +- * We have raced with CPU hotplug. Don't do anything to avoid
10837 ++ * If we have raced with CPU hotplug, return early to avoid
10838 + * passing doms with offlined cpu to partition_sched_domains().
10839 +- * Anyways, hotplug work item will rebuild sched domains.
10840 ++ * Anyways, cpuset_hotplug_workfn() will rebuild sched domains.
10841 ++ *
10842 ++ * With no CPUs in any subpartitions, top_cpuset's effective CPUs
10843 ++ * should be the same as the active CPUs, so checking only top_cpuset
10844 ++ * is enough to detect racing CPU offlines.
10845 + */
10846 + if (!top_cpuset.nr_subparts_cpus &&
10847 + !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
10848 + return;
10849 +
10850 +- if (top_cpuset.nr_subparts_cpus &&
10851 +- !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask))
10852 +- return;
10853 ++ /*
10854 ++ * With subpartition CPUs, however, the effective CPUs of a partition
10855 ++ * root should be only a subset of the active CPUs. Since a CPU in any
10856 ++ * partition root could be offlined, all must be checked.
10857 ++ */
10858 ++ if (top_cpuset.nr_subparts_cpus) {
10859 ++ rcu_read_lock();
10860 ++ cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
10861 ++ if (!is_partition_root(cs)) {
10862 ++ pos_css = css_rightmost_descendant(pos_css);
10863 ++ continue;
10864 ++ }
10865 ++ if (!cpumask_subset(cs->effective_cpus,
10866 ++ cpu_active_mask)) {
10867 ++ rcu_read_unlock();
10868 ++ return;
10869 ++ }
10870 ++ }
10871 ++ rcu_read_unlock();
10872 ++ }
10873 +
10874 + /* Generate domain masks and attrs */
10875 + ndoms = generate_sched_domains(&doms, &attr);
10876 +diff --git a/kernel/cpu.c b/kernel/cpu.c
10877 +index 7527825ac7daa..fa0e5727b4d9c 100644
10878 +--- a/kernel/cpu.c
10879 ++++ b/kernel/cpu.c
10880 +@@ -815,6 +815,10 @@ void __init cpuhp_threads_init(void)
10881 + }
10882 +
10883 + #ifdef CONFIG_HOTPLUG_CPU
10884 ++#ifndef arch_clear_mm_cpumask_cpu
10885 ++#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
10886 ++#endif
10887 ++
10888 + /**
10889 + * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
10890 + * @cpu: a CPU id
10891 +@@ -850,7 +854,7 @@ void clear_tasks_mm_cpumask(int cpu)
10892 + t = find_lock_task_mm(p);
10893 + if (!t)
10894 + continue;
10895 +- cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
10896 ++ arch_clear_mm_cpumask_cpu(cpu, t->mm);
10897 + task_unlock(t);
10898 + }
10899 + rcu_read_unlock();
10900 +diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
10901 +index 5a60de39457c7..5e03cbee70d67 100644
10902 +--- a/kernel/irq/irqdomain.c
10903 ++++ b/kernel/irq/irqdomain.c
10904 +@@ -1288,8 +1288,15 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
10905 + unsigned int irq_base,
10906 + unsigned int nr_irqs)
10907 + {
10908 +- if (domain->ops->free)
10909 +- domain->ops->free(domain, irq_base, nr_irqs);
10910 ++ unsigned int i;
10911 ++
10912 ++ if (!domain->ops->free)
10913 ++ return;
10914 ++
10915 ++ for (i = 0; i < nr_irqs; i++) {
10916 ++ if (irq_domain_get_irq_data(domain, irq_base + i))
10917 ++ domain->ops->free(domain, irq_base + i, 1);
10918 ++ }
10919 + }
10920 +
10921 + int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
10922 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
10923 +index 4511532b08b84..7841e738e38f0 100644
10924 +--- a/kernel/sched/core.c
10925 ++++ b/kernel/sched/core.c
10926 +@@ -5679,12 +5679,8 @@ static void do_sched_yield(void)
10927 + schedstat_inc(rq->yld_count);
10928 + current->sched_class->yield_task(rq);
10929 +
10930 +- /*
10931 +- * Since we are going to call schedule() anyway, there's
10932 +- * no need to preempt or enable interrupts:
10933 +- */
10934 + preempt_disable();
10935 +- rq_unlock(rq, &rf);
10936 ++ rq_unlock_irq(rq, &rf);
10937 + sched_preempt_enable_no_resched();
10938 +
10939 + schedule();
10940 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
10941 +index 4cb00538a207b..4ce8c11e5e4ae 100644
10942 +--- a/kernel/sched/deadline.c
10943 ++++ b/kernel/sched/deadline.c
10944 +@@ -2469,7 +2469,7 @@ int sched_dl_global_validate(void)
10945 + u64 period = global_rt_period();
10946 + u64 new_bw = to_ratio(period, runtime);
10947 + struct dl_bw *dl_b;
10948 +- int cpu, ret = 0;
10949 ++ int cpu, cpus, ret = 0;
10950 + unsigned long flags;
10951 +
10952 + /*
10953 +@@ -2484,9 +2484,10 @@ int sched_dl_global_validate(void)
10954 + for_each_possible_cpu(cpu) {
10955 + rcu_read_lock_sched();
10956 + dl_b = dl_bw_of(cpu);
10957 ++ cpus = dl_bw_cpus(cpu);
10958 +
10959 + raw_spin_lock_irqsave(&dl_b->lock, flags);
10960 +- if (new_bw < dl_b->total_bw)
10961 ++ if (new_bw * cpus < dl_b->total_bw)
10962 + ret = -EBUSY;
10963 + raw_spin_unlock_irqrestore(&dl_b->lock, flags);
10964 +
10965 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
10966 +index 3e7590813844f..e10fb9bf2988c 100644
10967 +--- a/kernel/sched/sched.h
10968 ++++ b/kernel/sched/sched.h
10969 +@@ -247,30 +247,6 @@ struct rt_bandwidth {
10970 +
10971 + void __dl_clear_params(struct task_struct *p);
10972 +
10973 +-/*
10974 +- * To keep the bandwidth of -deadline tasks and groups under control
10975 +- * we need some place where:
10976 +- * - store the maximum -deadline bandwidth of the system (the group);
10977 +- * - cache the fraction of that bandwidth that is currently allocated.
10978 +- *
10979 +- * This is all done in the data structure below. It is similar to the
10980 +- * one used for RT-throttling (rt_bandwidth), with the main difference
10981 +- * that, since here we are only interested in admission control, we
10982 +- * do not decrease any runtime while the group "executes", neither we
10983 +- * need a timer to replenish it.
10984 +- *
10985 +- * With respect to SMP, the bandwidth is given on a per-CPU basis,
10986 +- * meaning that:
10987 +- * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
10988 +- * - dl_total_bw array contains, in the i-eth element, the currently
10989 +- * allocated bandwidth on the i-eth CPU.
10990 +- * Moreover, groups consume bandwidth on each CPU, while tasks only
10991 +- * consume bandwidth on the CPU they're running on.
10992 +- * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
10993 +- * that will be shown the next time the proc or cgroup controls will
10994 +- * be red. It on its turn can be changed by writing on its own
10995 +- * control.
10996 +- */
10997 + struct dl_bandwidth {
10998 + raw_spinlock_t dl_runtime_lock;
10999 + u64 dl_runtime;
11000 +@@ -282,6 +258,24 @@ static inline int dl_bandwidth_enabled(void)
11001 + return sysctl_sched_rt_runtime >= 0;
11002 + }
11003 +
11004 ++/*
11005 ++ * To keep the bandwidth of -deadline tasks under control
11006 ++ * we need some place where:
11007 ++ * - store the maximum -deadline bandwidth of each cpu;
11008 ++ * - cache the fraction of bandwidth that is currently allocated in
11009 ++ * each root domain;
11010 ++ *
11011 ++ * This is all done in the data structure below. It is similar to the
11012 ++ * one used for RT-throttling (rt_bandwidth), with the main difference
11013 ++ * that, since here we are only interested in admission control, we
11014 ++ * do not decrease any runtime while the group "executes", neither we
11015 ++ * need a timer to replenish it.
11016 ++ *
11017 ++ * With respect to SMP, bandwidth is given on a per root domain basis,
11018 ++ * meaning that:
11019 ++ * - bw (< 100%) is the deadline bandwidth of each CPU;
11020 ++ * - total_bw is the currently allocated bandwidth in each root domain;
11021 ++ */
11022 + struct dl_bw {
11023 + raw_spinlock_t lock;
11024 + u64 bw;
11025 +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
11026 +index 2372b861f2cfa..74c1db7178cff 100644
11027 +--- a/kernel/trace/bpf_trace.c
11028 ++++ b/kernel/trace/bpf_trace.c
11029 +@@ -1320,10 +1320,12 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name)
11030 +
11031 + void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
11032 + {
11033 +- struct module *mod = __module_address((unsigned long)btp);
11034 ++ struct module *mod;
11035 +
11036 +- if (mod)
11037 +- module_put(mod);
11038 ++ preempt_disable();
11039 ++ mod = __module_address((unsigned long)btp);
11040 ++ module_put(mod);
11041 ++ preempt_enable();
11042 + }
11043 +
11044 + static __always_inline
11045 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
11046 +index 6e5c6b023dc32..077877ed54f73 100644
11047 +--- a/kernel/trace/ring_buffer.c
11048 ++++ b/kernel/trace/ring_buffer.c
11049 +@@ -129,7 +129,16 @@ int ring_buffer_print_entry_header(struct trace_seq *s)
11050 + #define RB_ALIGNMENT 4U
11051 + #define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
11052 + #define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
11053 +-#define RB_ALIGN_DATA __aligned(RB_ALIGNMENT)
11054 ++
11055 ++#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
11056 ++# define RB_FORCE_8BYTE_ALIGNMENT 0
11057 ++# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
11058 ++#else
11059 ++# define RB_FORCE_8BYTE_ALIGNMENT 1
11060 ++# define RB_ARCH_ALIGNMENT 8U
11061 ++#endif
11062 ++
11063 ++#define RB_ALIGN_DATA __aligned(RB_ARCH_ALIGNMENT)
11064 +
11065 + /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
11066 + #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
11067 +@@ -2367,7 +2376,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
11068 +
11069 + event->time_delta = delta;
11070 + length -= RB_EVNT_HDR_SIZE;
11071 +- if (length > RB_MAX_SMALL_DATA) {
11072 ++ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
11073 + event->type_len = 0;
11074 + event->array[0] = length;
11075 + } else
11076 +@@ -2382,11 +2391,11 @@ static unsigned rb_calculate_event_length(unsigned length)
11077 + if (!length)
11078 + length++;
11079 +
11080 +- if (length > RB_MAX_SMALL_DATA)
11081 ++ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
11082 + length += sizeof(event.array[0]);
11083 +
11084 + length += RB_EVNT_HDR_SIZE;
11085 +- length = ALIGN(length, RB_ALIGNMENT);
11086 ++ length = ALIGN(length, RB_ARCH_ALIGNMENT);
11087 +
11088 + /*
11089 + * In case the time delta is larger than the 27 bits for it
11090 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
11091 +index 1c869c6b825f3..4357f5475a504 100644
11092 +--- a/mm/page_alloc.c
11093 ++++ b/mm/page_alloc.c
11094 +@@ -2346,12 +2346,12 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
11095 + return false;
11096 + }
11097 +
11098 +-static inline void boost_watermark(struct zone *zone)
11099 ++static inline bool boost_watermark(struct zone *zone)
11100 + {
11101 + unsigned long max_boost;
11102 +
11103 + if (!watermark_boost_factor)
11104 +- return;
11105 ++ return false;
11106 + /*
11107 + * Don't bother in zones that are unlikely to produce results.
11108 + * On small machines, including kdump capture kernels running
11109 +@@ -2359,7 +2359,7 @@ static inline void boost_watermark(struct zone *zone)
11110 + * memory situation immediately.
11111 + */
11112 + if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
11113 +- return;
11114 ++ return false;
11115 +
11116 + max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
11117 + watermark_boost_factor, 10000);
11118 +@@ -2373,12 +2373,14 @@ static inline void boost_watermark(struct zone *zone)
11119 + * boosted watermark resulting in a hang.
11120 + */
11121 + if (!max_boost)
11122 +- return;
11123 ++ return false;
11124 +
11125 + max_boost = max(pageblock_nr_pages, max_boost);
11126 +
11127 + zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
11128 + max_boost);
11129 ++
11130 ++ return true;
11131 + }
11132 +
11133 + /*
11134 +@@ -2417,8 +2419,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
11135 + * likelihood of future fallbacks. Wake kswapd now as the node
11136 + * may be balanced overall and kswapd will not wake naturally.
11137 + */
11138 +- boost_watermark(zone);
11139 +- if (alloc_flags & ALLOC_KSWAPD)
11140 ++ if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
11141 + set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
11142 +
11143 + /* We are not allowed to try stealing from the whole block */
11144 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
11145 +index 9917b399ddd0d..0a88645f103f0 100644
11146 +--- a/net/bluetooth/hci_event.c
11147 ++++ b/net/bluetooth/hci_event.c
11148 +@@ -4791,6 +4791,11 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev,
11149 + return;
11150 + }
11151 +
11152 ++ if (!hcon->amp_mgr) {
11153 ++ hci_dev_unlock(hdev);
11154 ++ return;
11155 ++ }
11156 ++
11157 + if (ev->status) {
11158 + hci_conn_del(hcon);
11159 + hci_dev_unlock(hdev);
11160 +@@ -5711,21 +5716,19 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
11161 + struct sk_buff *skb)
11162 + {
11163 + u8 num_reports = skb->data[0];
11164 +- void *ptr = &skb->data[1];
11165 ++ struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
11166 +
11167 +- hci_dev_lock(hdev);
11168 ++ if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
11169 ++ return;
11170 +
11171 +- while (num_reports--) {
11172 +- struct hci_ev_le_direct_adv_info *ev = ptr;
11173 ++ hci_dev_lock(hdev);
11174 +
11175 ++ for (; num_reports; num_reports--, ev++)
11176 + process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
11177 + ev->bdaddr_type, &ev->direct_addr,
11178 + ev->direct_addr_type, ev->rssi, NULL, 0,
11179 + false);
11180 +
11181 +- ptr += sizeof(*ev);
11182 +- }
11183 +-
11184 + hci_dev_unlock(hdev);
11185 + }
11186 +
11187 +diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
11188 +index 99a6de52b21da..a5502c5aa44e7 100644
11189 +--- a/net/core/lwt_bpf.c
11190 ++++ b/net/core/lwt_bpf.c
11191 +@@ -39,12 +39,11 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
11192 + {
11193 + int ret;
11194 +
11195 +- /* Preempt disable is needed to protect per-cpu redirect_info between
11196 +- * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
11197 +- * access to maps strictly require a rcu_read_lock() for protection,
11198 +- * mixing with BH RCU lock doesn't work.
11199 ++ /* Preempt disable and BH disable are needed to protect per-cpu
11200 ++ * redirect_info between BPF prog and skb_do_redirect().
11201 + */
11202 + preempt_disable();
11203 ++ local_bh_disable();
11204 + bpf_compute_data_pointers(skb);
11205 + ret = bpf_prog_run_save_cb(lwt->prog, skb);
11206 +
11207 +@@ -78,6 +77,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
11208 + break;
11209 + }
11210 +
11211 ++ local_bh_enable();
11212 + preempt_enable();
11213 +
11214 + return ret;
11215 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
11216 +index f1f78a742b36a..8a6a4384e7916 100644
11217 +--- a/net/ipv4/netfilter/arp_tables.c
11218 ++++ b/net/ipv4/netfilter/arp_tables.c
11219 +@@ -203,7 +203,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
11220 +
11221 + local_bh_disable();
11222 + addend = xt_write_recseq_begin();
11223 +- private = READ_ONCE(table->private); /* Address dependency. */
11224 ++ private = rcu_access_pointer(table->private);
11225 + cpu = smp_processor_id();
11226 + table_base = private->entries;
11227 + jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
11228 +@@ -649,7 +649,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
11229 + {
11230 + unsigned int countersize;
11231 + struct xt_counters *counters;
11232 +- const struct xt_table_info *private = table->private;
11233 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
11234 +
11235 + /* We need atomic snapshot of counters: rest doesn't change
11236 + * (other than comefrom, which userspace doesn't care
11237 +@@ -673,7 +673,7 @@ static int copy_entries_to_user(unsigned int total_size,
11238 + unsigned int off, num;
11239 + const struct arpt_entry *e;
11240 + struct xt_counters *counters;
11241 +- struct xt_table_info *private = table->private;
11242 ++ struct xt_table_info *private = xt_table_get_private_protected(table);
11243 + int ret = 0;
11244 + void *loc_cpu_entry;
11245 +
11246 +@@ -808,7 +808,7 @@ static int get_info(struct net *net, void __user *user,
11247 + t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
11248 + if (!IS_ERR(t)) {
11249 + struct arpt_getinfo info;
11250 +- const struct xt_table_info *private = t->private;
11251 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
11252 + #ifdef CONFIG_COMPAT
11253 + struct xt_table_info tmp;
11254 +
11255 +@@ -861,7 +861,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
11256 +
11257 + t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
11258 + if (!IS_ERR(t)) {
11259 +- const struct xt_table_info *private = t->private;
11260 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
11261 +
11262 + if (get.size == private->size)
11263 + ret = copy_entries_to_user(private->size,
11264 +@@ -1020,7 +1020,7 @@ static int do_add_counters(struct net *net, const void __user *user,
11265 + }
11266 +
11267 + local_bh_disable();
11268 +- private = t->private;
11269 ++ private = xt_table_get_private_protected(t);
11270 + if (private->number != tmp.num_counters) {
11271 + ret = -EINVAL;
11272 + goto unlock_up_free;
11273 +@@ -1357,7 +1357,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
11274 + void __user *userptr)
11275 + {
11276 + struct xt_counters *counters;
11277 +- const struct xt_table_info *private = table->private;
11278 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
11279 + void __user *pos;
11280 + unsigned int size;
11281 + int ret = 0;
11282 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
11283 +index 10b91ebdf2131..4852769995440 100644
11284 +--- a/net/ipv4/netfilter/ip_tables.c
11285 ++++ b/net/ipv4/netfilter/ip_tables.c
11286 +@@ -258,7 +258,7 @@ ipt_do_table(struct sk_buff *skb,
11287 + WARN_ON(!(table->valid_hooks & (1 << hook)));
11288 + local_bh_disable();
11289 + addend = xt_write_recseq_begin();
11290 +- private = READ_ONCE(table->private); /* Address dependency. */
11291 ++ private = rcu_access_pointer(table->private);
11292 + cpu = smp_processor_id();
11293 + table_base = private->entries;
11294 + jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
11295 +@@ -791,7 +791,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
11296 + {
11297 + unsigned int countersize;
11298 + struct xt_counters *counters;
11299 +- const struct xt_table_info *private = table->private;
11300 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
11301 +
11302 + /* We need atomic snapshot of counters: rest doesn't change
11303 + (other than comefrom, which userspace doesn't care
11304 +@@ -815,7 +815,7 @@ copy_entries_to_user(unsigned int total_size,
11305 + unsigned int off, num;
11306 + const struct ipt_entry *e;
11307 + struct xt_counters *counters;
11308 +- const struct xt_table_info *private = table->private;
11309 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
11310 + int ret = 0;
11311 + const void *loc_cpu_entry;
11312 +
11313 +@@ -965,7 +965,7 @@ static int get_info(struct net *net, void __user *user,
11314 + t = xt_request_find_table_lock(net, AF_INET, name);
11315 + if (!IS_ERR(t)) {
11316 + struct ipt_getinfo info;
11317 +- const struct xt_table_info *private = t->private;
11318 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
11319 + #ifdef CONFIG_COMPAT
11320 + struct xt_table_info tmp;
11321 +
11322 +@@ -1019,7 +1019,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
11323 +
11324 + t = xt_find_table_lock(net, AF_INET, get.name);
11325 + if (!IS_ERR(t)) {
11326 +- const struct xt_table_info *private = t->private;
11327 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
11328 + if (get.size == private->size)
11329 + ret = copy_entries_to_user(private->size,
11330 + t, uptr->entrytable);
11331 +@@ -1175,7 +1175,7 @@ do_add_counters(struct net *net, const void __user *user,
11332 + }
11333 +
11334 + local_bh_disable();
11335 +- private = t->private;
11336 ++ private = xt_table_get_private_protected(t);
11337 + if (private->number != tmp.num_counters) {
11338 + ret = -EINVAL;
11339 + goto unlock_up_free;
11340 +@@ -1570,7 +1570,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
11341 + void __user *userptr)
11342 + {
11343 + struct xt_counters *counters;
11344 +- const struct xt_table_info *private = table->private;
11345 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
11346 + void __user *pos;
11347 + unsigned int size;
11348 + int ret = 0;
11349 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
11350 +index c973ace208c51..12735ee7713a7 100644
11351 +--- a/net/ipv6/netfilter/ip6_tables.c
11352 ++++ b/net/ipv6/netfilter/ip6_tables.c
11353 +@@ -280,7 +280,7 @@ ip6t_do_table(struct sk_buff *skb,
11354 +
11355 + local_bh_disable();
11356 + addend = xt_write_recseq_begin();
11357 +- private = READ_ONCE(table->private); /* Address dependency. */
11358 ++ private = rcu_access_pointer(table->private);
11359 + cpu = smp_processor_id();
11360 + table_base = private->entries;
11361 + jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
11362 +@@ -807,7 +807,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
11363 + {
11364 + unsigned int countersize;
11365 + struct xt_counters *counters;
11366 +- const struct xt_table_info *private = table->private;
11367 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
11368 +
11369 + /* We need atomic snapshot of counters: rest doesn't change
11370 + (other than comefrom, which userspace doesn't care
11371 +@@ -831,7 +831,7 @@ copy_entries_to_user(unsigned int total_size,
11372 + unsigned int off, num;
11373 + const struct ip6t_entry *e;
11374 + struct xt_counters *counters;
11375 +- const struct xt_table_info *private = table->private;
11376 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
11377 + int ret = 0;
11378 + const void *loc_cpu_entry;
11379 +
11380 +@@ -981,7 +981,7 @@ static int get_info(struct net *net, void __user *user,
11381 + t = xt_request_find_table_lock(net, AF_INET6, name);
11382 + if (!IS_ERR(t)) {
11383 + struct ip6t_getinfo info;
11384 +- const struct xt_table_info *private = t->private;
11385 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
11386 + #ifdef CONFIG_COMPAT
11387 + struct xt_table_info tmp;
11388 +
11389 +@@ -1036,7 +1036,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
11390 +
11391 + t = xt_find_table_lock(net, AF_INET6, get.name);
11392 + if (!IS_ERR(t)) {
11393 +- struct xt_table_info *private = t->private;
11394 ++ struct xt_table_info *private = xt_table_get_private_protected(t);
11395 + if (get.size == private->size)
11396 + ret = copy_entries_to_user(private->size,
11397 + t, uptr->entrytable);
11398 +@@ -1191,7 +1191,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
11399 + }
11400 +
11401 + local_bh_disable();
11402 +- private = t->private;
11403 ++ private = xt_table_get_private_protected(t);
11404 + if (private->number != tmp.num_counters) {
11405 + ret = -EINVAL;
11406 + goto unlock_up_free;
11407 +@@ -1579,7 +1579,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
11408 + void __user *userptr)
11409 + {
11410 + struct xt_counters *counters;
11411 +- const struct xt_table_info *private = table->private;
11412 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
11413 + void __user *pos;
11414 + unsigned int size;
11415 + int ret = 0;
11416 +diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
11417 +index aabc63dadf176..cea83fa5fc5b9 100644
11418 +--- a/net/mac80211/vht.c
11419 ++++ b/net/mac80211/vht.c
11420 +@@ -446,12 +446,18 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
11421 + * IEEE80211-2016 specification makes higher bandwidth operation
11422 + * possible on the TDLS link if the peers have wider bandwidth
11423 + * capability.
11424 ++ *
11425 ++ * However, in this case, and only if the TDLS peer is authorized,
11426 ++ * limit to the tdls_chandef so that the configuration here isn't
11427 ++ * wider than what's actually requested on the channel context.
11428 + */
11429 + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
11430 +- test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
11431 +- return bw;
11432 +-
11433 +- bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
11434 ++ test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) &&
11435 ++ test_sta_flag(sta, WLAN_STA_AUTHORIZED) &&
11436 ++ sta->tdls_chandef.chan)
11437 ++ bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width));
11438 ++ else
11439 ++ bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
11440 +
11441 + return bw;
11442 + }
11443 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
11444 +index 459b7c0547115..40216c2a7dd72 100644
11445 +--- a/net/netfilter/nf_tables_api.c
11446 ++++ b/net/netfilter/nf_tables_api.c
11447 +@@ -3277,7 +3277,7 @@ cont:
11448 + return 0;
11449 + }
11450 +
11451 +-static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
11452 ++int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
11453 + {
11454 + u64 ms = be64_to_cpu(nla_get_be64(nla));
11455 + u64 max = (u64)(~((u64)0));
11456 +@@ -3291,7 +3291,7 @@ static int nf_msecs_to_jiffies64(const struct nlattr *nla, u64 *result)
11457 + return 0;
11458 + }
11459 +
11460 +-static __be64 nf_jiffies64_to_msecs(u64 input)
11461 ++__be64 nf_jiffies64_to_msecs(u64 input)
11462 + {
11463 + return cpu_to_be64(jiffies64_to_msecs(input));
11464 + }
11465 +@@ -6605,6 +6605,12 @@ static void nf_tables_trans_destroy_work(struct work_struct *w)
11466 + }
11467 + }
11468 +
11469 ++void nf_tables_trans_destroy_flush_work(void)
11470 ++{
11471 ++ flush_work(&trans_destroy_work);
11472 ++}
11473 ++EXPORT_SYMBOL_GPL(nf_tables_trans_destroy_flush_work);
11474 ++
11475 + static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *chain)
11476 + {
11477 + struct nft_rule *rule;
11478 +@@ -6776,9 +6782,9 @@ static void nf_tables_commit_release(struct net *net)
11479 + spin_unlock(&nf_tables_destroy_list_lock);
11480 +
11481 + nf_tables_module_autoload_cleanup(net);
11482 +- mutex_unlock(&net->nft.commit_mutex);
11483 +-
11484 + schedule_work(&trans_destroy_work);
11485 ++
11486 ++ mutex_unlock(&net->nft.commit_mutex);
11487 + }
11488 +
11489 + static int nf_tables_commit(struct net *net, struct sk_buff *skb)
11490 +diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
11491 +index f9adca62ccb3d..0e3e0ff805812 100644
11492 +--- a/net/netfilter/nft_compat.c
11493 ++++ b/net/netfilter/nft_compat.c
11494 +@@ -27,6 +27,8 @@ struct nft_xt_match_priv {
11495 + void *info;
11496 + };
11497 +
11498 ++static refcount_t nft_compat_pending_destroy = REFCOUNT_INIT(1);
11499 ++
11500 + static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
11501 + const char *tablename)
11502 + {
11503 +@@ -236,6 +238,15 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
11504 +
11505 + nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
11506 +
11507 ++ /* xtables matches or targets can have side effects, e.g.
11508 ++ * creation/destruction of /proc files.
11509 ++ * The xt ->destroy functions are run asynchronously from
11510 ++ * work queue. If we have pending invocations we thus
11511 ++ * need to wait for those to finish.
11512 ++ */
11513 ++ if (refcount_read(&nft_compat_pending_destroy) > 1)
11514 ++ nf_tables_trans_destroy_flush_work();
11515 ++
11516 + ret = xt_check_target(&par, size, proto, inv);
11517 + if (ret < 0)
11518 + return ret;
11519 +@@ -247,6 +258,13 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
11520 + return 0;
11521 + }
11522 +
11523 ++static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr)
11524 ++{
11525 ++ refcount_dec(&nft_compat_pending_destroy);
11526 ++ module_put(me);
11527 ++ kfree(expr->ops);
11528 ++}
11529 ++
11530 + static void
11531 + nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
11532 + {
11533 +@@ -262,8 +280,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
11534 + if (par.target->destroy != NULL)
11535 + par.target->destroy(&par);
11536 +
11537 +- module_put(me);
11538 +- kfree(expr->ops);
11539 ++ __nft_mt_tg_destroy(me, expr);
11540 + }
11541 +
11542 + static int nft_extension_dump_info(struct sk_buff *skb, int attr,
11543 +@@ -494,8 +511,7 @@ __nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
11544 + if (par.match->destroy != NULL)
11545 + par.match->destroy(&par);
11546 +
11547 +- module_put(me);
11548 +- kfree(expr->ops);
11549 ++ __nft_mt_tg_destroy(me, expr);
11550 + }
11551 +
11552 + static void
11553 +@@ -700,6 +716,14 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
11554 +
11555 + static struct nft_expr_type nft_match_type;
11556 +
11557 ++static void nft_mt_tg_deactivate(const struct nft_ctx *ctx,
11558 ++ const struct nft_expr *expr,
11559 ++ enum nft_trans_phase phase)
11560 ++{
11561 ++ if (phase == NFT_TRANS_COMMIT)
11562 ++ refcount_inc(&nft_compat_pending_destroy);
11563 ++}
11564 ++
11565 + static const struct nft_expr_ops *
11566 + nft_match_select_ops(const struct nft_ctx *ctx,
11567 + const struct nlattr * const tb[])
11568 +@@ -738,6 +762,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
11569 + ops->type = &nft_match_type;
11570 + ops->eval = nft_match_eval;
11571 + ops->init = nft_match_init;
11572 ++ ops->deactivate = nft_mt_tg_deactivate,
11573 + ops->destroy = nft_match_destroy;
11574 + ops->dump = nft_match_dump;
11575 + ops->validate = nft_match_validate;
11576 +@@ -828,6 +853,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
11577 + ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
11578 + ops->init = nft_target_init;
11579 + ops->destroy = nft_target_destroy;
11580 ++ ops->deactivate = nft_mt_tg_deactivate,
11581 + ops->dump = nft_target_dump;
11582 + ops->validate = nft_target_validate;
11583 + ops->data = target;
11584 +@@ -891,6 +917,8 @@ static void __exit nft_compat_module_exit(void)
11585 + nfnetlink_subsys_unregister(&nfnl_compat_subsys);
11586 + nft_unregister_expr(&nft_target_type);
11587 + nft_unregister_expr(&nft_match_type);
11588 ++
11589 ++ WARN_ON_ONCE(refcount_read(&nft_compat_pending_destroy) != 1);
11590 + }
11591 +
11592 + MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
11593 +diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
11594 +index 46ca8bcca1bd5..2042c6f4629cc 100644
11595 +--- a/net/netfilter/nft_ct.c
11596 ++++ b/net/netfilter/nft_ct.c
11597 +@@ -177,8 +177,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
11598 + }
11599 + #endif
11600 + case NFT_CT_ID:
11601 +- if (!nf_ct_is_confirmed(ct))
11602 +- goto err;
11603 + *dest = nf_ct_get_id(ct);
11604 + return;
11605 + default:
11606 +diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
11607 +index 8887295414dcb..217fd1bdc55e7 100644
11608 +--- a/net/netfilter/nft_dynset.c
11609 ++++ b/net/netfilter/nft_dynset.c
11610 +@@ -180,8 +180,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
11611 + if (tb[NFTA_DYNSET_TIMEOUT] != NULL) {
11612 + if (!(set->flags & NFT_SET_TIMEOUT))
11613 + return -EINVAL;
11614 +- timeout = msecs_to_jiffies(be64_to_cpu(nla_get_be64(
11615 +- tb[NFTA_DYNSET_TIMEOUT])));
11616 ++
11617 ++ err = nf_msecs_to_jiffies64(tb[NFTA_DYNSET_TIMEOUT], &timeout);
11618 ++ if (err)
11619 ++ return err;
11620 + }
11621 +
11622 + priv->sreg_key = nft_parse_register(tb[NFTA_DYNSET_SREG_KEY]);
11623 +@@ -296,7 +298,7 @@ static int nft_dynset_dump(struct sk_buff *skb, const struct nft_expr *expr)
11624 + if (nla_put_string(skb, NFTA_DYNSET_SET_NAME, priv->set->name))
11625 + goto nla_put_failure;
11626 + if (nla_put_be64(skb, NFTA_DYNSET_TIMEOUT,
11627 +- cpu_to_be64(jiffies_to_msecs(priv->timeout)),
11628 ++ nf_jiffies64_to_msecs(priv->timeout),
11629 + NFTA_DYNSET_PAD))
11630 + goto nla_put_failure;
11631 + if (priv->expr && nft_expr_dump(skb, NFTA_DYNSET_EXPR, priv->expr))
11632 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
11633 +index 44f971f319920..d1ef2d7930739 100644
11634 +--- a/net/netfilter/x_tables.c
11635 ++++ b/net/netfilter/x_tables.c
11636 +@@ -1349,6 +1349,14 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
11637 + }
11638 + EXPORT_SYMBOL(xt_counters_alloc);
11639 +
11640 ++struct xt_table_info
11641 ++*xt_table_get_private_protected(const struct xt_table *table)
11642 ++{
11643 ++ return rcu_dereference_protected(table->private,
11644 ++ mutex_is_locked(&xt[table->af].mutex));
11645 ++}
11646 ++EXPORT_SYMBOL(xt_table_get_private_protected);
11647 ++
11648 + struct xt_table_info *
11649 + xt_replace_table(struct xt_table *table,
11650 + unsigned int num_counters,
11651 +@@ -1356,7 +1364,6 @@ xt_replace_table(struct xt_table *table,
11652 + int *error)
11653 + {
11654 + struct xt_table_info *private;
11655 +- unsigned int cpu;
11656 + int ret;
11657 +
11658 + ret = xt_jumpstack_alloc(newinfo);
11659 +@@ -1366,47 +1373,20 @@ xt_replace_table(struct xt_table *table,
11660 + }
11661 +
11662 + /* Do the substitution. */
11663 +- local_bh_disable();
11664 +- private = table->private;
11665 ++ private = xt_table_get_private_protected(table);
11666 +
11667 + /* Check inside lock: is the old number correct? */
11668 + if (num_counters != private->number) {
11669 + pr_debug("num_counters != table->private->number (%u/%u)\n",
11670 + num_counters, private->number);
11671 +- local_bh_enable();
11672 + *error = -EAGAIN;
11673 + return NULL;
11674 + }
11675 +
11676 + newinfo->initial_entries = private->initial_entries;
11677 +- /*
11678 +- * Ensure contents of newinfo are visible before assigning to
11679 +- * private.
11680 +- */
11681 +- smp_wmb();
11682 +- table->private = newinfo;
11683 +-
11684 +- /* make sure all cpus see new ->private value */
11685 +- smp_wmb();
11686 +
11687 +- /*
11688 +- * Even though table entries have now been swapped, other CPU's
11689 +- * may still be using the old entries...
11690 +- */
11691 +- local_bh_enable();
11692 +-
11693 +- /* ... so wait for even xt_recseq on all cpus */
11694 +- for_each_possible_cpu(cpu) {
11695 +- seqcount_t *s = &per_cpu(xt_recseq, cpu);
11696 +- u32 seq = raw_read_seqcount(s);
11697 +-
11698 +- if (seq & 1) {
11699 +- do {
11700 +- cond_resched();
11701 +- cpu_relax();
11702 +- } while (seq == raw_read_seqcount(s));
11703 +- }
11704 +- }
11705 ++ rcu_assign_pointer(table->private, newinfo);
11706 ++ synchronize_rcu();
11707 +
11708 + #ifdef CONFIG_AUDIT
11709 + if (audit_enabled) {
11710 +@@ -1447,12 +1427,12 @@ struct xt_table *xt_register_table(struct net *net,
11711 + }
11712 +
11713 + /* Simplifies replace_table code. */
11714 +- table->private = bootstrap;
11715 ++ rcu_assign_pointer(table->private, bootstrap);
11716 +
11717 + if (!xt_replace_table(table, 0, newinfo, &ret))
11718 + goto unlock;
11719 +
11720 +- private = table->private;
11721 ++ private = xt_table_get_private_protected(table);
11722 + pr_debug("table->private->number = %u\n", private->number);
11723 +
11724 + /* save number of initial entries */
11725 +@@ -1475,7 +1455,8 @@ void *xt_unregister_table(struct xt_table *table)
11726 + struct xt_table_info *private;
11727 +
11728 + mutex_lock(&xt[table->af].mutex);
11729 +- private = table->private;
11730 ++ private = xt_table_get_private_protected(table);
11731 ++ RCU_INIT_POINTER(table->private, NULL);
11732 + list_del(&table->list);
11733 + mutex_unlock(&xt[table->af].mutex);
11734 + kfree(table);
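
For orientation, the __rcu conversion above leaves two access patterns for table->private; a sketch with invented function names follows (only the accessor calls mirror this patch):

#include <linux/netfilter/x_tables.h>

/* Packet path: BH disabled and xt_write_recseq held, no xt mutex. */
static void example_datapath_read(const struct xt_table *table)
{
	const struct xt_table_info *private;
	unsigned int addend;

	local_bh_disable();
	addend = xt_write_recseq_begin();
	private = rcu_access_pointer(table->private);
	/* ... walk private->entries ... */
	xt_write_recseq_end(addend);
	local_bh_enable();
}

/* Control path: caller holds xt[af].mutex, so the checked helper applies. */
static void example_controlpath_read(const struct xt_table *table)
{
	const struct xt_table_info *private =
			xt_table_get_private_protected(table);

	/* ... e.g. read private->number or private->size ... */
}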
11735 +diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c
11736 +index fd9bca2427242..56029e3af6ff0 100644
11737 +--- a/net/sunrpc/debugfs.c
11738 ++++ b/net/sunrpc/debugfs.c
11739 +@@ -128,13 +128,13 @@ static int do_xprt_debugfs(struct rpc_clnt *clnt, struct rpc_xprt *xprt, void *n
11740 + return 0;
11741 + len = snprintf(name, sizeof(name), "../../rpc_xprt/%s",
11742 + xprt->debugfs->d_name.name);
11743 +- if (len > sizeof(name))
11744 ++ if (len >= sizeof(name))
11745 + return -1;
11746 + if (*nump == 0)
11747 + strcpy(link, "xprt");
11748 + else {
11749 + len = snprintf(link, sizeof(link), "xprt%d", *nump);
11750 +- if (len > sizeof(link))
11751 ++ if (len >= sizeof(link))
11752 + return -1;
11753 + }
11754 + debugfs_create_symlink(link, clnt->cl_debugfs, name);
11755 +diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
11756 +index 53d8b82eda006..7afbf15bcbd9a 100644
11757 +--- a/net/sunrpc/sched.c
11758 ++++ b/net/sunrpc/sched.c
11759 +@@ -699,6 +699,23 @@ struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
11760 + }
11761 + EXPORT_SYMBOL_GPL(rpc_wake_up_next);
11762 +
11763 ++/**
11764 ++ * rpc_wake_up_locked - wake up all rpc_tasks
11765 ++ * @queue: rpc_wait_queue on which the tasks are sleeping
11766 ++ *
11767 ++ */
11768 ++static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
11769 ++{
11770 ++ struct rpc_task *task;
11771 ++
11772 ++ for (;;) {
11773 ++ task = __rpc_find_next_queued(queue);
11774 ++ if (task == NULL)
11775 ++ break;
11776 ++ rpc_wake_up_task_queue_locked(queue, task);
11777 ++ }
11778 ++}
11779 ++
11780 + /**
11781 + * rpc_wake_up - wake up all rpc_tasks
11782 + * @queue: rpc_wait_queue on which the tasks are sleeping
11783 +@@ -707,25 +724,28 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next);
11784 + */
11785 + void rpc_wake_up(struct rpc_wait_queue *queue)
11786 + {
11787 +- struct list_head *head;
11788 +-
11789 + spin_lock(&queue->lock);
11790 +- head = &queue->tasks[queue->maxpriority];
11791 ++ rpc_wake_up_locked(queue);
11792 ++ spin_unlock(&queue->lock);
11793 ++}
11794 ++EXPORT_SYMBOL_GPL(rpc_wake_up);
11795 ++
11796 ++/**
11797 ++ * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
11798 ++ * @queue: rpc_wait_queue on which the tasks are sleeping
11799 ++ * @status: status value to set
11800 ++ */
11801 ++static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
11802 ++{
11803 ++ struct rpc_task *task;
11804 ++
11805 + for (;;) {
11806 +- while (!list_empty(head)) {
11807 +- struct rpc_task *task;
11808 +- task = list_first_entry(head,
11809 +- struct rpc_task,
11810 +- u.tk_wait.list);
11811 +- rpc_wake_up_task_queue_locked(queue, task);
11812 +- }
11813 +- if (head == &queue->tasks[0])
11814 ++ task = __rpc_find_next_queued(queue);
11815 ++ if (task == NULL)
11816 + break;
11817 +- head--;
11818 ++ rpc_wake_up_task_queue_set_status_locked(queue, task, status);
11819 + }
11820 +- spin_unlock(&queue->lock);
11821 + }
11822 +-EXPORT_SYMBOL_GPL(rpc_wake_up);
11823 +
11824 + /**
11825 + * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
11826 +@@ -736,23 +756,8 @@ EXPORT_SYMBOL_GPL(rpc_wake_up);
11827 + */
11828 + void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
11829 + {
11830 +- struct list_head *head;
11831 +-
11832 + spin_lock(&queue->lock);
11833 +- head = &queue->tasks[queue->maxpriority];
11834 +- for (;;) {
11835 +- while (!list_empty(head)) {
11836 +- struct rpc_task *task;
11837 +- task = list_first_entry(head,
11838 +- struct rpc_task,
11839 +- u.tk_wait.list);
11840 +- task->tk_status = status;
11841 +- rpc_wake_up_task_queue_locked(queue, task);
11842 +- }
11843 +- if (head == &queue->tasks[0])
11844 +- break;
11845 +- head--;
11846 +- }
11847 ++ rpc_wake_up_status_locked(queue, status);
11848 + spin_unlock(&queue->lock);
11849 + }
11850 + EXPORT_SYMBOL_GPL(rpc_wake_up_status);
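[Editor's note, not part of the patch: the sched.c rework above is the usual "locked helper" refactor. The loop that drains the queue moves into a *_locked() function that assumes the caller already holds the lock, and the exported entry points shrink to thin lock/unlock wrappers, so the same loop can also be reused by other callers that hold the lock. A rough userspace analogue using pthreads, with made-up type and function names:]

    #include <pthread.h>
    #include <stddef.h>

    struct waiter {
        struct waiter *next;
        int woken;
    };

    struct wait_queue {
        pthread_mutex_t lock;
        struct waiter *head;
    };

    /* Caller must hold q->lock. */
    static void wake_all_locked(struct wait_queue *q)
    {
        struct waiter *w;

        while ((w = q->head) != NULL) {
            q->head = w->next;
            w->woken = 1;
        }
    }

    /* Public entry point: just take the lock around the helper. */
    void wake_all(struct wait_queue *q)
    {
        pthread_mutex_lock(&q->lock);
        wake_all_locked(q);
        pthread_mutex_unlock(&q->lock);
    }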
11851 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
11852 +index a6fee86f400ec..639837b3a5d90 100644
11853 +--- a/net/sunrpc/xprt.c
11854 ++++ b/net/sunrpc/xprt.c
11855 +@@ -151,31 +151,64 @@ out:
11856 + }
11857 + EXPORT_SYMBOL_GPL(xprt_unregister_transport);
11858 +
11859 ++static void
11860 ++xprt_class_release(const struct xprt_class *t)
11861 ++{
11862 ++ module_put(t->owner);
11863 ++}
11864 ++
11865 ++static const struct xprt_class *
11866 ++xprt_class_find_by_netid_locked(const char *netid)
11867 ++{
11868 ++ const struct xprt_class *t;
11869 ++ unsigned int i;
11870 ++
11871 ++ list_for_each_entry(t, &xprt_list, list) {
11872 ++ for (i = 0; t->netid[i][0] != '\0'; i++) {
11873 ++ if (strcmp(t->netid[i], netid) != 0)
11874 ++ continue;
11875 ++ if (!try_module_get(t->owner))
11876 ++ continue;
11877 ++ return t;
11878 ++ }
11879 ++ }
11880 ++ return NULL;
11881 ++}
11882 ++
11883 ++static const struct xprt_class *
11884 ++xprt_class_find_by_netid(const char *netid)
11885 ++{
11886 ++ const struct xprt_class *t;
11887 ++
11888 ++ spin_lock(&xprt_list_lock);
11889 ++ t = xprt_class_find_by_netid_locked(netid);
11890 ++ if (!t) {
11891 ++ spin_unlock(&xprt_list_lock);
11892 ++ request_module("rpc%s", netid);
11893 ++ spin_lock(&xprt_list_lock);
11894 ++ t = xprt_class_find_by_netid_locked(netid);
11895 ++ }
11896 ++ spin_unlock(&xprt_list_lock);
11897 ++ return t;
11898 ++}
11899 ++
11900 + /**
11901 + * xprt_load_transport - load a transport implementation
11902 +- * @transport_name: transport to load
11903 ++ * @netid: transport to load
11904 + *
11905 + * Returns:
11906 + * 0: transport successfully loaded
11907 + * -ENOENT: transport module not available
11908 + */
11909 +-int xprt_load_transport(const char *transport_name)
11910 ++int xprt_load_transport(const char *netid)
11911 + {
11912 +- struct xprt_class *t;
11913 +- int result;
11914 ++ const struct xprt_class *t;
11915 +
11916 +- result = 0;
11917 +- spin_lock(&xprt_list_lock);
11918 +- list_for_each_entry(t, &xprt_list, list) {
11919 +- if (strcmp(t->name, transport_name) == 0) {
11920 +- spin_unlock(&xprt_list_lock);
11921 +- goto out;
11922 +- }
11923 +- }
11924 +- spin_unlock(&xprt_list_lock);
11925 +- result = request_module("xprt%s", transport_name);
11926 +-out:
11927 +- return result;
11928 ++ t = xprt_class_find_by_netid(netid);
11929 ++ if (!t)
11930 ++ return -ENOENT;
11931 ++ xprt_class_release(t);
11932 ++ return 0;
11933 + }
11934 + EXPORT_SYMBOL_GPL(xprt_load_transport);
11935 +
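[Editor's note, not part of the patch: the rewritten xprt_load_transport() above matches on the netid strings each transport class advertises, and those arrays are terminated by an empty string (see the .netid initializers added to xprtsock.c and transport.c further down). A self-contained sketch of that sentinel-terminated lookup, with invented demo data:]

    #include <stdio.h>
    #include <string.h>

    struct xprt_class_demo {
        const char *name;
        /* Netids handled by this transport; "" terminates the list. */
        const char *netid[4];
    };

    static const struct xprt_class_demo classes[] = {
        { "tcp-transport",  { "tcp", "tcp6", "" } },
        { "rdma-transport", { "rdma", "rdma6", "" } },
    };

    static const char *find_by_netid(const char *netid)
    {
        size_t c, i;

        for (c = 0; c < sizeof(classes) / sizeof(classes[0]); c++)
            for (i = 0; classes[c].netid[i][0] != '\0'; i++)
                if (strcmp(classes[c].netid[i], netid) == 0)
                    return classes[c].name;
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", find_by_netid("rdma6")); /* rdma-transport */
        return 0;
    }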
11936 +diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c
11937 +index 620327c01302c..45c5b41ac8dc9 100644
11938 +--- a/net/sunrpc/xprtrdma/module.c
11939 ++++ b/net/sunrpc/xprtrdma/module.c
11940 +@@ -24,6 +24,7 @@ MODULE_DESCRIPTION("RPC/RDMA Transport");
11941 + MODULE_LICENSE("Dual BSD/GPL");
11942 + MODULE_ALIAS("svcrdma");
11943 + MODULE_ALIAS("xprtrdma");
11944 ++MODULE_ALIAS("rpcrdma6");
11945 +
11946 + static void __exit rpc_rdma_cleanup(void)
11947 + {
11948 +diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
11949 +index 21970185485fc..c091417bd799e 100644
11950 +--- a/net/sunrpc/xprtrdma/rpc_rdma.c
11951 ++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
11952 +@@ -183,6 +183,31 @@ rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
11953 + r_xprt->rx_ep.rep_max_inline_recv;
11954 + }
11955 +
11956 ++/* ACL likes to be lazy in allocating pages. For TCP, these
11957 ++ * pages can be allocated during receive processing. Not true
11958 ++ * for RDMA, which must always provision receive buffers
11959 ++ * up front.
11960 ++ */
11961 ++static noinline int
11962 ++rpcrdma_alloc_sparse_pages(struct xdr_buf *buf)
11963 ++{
11964 ++ struct page **ppages;
11965 ++ int len;
11966 ++
11967 ++ len = buf->page_len;
11968 ++ ppages = buf->pages + (buf->page_base >> PAGE_SHIFT);
11969 ++ while (len > 0) {
11970 ++ if (!*ppages)
11971 ++ *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
11972 ++ if (!*ppages)
11973 ++ return -ENOBUFS;
11974 ++ ppages++;
11975 ++ len -= PAGE_SIZE;
11976 ++ }
11977 ++
11978 ++ return 0;
11979 ++}
11980 ++
11981 + /* Split @vec on page boundaries into SGEs. FMR registers pages, not
11982 + * a byte range. Other modes coalesce these SGEs into a single MR
11983 + * when they can.
11984 +@@ -237,15 +262,6 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
11985 + ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
11986 + page_base = offset_in_page(xdrbuf->page_base);
11987 + while (len) {
11988 +- /* ACL likes to be lazy in allocating pages - ACLs
11989 +- * are small by default but can get huge.
11990 +- */
11991 +- if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
11992 +- if (!*ppages)
11993 +- *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
11994 +- if (!*ppages)
11995 +- return -ENOBUFS;
11996 +- }
11997 + seg->mr_page = *ppages;
11998 + seg->mr_offset = (char *)page_base;
11999 + seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
12000 +@@ -800,6 +816,12 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
12001 + __be32 *p;
12002 + int ret;
12003 +
12004 ++ if (unlikely(rqst->rq_rcv_buf.flags & XDRBUF_SPARSE_PAGES)) {
12005 ++ ret = rpcrdma_alloc_sparse_pages(&rqst->rq_rcv_buf);
12006 ++ if (ret)
12007 ++ return ret;
12008 ++ }
12009 ++
12010 + rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
12011 + xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
12012 + rqst);
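[Editor's note, not part of the patch: rpcrdma_alloc_sparse_pages() above walks the receive buffer's page array and fills in any slots the upper layer left empty, because RDMA has to provision every receive page before the request is posted. A generic userspace sketch of the same "fill the holes or fail" walk; the helper below is hypothetical, not the kernel API:]

    #include <stdlib.h>

    /* Ensure every slot in a page-pointer array is populated.
     * On failure the pages already present are left in place, mirroring
     * the patch, so the caller can simply retry the request later. */
    static int fill_sparse_pages(void **pages, size_t npages, size_t page_size)
    {
        size_t i;

        for (i = 0; i < npages; i++) {
            if (!pages[i])
                pages[i] = calloc(1, page_size);
            if (!pages[i])
                return -1; /* out of memory */
        }
        return 0;
    }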
12013 +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
12014 +index c67d465dc0620..2f21e3c52bfc1 100644
12015 +--- a/net/sunrpc/xprtrdma/transport.c
12016 ++++ b/net/sunrpc/xprtrdma/transport.c
12017 +@@ -827,6 +827,7 @@ static struct xprt_class xprt_rdma = {
12018 + .owner = THIS_MODULE,
12019 + .ident = XPRT_TRANSPORT_RDMA,
12020 + .setup = xprt_setup_rdma,
12021 ++ .netid = { "rdma", "rdma6", "" },
12022 + };
12023 +
12024 + void xprt_rdma_cleanup(void)
12025 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
12026 +index 934e30e675375..8ffc54b6661f8 100644
12027 +--- a/net/sunrpc/xprtsock.c
12028 ++++ b/net/sunrpc/xprtsock.c
12029 +@@ -432,7 +432,8 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
12030 + if (ret <= 0)
12031 + goto sock_err;
12032 + xs_flush_bvec(buf->bvec, ret, seek + buf->page_base);
12033 +- offset += ret - buf->page_base;
12034 ++ ret -= buf->page_base;
12035 ++ offset += ret;
12036 + if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
12037 + goto out;
12038 + if (ret != want)
12039 +@@ -3204,6 +3205,7 @@ static struct xprt_class xs_local_transport = {
12040 + .owner = THIS_MODULE,
12041 + .ident = XPRT_TRANSPORT_LOCAL,
12042 + .setup = xs_setup_local,
12043 ++ .netid = { "" },
12044 + };
12045 +
12046 + static struct xprt_class xs_udp_transport = {
12047 +@@ -3212,6 +3214,7 @@ static struct xprt_class xs_udp_transport = {
12048 + .owner = THIS_MODULE,
12049 + .ident = XPRT_TRANSPORT_UDP,
12050 + .setup = xs_setup_udp,
12051 ++ .netid = { "udp", "udp6", "" },
12052 + };
12053 +
12054 + static struct xprt_class xs_tcp_transport = {
12055 +@@ -3220,6 +3223,7 @@ static struct xprt_class xs_tcp_transport = {
12056 + .owner = THIS_MODULE,
12057 + .ident = XPRT_TRANSPORT_TCP,
12058 + .setup = xs_setup_tcp,
12059 ++ .netid = { "tcp", "tcp6", "" },
12060 + };
12061 +
12062 + static struct xprt_class xs_bc_tcp_transport = {
12063 +@@ -3228,6 +3232,7 @@ static struct xprt_class xs_bc_tcp_transport = {
12064 + .owner = THIS_MODULE,
12065 + .ident = XPRT_TRANSPORT_BC_TCP,
12066 + .setup = xs_setup_bc_tcp,
12067 ++ .netid = { "" },
12068 + };
12069 +
12070 + /**
12071 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
12072 +index dbac5c0995a0f..5bb2316befb98 100644
12073 +--- a/net/wireless/nl80211.c
12074 ++++ b/net/wireless/nl80211.c
12075 +@@ -12033,7 +12033,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
12076 + struct net_device *dev = info->user_ptr[1];
12077 + struct wireless_dev *wdev = dev->ieee80211_ptr;
12078 + struct nlattr *tb[NUM_NL80211_REKEY_DATA];
12079 +- struct cfg80211_gtk_rekey_data rekey_data;
12080 ++ struct cfg80211_gtk_rekey_data rekey_data = {};
12081 + int err;
12082 +
12083 + if (!info->attrs[NL80211_ATTR_REKEY_DATA])
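[Editor's note, not part of the patch: the one-line nl80211 fix above initializes rekey_data with "= {}" so any member the attribute parsing never touches reads back as zero instead of stack garbage. The same idiom in a self-contained form, with a demo struct; "{ 0 }" is the strictly portable spelling of the empty initializer:]

    #include <stdio.h>
    #include <string.h>

    struct rekey_data_demo {
        unsigned char kek[16];
        unsigned char kck[16];
        unsigned long long replay_ctr;
        unsigned int akm;   /* optional attribute: may never be parsed */
    };

    int main(void)
    {
        struct rekey_data_demo d = { 0 };   /* every field starts at zero */

        memset(d.kek, 0xaa, sizeof(d.kek)); /* pretend only the KEK was supplied */
        printf("akm=%u replay=%llu\n", d.akm, d.replay_ctr); /* prints 0 and 0 */
        return 0;
    }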
12084 +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
12085 +index f9eb5efb237c7..2bc0d6e3e124c 100644
12086 +--- a/net/xdp/xsk.c
12087 ++++ b/net/xdp/xsk.c
12088 +@@ -426,14 +426,16 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
12089 + return __xsk_sendmsg(sk);
12090 + }
12091 +
12092 +-static unsigned int xsk_poll(struct file *file, struct socket *sock,
12093 ++static __poll_t xsk_poll(struct file *file, struct socket *sock,
12094 + struct poll_table_struct *wait)
12095 + {
12096 +- unsigned int mask = datagram_poll(file, sock, wait);
12097 ++ __poll_t mask = 0;
12098 + struct sock *sk = sock->sk;
12099 + struct xdp_sock *xs = xdp_sk(sk);
12100 + struct xdp_umem *umem;
12101 +
12102 ++ sock_poll_wait(file, sock, wait);
12103 ++
12104 + if (unlikely(!xsk_is_bound(xs)))
12105 + return mask;
12106 +
12107 +@@ -448,9 +450,9 @@ static unsigned int xsk_poll(struct file *file, struct socket *sock,
12108 + }
12109 +
12110 + if (xs->rx && !xskq_empty_desc(xs->rx))
12111 +- mask |= POLLIN | POLLRDNORM;
12112 ++ mask |= EPOLLIN | EPOLLRDNORM;
12113 + if (xs->tx && !xskq_full_desc(xs->tx))
12114 +- mask |= POLLOUT | POLLWRNORM;
12115 ++ mask |= EPOLLOUT | EPOLLWRNORM;
12116 +
12117 + return mask;
12118 + }
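[Editor's note, not part of the patch: xsk_poll() above now builds its readiness mask from zero with the EPOLL* constants expected for __poll_t, rather than seeding it from datagram_poll(). The userspace counterpart of that bitmask handling is ordinary poll(2); a small sketch on stdin:]

    #include <poll.h>
    #include <stdio.h>

    int main(void)
    {
        struct pollfd pfd = { .fd = 0, .events = POLLIN };
        short mask = 0;

        if (poll(&pfd, 1, 1000) > 0)
            mask = pfd.revents;       /* readiness bits filled in by the kernel */

        if (mask & POLLIN)
            printf("stdin is readable\n");
        else
            printf("no data within 1s\n");
        return 0;
    }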
12119 +diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh
12120 +old mode 100644
12121 +new mode 100755
12122 +index 090b96eaf7f76..0eda9754f50b8
12123 +--- a/samples/bpf/lwt_len_hist.sh
12124 ++++ b/samples/bpf/lwt_len_hist.sh
12125 +@@ -8,6 +8,8 @@ VETH1=tst_lwt1b
12126 + TRACE_ROOT=/sys/kernel/debug/tracing
12127 +
12128 + function cleanup {
12129 ++ # To reset saved histogram, remove pinned map
12130 ++ rm /sys/fs/bpf/tc/globals/lwt_len_hist_map
12131 + ip route del 192.168.253.2/32 dev $VETH0 2> /dev/null
12132 + ip link del $VETH0 2> /dev/null
12133 + ip link del $VETH1 2> /dev/null
12134 +diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh
12135 +old mode 100644
12136 +new mode 100755
12137 +diff --git a/scripts/Makefile.build b/scripts/Makefile.build
12138 +index 24a33c01bbf7c..9c689d011bced 100644
12139 +--- a/scripts/Makefile.build
12140 ++++ b/scripts/Makefile.build
12141 +@@ -234,6 +234,9 @@ objtool_dep = $(objtool_obj) \
12142 + ifdef CONFIG_TRIM_UNUSED_KSYMS
12143 + cmd_gen_ksymdeps = \
12144 + $(CONFIG_SHELL) $(srctree)/scripts/gen_ksymdeps.sh $@ >> $(dot-target).cmd
12145 ++
12146 ++# List module undefined symbols
12147 ++undefined_syms = $(NM) $< | $(AWK) '$$1 == "U" { printf("%s%s", x++ ? " " : "", $$2) }';
12148 + endif
12149 +
12150 + define rule_cc_o_c
12151 +@@ -253,13 +256,6 @@ define rule_as_o_S
12152 + $(call cmd,modversions_S)
12153 + endef
12154 +
12155 +-# List module undefined symbols (or empty line if not enabled)
12156 +-ifdef CONFIG_TRIM_UNUSED_KSYMS
12157 +-cmd_undef_syms = $(NM) $< | sed -n 's/^ *U //p' | xargs echo
12158 +-else
12159 +-cmd_undef_syms = echo
12160 +-endif
12161 +-
12162 + # Built-in and composite module parts
12163 + $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
12164 + $(call cmd,force_checksrc)
12165 +@@ -267,7 +263,7 @@ $(obj)/%.o: $(src)/%.c $(recordmcount_source) $(objtool_dep) FORCE
12166 +
12167 + cmd_mod = { \
12168 + echo $(if $($*-objs)$($*-y)$($*-m), $(addprefix $(obj)/, $($*-objs) $($*-y) $($*-m)), $(@:.mod=.o)); \
12169 +- $(cmd_undef_syms); \
12170 ++ $(undefined_syms) echo; \
12171 + } > $@
12172 +
12173 + $(obj)/%.mod: $(obj)/%.o FORCE
12174 +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
12175 +index 0c9b114202796..a358af93cd7fc 100755
12176 +--- a/scripts/checkpatch.pl
12177 ++++ b/scripts/checkpatch.pl
12178 +@@ -4150,7 +4150,7 @@ sub process {
12179 + $fix) {
12180 + fix_delete_line($fixlinenr, $rawline);
12181 + my $fixed_line = $rawline;
12182 +- $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*){(.*)$/;
12183 ++ $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*)\{(.*)$/;
12184 + my $line1 = $1;
12185 + my $line2 = $2;
12186 + fix_insert_line($fixlinenr, ltrim($line1));
12187 +diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
12188 +index 0243086fb1685..0590f86df6e40 100644
12189 +--- a/scripts/kconfig/preprocess.c
12190 ++++ b/scripts/kconfig/preprocess.c
12191 +@@ -114,7 +114,7 @@ static char *do_error_if(int argc, char *argv[])
12192 + if (!strcmp(argv[0], "y"))
12193 + pperror("%s", argv[1]);
12194 +
12195 +- return NULL;
12196 ++ return xstrdup("");
12197 + }
12198 +
12199 + static char *do_filename(int argc, char *argv[])
12200 +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
12201 +index b06baf5d3cd32..e15f8d37d1f28 100644
12202 +--- a/security/integrity/ima/ima_crypto.c
12203 ++++ b/security/integrity/ima/ima_crypto.c
12204 +@@ -411,7 +411,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
12205 + loff_t i_size;
12206 + int rc;
12207 + struct file *f = file;
12208 +- bool new_file_instance = false, modified_mode = false;
12209 ++ bool new_file_instance = false;
12210 +
12211 + /*
12212 + * For consistency, fail file's opened with the O_DIRECT flag on
12213 +@@ -429,18 +429,10 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
12214 + O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
12215 + flags |= O_RDONLY;
12216 + f = dentry_open(&file->f_path, flags, file->f_cred);
12217 +- if (IS_ERR(f)) {
12218 +- /*
12219 +- * Cannot open the file again, lets modify f_mode
12220 +- * of original and continue
12221 +- */
12222 +- pr_info_ratelimited("Unable to reopen file for reading.\n");
12223 +- f = file;
12224 +- f->f_mode |= FMODE_READ;
12225 +- modified_mode = true;
12226 +- } else {
12227 +- new_file_instance = true;
12228 +- }
12229 ++ if (IS_ERR(f))
12230 ++ return PTR_ERR(f);
12231 ++
12232 ++ new_file_instance = true;
12233 + }
12234 +
12235 + i_size = i_size_read(file_inode(f));
12236 +@@ -455,8 +447,6 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
12237 + out:
12238 + if (new_file_instance)
12239 + fput(f);
12240 +- else if (modified_mode)
12241 +- f->f_mode &= ~FMODE_READ;
12242 + return rc;
12243 + }
12244 +
12245 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
12246 +index 212f48025db81..717a398ef4d05 100644
12247 +--- a/security/selinux/hooks.c
12248 ++++ b/security/selinux/hooks.c
12249 +@@ -1499,7 +1499,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
12250 + * inode_doinit with a dentry, before these inodes could
12251 + * be used again by userspace.
12252 + */
12253 +- goto out;
12254 ++ goto out_invalid;
12255 + }
12256 +
12257 + rc = inode_doinit_use_xattr(inode, dentry, sbsec->def_sid,
12258 +@@ -1554,7 +1554,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
12259 + * could be used again by userspace.
12260 + */
12261 + if (!dentry)
12262 +- goto out;
12263 ++ goto out_invalid;
12264 + rc = selinux_genfs_get_sid(dentry, sclass,
12265 + sbsec->flags, &sid);
12266 + if (rc) {
12267 +@@ -1579,11 +1579,10 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
12268 + out:
12269 + spin_lock(&isec->lock);
12270 + if (isec->initialized == LABEL_PENDING) {
12271 +- if (!sid || rc) {
12272 ++ if (rc) {
12273 + isec->initialized = LABEL_INVALID;
12274 + goto out_unlock;
12275 + }
12276 +-
12277 + isec->initialized = LABEL_INITIALIZED;
12278 + isec->sid = sid;
12279 + }
12280 +@@ -1591,6 +1590,15 @@ out:
12281 + out_unlock:
12282 + spin_unlock(&isec->lock);
12283 + return rc;
12284 ++
12285 ++out_invalid:
12286 ++ spin_lock(&isec->lock);
12287 ++ if (isec->initialized == LABEL_PENDING) {
12288 ++ isec->initialized = LABEL_INVALID;
12289 ++ isec->sid = sid;
12290 ++ }
12291 ++ spin_unlock(&isec->lock);
12292 ++ return 0;
12293 + }
12294 +
12295 + /* Convert a Linux signal to an access vector. */
12296 +diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
12297 +index 6850d13aa98c5..fe1ea03582cbb 100644
12298 +--- a/sound/core/memalloc.c
12299 ++++ b/sound/core/memalloc.c
12300 +@@ -76,7 +76,8 @@ static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
12301 + /* Assign the pool into private_data field */
12302 + dmab->private_data = pool;
12303 +
12304 +- dmab->area = gen_pool_dma_alloc(pool, size, &dmab->addr);
12305 ++ dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
12306 ++ PAGE_SIZE);
12307 + }
12308 +
12309 + /**
12310 +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
12311 +index 46004e329a24a..0b03777d01116 100644
12312 +--- a/sound/core/oss/pcm_oss.c
12313 ++++ b/sound/core/oss/pcm_oss.c
12314 +@@ -693,6 +693,8 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
12315 +
12316 + oss_buffer_size = snd_pcm_plug_client_size(substream,
12317 + snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
12318 ++ if (!oss_buffer_size)
12319 ++ return -EINVAL;
12320 + oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
12321 + if (atomic_read(&substream->mmap_count)) {
12322 + if (oss_buffer_size > runtime->oss.mmap_bytes)
12323 +@@ -728,17 +730,21 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
12324 +
12325 + min_period_size = snd_pcm_plug_client_size(substream,
12326 + snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
12327 +- min_period_size *= oss_frame_size;
12328 +- min_period_size = roundup_pow_of_two(min_period_size);
12329 +- if (oss_period_size < min_period_size)
12330 +- oss_period_size = min_period_size;
12331 ++ if (min_period_size) {
12332 ++ min_period_size *= oss_frame_size;
12333 ++ min_period_size = roundup_pow_of_two(min_period_size);
12334 ++ if (oss_period_size < min_period_size)
12335 ++ oss_period_size = min_period_size;
12336 ++ }
12337 +
12338 + max_period_size = snd_pcm_plug_client_size(substream,
12339 + snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
12340 +- max_period_size *= oss_frame_size;
12341 +- max_period_size = rounddown_pow_of_two(max_period_size);
12342 +- if (oss_period_size > max_period_size)
12343 +- oss_period_size = max_period_size;
12344 ++ if (max_period_size) {
12345 ++ max_period_size *= oss_frame_size;
12346 ++ max_period_size = rounddown_pow_of_two(max_period_size);
12347 ++ if (oss_period_size > max_period_size)
12348 ++ oss_period_size = max_period_size;
12349 ++ }
12350 +
12351 + oss_periods = oss_buffer_size / oss_period_size;
12352 +
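[Editor's note, not part of the patch: the pcm_oss.c hunks guard the plugin-converted sizes before rounding, since the kernel's round{up,down}_pow_of_two() helpers are not well-defined for 0 and a zero period size would also break the final division. A zero-safe round-down sketch; the helper below is illustrative (GCC/Clang builtin), not the kernel one:]

    #include <stdio.h>

    /* Round v down to a power of two; return 0 for v == 0 instead of
     * hitting the undefined shift an unguarded version would. */
    static unsigned long rounddown_pow_of_two_safe(unsigned long v)
    {
        if (v == 0)
            return 0;
        return 1UL << (sizeof(v) * 8 - 1 - __builtin_clzl(v));
    }

    int main(void)
    {
        printf("%lu %lu %lu\n",
               rounddown_pow_of_two_safe(0),     /* 0    */
               rounddown_pow_of_two_safe(48),    /* 32   */
               rounddown_pow_of_two_safe(4096)); /* 4096 */
        return 0;
    }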
12353 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
12354 +index 6da296def283e..326f95ce5ceb1 100644
12355 +--- a/sound/pci/hda/hda_codec.c
12356 ++++ b/sound/pci/hda/hda_codec.c
12357 +@@ -1798,7 +1798,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
12358 + return -EBUSY;
12359 +
12360 + /* OK, let it free */
12361 +- snd_hdac_device_unregister(&codec->core);
12362 ++ device_release_driver(hda_codec_dev(codec));
12363 +
12364 + /* allow device access again */
12365 + snd_hda_unlock_devices(bus);
12366 +diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
12367 +index 6dbe99131bc4b..91b4a29a8c366 100644
12368 +--- a/sound/pci/hda/hda_sysfs.c
12369 ++++ b/sound/pci/hda/hda_sysfs.c
12370 +@@ -139,7 +139,7 @@ static int reconfig_codec(struct hda_codec *codec)
12371 + "The codec is being used, can't reconfigure.\n");
12372 + goto error;
12373 + }
12374 +- err = snd_hda_codec_configure(codec);
12375 ++ err = device_reprobe(hda_codec_dev(codec));
12376 + if (err < 0)
12377 + goto error;
12378 + err = snd_card_register(codec->card);
12379 +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
12380 +index 459aff6c10bc5..d7b2aae6d4289 100644
12381 +--- a/sound/pci/hda/patch_ca0132.c
12382 ++++ b/sound/pci/hda/patch_ca0132.c
12383 +@@ -93,7 +93,7 @@ enum {
12384 + };
12385 +
12386 + /* Strings for Input Source Enum Control */
12387 +-static const char *const in_src_str[3] = {"Rear Mic", "Line", "Front Mic" };
12388 ++static const char *const in_src_str[3] = { "Microphone", "Line In", "Front Microphone" };
12389 + #define IN_SRC_NUM_OF_INPUTS 3
12390 + enum {
12391 + REAR_MIC,
12392 +@@ -1147,7 +1147,7 @@ static const struct hda_pintbl ae5_pincfgs[] = {
12393 + { 0x0e, 0x01c510f0 }, /* SPDIF In */
12394 + { 0x0f, 0x01017114 }, /* Port A -- Rear L/R. */
12395 + { 0x10, 0x01017012 }, /* Port D -- Center/LFE or FP Hp */
12396 +- { 0x11, 0x01a170ff }, /* Port B -- LineMicIn2 / Rear Headphone */
12397 ++ { 0x11, 0x012170ff }, /* Port B -- LineMicIn2 / Rear Headphone */
12398 + { 0x12, 0x01a170f0 }, /* Port C -- LineIn1 */
12399 + { 0x13, 0x908700f0 }, /* What U Hear In*/
12400 + { 0x18, 0x50d000f0 }, /* N/A */
12401 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
12402 +index c804c15debc69..ec0938923f5de 100644
12403 +--- a/sound/pci/hda/patch_realtek.c
12404 ++++ b/sound/pci/hda/patch_realtek.c
12405 +@@ -2506,6 +2506,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
12406 + SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
12407 + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
12408 + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
12409 ++ SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950),
12410 + SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
12411 + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
12412 + SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
12413 +@@ -3094,6 +3095,7 @@ static void alc_disable_headset_jack_key(struct hda_codec *codec)
12414 + case 0x10ec0215:
12415 + case 0x10ec0225:
12416 + case 0x10ec0285:
12417 ++ case 0x10ec0287:
12418 + case 0x10ec0295:
12419 + case 0x10ec0289:
12420 + case 0x10ec0299:
12421 +@@ -3120,6 +3122,7 @@ static void alc_enable_headset_jack_key(struct hda_codec *codec)
12422 + case 0x10ec0215:
12423 + case 0x10ec0225:
12424 + case 0x10ec0285:
12425 ++ case 0x10ec0287:
12426 + case 0x10ec0295:
12427 + case 0x10ec0289:
12428 + case 0x10ec0299:
12429 +@@ -7733,11 +7736,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12430 + SND_PCI_QUIRK(0x1025, 0x0762, "Acer Aspire E1-472", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
12431 + SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
12432 + SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
12433 ++ SND_PCI_QUIRK(0x1025, 0x101c, "Acer Veriton N2510G", ALC269_FIXUP_LIFEBOOK),
12434 + SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
12435 + SND_PCI_QUIRK(0x1025, 0x1065, "Acer Aspire C20-820", ALC269VC_FIXUP_ACER_HEADSET_MIC),
12436 + SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
12437 + SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
12438 + SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
12439 ++ SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK),
12440 ++ SND_PCI_QUIRK(0x1025, 0x1167, "Acer Veriton N6640G", ALC269_FIXUP_LIFEBOOK),
12441 + SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
12442 + SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
12443 + SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
12444 +@@ -7882,6 +7888,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12445 + SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
12446 + SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
12447 + SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
12448 ++ SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
12449 + SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
12450 + SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
12451 + SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
12452 +@@ -7902,6 +7909,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12453 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
12454 + SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
12455 + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
12456 ++ SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
12457 + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
12458 + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
12459 + SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
12460 +@@ -7939,6 +7947,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
12461 + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
12462 + SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
12463 + SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
12464 ++ SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK),
12465 + SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
12466 + SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
12467 + SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
12468 +@@ -8521,11 +8530,20 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
12469 + {0x12, 0x90a60130},
12470 + {0x19, 0x03a11020},
12471 + {0x21, 0x0321101f}),
12472 ++ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK,
12473 ++ {0x14, 0x90170110},
12474 ++ {0x19, 0x04a11040},
12475 ++ {0x21, 0x04211020}),
12476 + SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
12477 + {0x12, 0x90a60130},
12478 + {0x14, 0x90170110},
12479 + {0x19, 0x04a11040},
12480 + {0x21, 0x04211020}),
12481 ++ SND_HDA_PIN_QUIRK(0x10ec0287, 0x17aa, "Lenovo", ALC285_FIXUP_THINKPAD_HEADSET_JACK,
12482 ++ {0x14, 0x90170110},
12483 ++ {0x17, 0x90170111},
12484 ++ {0x19, 0x03a11030},
12485 ++ {0x21, 0x03211020}),
12486 + SND_HDA_PIN_QUIRK(0x10ec0286, 0x1025, "Acer", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE,
12487 + {0x12, 0x90a60130},
12488 + {0x17, 0x90170110},
12489 +diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
12490 +index f4ee6798154af..1612ec65aaf66 100644
12491 +--- a/sound/soc/amd/acp-da7219-max98357a.c
12492 ++++ b/sound/soc/amd/acp-da7219-max98357a.c
12493 +@@ -73,8 +73,13 @@ static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd)
12494 + return ret;
12495 + }
12496 +
12497 +- da7219_dai_wclk = clk_get(component->dev, "da7219-dai-wclk");
12498 +- da7219_dai_bclk = clk_get(component->dev, "da7219-dai-bclk");
12499 ++ da7219_dai_wclk = devm_clk_get(component->dev, "da7219-dai-wclk");
12500 ++ if (IS_ERR(da7219_dai_wclk))
12501 ++ return PTR_ERR(da7219_dai_wclk);
12502 ++
12503 ++ da7219_dai_bclk = devm_clk_get(component->dev, "da7219-dai-bclk");
12504 ++ if (IS_ERR(da7219_dai_bclk))
12505 ++ return PTR_ERR(da7219_dai_bclk);
12506 +
12507 + ret = snd_soc_card_jack_new(card, "Headset Jack",
12508 + SND_JACK_HEADSET | SND_JACK_LINEOUT |
12509 +diff --git a/sound/soc/codecs/cx2072x.c b/sound/soc/codecs/cx2072x.c
12510 +index 1c1ba7bea4d81..8ee4b2e1ff68f 100644
12511 +--- a/sound/soc/codecs/cx2072x.c
12512 ++++ b/sound/soc/codecs/cx2072x.c
12513 +@@ -1579,7 +1579,7 @@ static struct snd_soc_dai_driver soc_codec_cx2072x_dai[] = {
12514 + .id = CX2072X_DAI_DSP,
12515 + .probe = cx2072x_dsp_dai_probe,
12516 + .playback = {
12517 +- .stream_name = "Playback",
12518 ++ .stream_name = "DSP Playback",
12519 + .channels_min = 2,
12520 + .channels_max = 2,
12521 + .rates = CX2072X_RATES_DSP,
12522 +@@ -1591,7 +1591,7 @@ static struct snd_soc_dai_driver soc_codec_cx2072x_dai[] = {
12523 + .name = "cx2072x-aec",
12524 + .id = 3,
12525 + .capture = {
12526 +- .stream_name = "Capture",
12527 ++ .stream_name = "AEC Capture",
12528 + .channels_min = 2,
12529 + .channels_max = 2,
12530 + .rates = CX2072X_RATES_DSP,
12531 +diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
12532 +index 37e4bb3dbd8a9..229f2986cd96b 100644
12533 +--- a/sound/soc/codecs/wm8997.c
12534 ++++ b/sound/soc/codecs/wm8997.c
12535 +@@ -1177,6 +1177,8 @@ static int wm8997_probe(struct platform_device *pdev)
12536 + goto err_spk_irqs;
12537 + }
12538 +
12539 ++ return ret;
12540 ++
12541 + err_spk_irqs:
12542 + arizona_free_spk_irqs(arizona);
12543 +
12544 +diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
12545 +index 7c18992195733..817ccddd63448 100644
12546 +--- a/sound/soc/codecs/wm8998.c
12547 ++++ b/sound/soc/codecs/wm8998.c
12548 +@@ -1375,7 +1375,7 @@ static int wm8998_probe(struct platform_device *pdev)
12549 +
12550 + ret = arizona_init_spk_irqs(arizona);
12551 + if (ret < 0)
12552 +- return ret;
12553 ++ goto err_pm_disable;
12554 +
12555 + ret = devm_snd_soc_register_component(&pdev->dev,
12556 + &soc_component_dev_wm8998,
12557 +@@ -1390,6 +1390,8 @@ static int wm8998_probe(struct platform_device *pdev)
12558 +
12559 + err_spk_irqs:
12560 + arizona_free_spk_irqs(arizona);
12561 ++err_pm_disable:
12562 ++ pm_runtime_disable(&pdev->dev);
12563 +
12564 + return ret;
12565 + }
12566 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
12567 +index 4c56b782500db..13672928da997 100644
12568 +--- a/sound/soc/codecs/wm_adsp.c
12569 ++++ b/sound/soc/codecs/wm_adsp.c
12570 +@@ -1496,7 +1496,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
12571 + ctl_work = kzalloc(sizeof(*ctl_work), GFP_KERNEL);
12572 + if (!ctl_work) {
12573 + ret = -ENOMEM;
12574 +- goto err_ctl_cache;
12575 ++ goto err_list_del;
12576 + }
12577 +
12578 + ctl_work->dsp = dsp;
12579 +@@ -1506,7 +1506,8 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
12580 +
12581 + return 0;
12582 +
12583 +-err_ctl_cache:
12584 ++err_list_del:
12585 ++ list_del(&ctl->list);
12586 + kfree(ctl->cache);
12587 + err_ctl_name:
12588 + kfree(ctl->name);
12589 +diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
12590 +index 0bbd86390be59..9bfd2aabbfe63 100644
12591 +--- a/sound/soc/jz4740/jz4740-i2s.c
12592 ++++ b/sound/soc/jz4740/jz4740-i2s.c
12593 +@@ -309,10 +309,14 @@ static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
12594 + switch (clk_id) {
12595 + case JZ4740_I2S_CLKSRC_EXT:
12596 + parent = clk_get(NULL, "ext");
12597 ++ if (IS_ERR(parent))
12598 ++ return PTR_ERR(parent);
12599 + clk_set_parent(i2s->clk_i2s, parent);
12600 + break;
12601 + case JZ4740_I2S_CLKSRC_PLL:
12602 + parent = clk_get(NULL, "pll half");
12603 ++ if (IS_ERR(parent))
12604 ++ return PTR_ERR(parent);
12605 + clk_set_parent(i2s->clk_i2s, parent);
12606 + ret = clk_set_rate(i2s->clk_i2s, freq);
12607 + break;
12608 +diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
12609 +index 2e3676147ceaf..e0d24592ebd70 100644
12610 +--- a/sound/soc/meson/Kconfig
12611 ++++ b/sound/soc/meson/Kconfig
12612 +@@ -1,6 +1,6 @@
12613 + # SPDX-License-Identifier: GPL-2.0-only
12614 + menu "ASoC support for Amlogic platforms"
12615 +- depends on ARCH_MESON || COMPILE_TEST
12616 ++ depends on ARCH_MESON || (COMPILE_TEST && COMMON_CLK)
12617 +
12618 + config SND_MESON_AXG_FIFO
12619 + tristate
12620 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
12621 +index cc4e9aa80fb0d..1196167364d48 100644
12622 +--- a/sound/soc/soc-pcm.c
12623 ++++ b/sound/soc/soc-pcm.c
12624 +@@ -2346,6 +2346,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
12625 + case SNDRV_PCM_TRIGGER_START:
12626 + case SNDRV_PCM_TRIGGER_RESUME:
12627 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
12628 ++ case SNDRV_PCM_TRIGGER_DRAIN:
12629 + ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
12630 + break;
12631 + case SNDRV_PCM_TRIGGER_STOP:
12632 +@@ -2363,6 +2364,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
12633 + case SNDRV_PCM_TRIGGER_START:
12634 + case SNDRV_PCM_TRIGGER_RESUME:
12635 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
12636 ++ case SNDRV_PCM_TRIGGER_DRAIN:
12637 + ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
12638 + break;
12639 + case SNDRV_PCM_TRIGGER_STOP:
12640 +diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
12641 +index d0a8d5810c0a5..9655dec4166b6 100644
12642 +--- a/sound/soc/sunxi/sun4i-i2s.c
12643 ++++ b/sound/soc/sunxi/sun4i-i2s.c
12644 +@@ -442,11 +442,11 @@ static int sun8i_i2s_set_chan_cfg(const struct sun4i_i2s *i2s,
12645 + switch (i2s->format & SND_SOC_DAIFMT_FORMAT_MASK) {
12646 + case SND_SOC_DAIFMT_DSP_A:
12647 + case SND_SOC_DAIFMT_DSP_B:
12648 +- case SND_SOC_DAIFMT_LEFT_J:
12649 +- case SND_SOC_DAIFMT_RIGHT_J:
12650 + lrck_period = params_physical_width(params) * slots;
12651 + break;
12652 +
12653 ++ case SND_SOC_DAIFMT_LEFT_J:
12654 ++ case SND_SOC_DAIFMT_RIGHT_J:
12655 + case SND_SOC_DAIFMT_I2S:
12656 + lrck_period = params_physical_width(params);
12657 + break;
12658 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
12659 +index b118cf97607f3..385a488c25cb0 100644
12660 +--- a/sound/usb/clock.c
12661 ++++ b/sound/usb/clock.c
12662 +@@ -531,6 +531,12 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
12663 + }
12664 +
12665 + crate = data[0] | (data[1] << 8) | (data[2] << 16);
12666 ++ if (!crate) {
12667 ++ dev_info(&dev->dev, "failed to read current rate; disabling the check\n");
12668 ++ chip->sample_rate_read_error = 3; /* three strikes, see above */
12669 ++ return 0;
12670 ++ }
12671 ++
12672 + if (crate != rate) {
12673 + dev_warn(&dev->dev, "current rate %d is different from the runtime rate %d\n", crate, rate);
12674 + // runtime->rate = crate;
12675 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
12676 +index 931964716228e..a7f31766d14df 100644
12677 +--- a/sound/usb/quirks.c
12678 ++++ b/sound/usb/quirks.c
12679 +@@ -1731,6 +1731,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
12680 + case 0x25ce: /* Mytek devices */
12681 + case 0x278b: /* Rotel? */
12682 + case 0x292b: /* Gustard/Ess based devices */
12683 ++ case 0x2972: /* FiiO devices */
12684 + case 0x2ab6: /* T+A devices */
12685 + case 0x3353: /* Khadas devices */
12686 + case 0x3842: /* EVGA */
12687 +diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c
12688 +index ef46c28488085..869ef7e22bd91 100644
12689 +--- a/tools/perf/util/parse-regs-options.c
12690 ++++ b/tools/perf/util/parse-regs-options.c
12691 +@@ -52,7 +52,7 @@ __parse_regs(const struct option *opt, const char *str, int unset, bool intr)
12692 + }
12693 + fputc('\n', stderr);
12694 + /* just printing available regs */
12695 +- return -1;
12696 ++ goto error;
12697 + }
12698 + for (r = sample_reg_masks; r->name; r++) {
12699 + if ((r->mask & mask) && !strcasecmp(s, r->name))
12700 +diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
12701 +index bf50f464234fe..f778f8e7e65a3 100644
12702 +--- a/tools/perf/util/probe-file.c
12703 ++++ b/tools/perf/util/probe-file.c
12704 +@@ -777,7 +777,7 @@ static char *synthesize_sdt_probe_command(struct sdt_note *note,
12705 + const char *sdtgrp)
12706 + {
12707 + struct strbuf buf;
12708 +- char *ret = NULL, **args;
12709 ++ char *ret = NULL;
12710 + int i, args_count, err;
12711 + unsigned long long ref_ctr_offset;
12712 +
12713 +@@ -799,12 +799,19 @@ static char *synthesize_sdt_probe_command(struct sdt_note *note,
12714 + goto out;
12715 +
12716 + if (note->args) {
12717 +- args = argv_split(note->args, &args_count);
12718 ++ char **args = argv_split(note->args, &args_count);
12719 ++
12720 ++ if (args == NULL)
12721 ++ goto error;
12722 +
12723 + for (i = 0; i < args_count; ++i) {
12724 +- if (synthesize_sdt_probe_arg(&buf, i, args[i]) < 0)
12725 ++ if (synthesize_sdt_probe_arg(&buf, i, args[i]) < 0) {
12726 ++ argv_free(args);
12727 + goto error;
12728 ++ }
12729 + }
12730 ++
12731 ++ argv_free(args);
12732 + }
12733 +
12734 + out:
12735 +diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
12736 +index 6889c19a628c2..544bd1028baee 100644
12737 +--- a/tools/testing/selftests/bpf/Makefile
12738 ++++ b/tools/testing/selftests/bpf/Makefile
12739 +@@ -144,7 +144,8 @@ endif
12740 + # build would have failed anyways.
12741 + define get_sys_includes
12742 + $(shell $(1) -v -E - </dev/null 2>&1 \
12743 +- | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }')
12744 ++ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \
12745 ++$(shell $(1) -dM -E - </dev/null | grep '#define __riscv_xlen ' | sed 's/#define /-D/' | sed 's/ /=/')
12746 + endef
12747 + CLANG_SYS_INCLUDES = $(call get_sys_includes,$(CLANG))
12748 + BPF_CFLAGS = -I. -I./include/uapi -I../../../include/uapi \
12749 +diff --git a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
12750 +index 504df69c83df4..0f98724120deb 100644
12751 +--- a/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
12752 ++++ b/tools/testing/selftests/bpf/progs/test_tunnel_kern.c
12753 +@@ -15,7 +15,6 @@
12754 + #include <linux/ip.h>
12755 + #include <linux/ipv6.h>
12756 + #include <linux/types.h>
12757 +-#include <linux/tcp.h>
12758 + #include <linux/socket.h>
12759 + #include <linux/pkt_cls.h>
12760 + #include <linux/erspan.h>
12761 +@@ -528,12 +527,11 @@ int _ipip_set_tunnel(struct __sk_buff *skb)
12762 + struct bpf_tunnel_key key = {};
12763 + void *data = (void *)(long)skb->data;
12764 + struct iphdr *iph = data;
12765 +- struct tcphdr *tcp = data + sizeof(*iph);
12766 + void *data_end = (void *)(long)skb->data_end;
12767 + int ret;
12768 +
12769 + /* single length check */
12770 +- if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
12771 ++ if (data + sizeof(*iph) > data_end) {
12772 + ERROR(1);
12773 + return TC_ACT_SHOT;
12774 + }
12775 +@@ -541,16 +539,6 @@ int _ipip_set_tunnel(struct __sk_buff *skb)
12776 + key.tunnel_ttl = 64;
12777 + if (iph->protocol == IPPROTO_ICMP) {
12778 + key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
12779 +- } else {
12780 +- if (iph->protocol != IPPROTO_TCP || iph->ihl != 5)
12781 +- return TC_ACT_SHOT;
12782 +-
12783 +- if (tcp->dest == bpf_htons(5200))
12784 +- key.remote_ipv4 = 0xac100164; /* 172.16.1.100 */
12785 +- else if (tcp->dest == bpf_htons(5201))
12786 +- key.remote_ipv4 = 0xac100165; /* 172.16.1.101 */
12787 +- else
12788 +- return TC_ACT_SHOT;
12789 + }
12790 +
12791 + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0);
12792 +@@ -585,19 +573,20 @@ int _ipip6_set_tunnel(struct __sk_buff *skb)
12793 + struct bpf_tunnel_key key = {};
12794 + void *data = (void *)(long)skb->data;
12795 + struct iphdr *iph = data;
12796 +- struct tcphdr *tcp = data + sizeof(*iph);
12797 + void *data_end = (void *)(long)skb->data_end;
12798 + int ret;
12799 +
12800 + /* single length check */
12801 +- if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
12802 ++ if (data + sizeof(*iph) > data_end) {
12803 + ERROR(1);
12804 + return TC_ACT_SHOT;
12805 + }
12806 +
12807 + __builtin_memset(&key, 0x0, sizeof(key));
12808 +- key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
12809 + key.tunnel_ttl = 64;
12810 ++ if (iph->protocol == IPPROTO_ICMP) {
12811 ++ key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
12812 ++ }
12813 +
12814 + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
12815 + BPF_F_TUNINFO_IPV6);
12816 +@@ -634,35 +623,18 @@ int _ip6ip6_set_tunnel(struct __sk_buff *skb)
12817 + struct bpf_tunnel_key key = {};
12818 + void *data = (void *)(long)skb->data;
12819 + struct ipv6hdr *iph = data;
12820 +- struct tcphdr *tcp = data + sizeof(*iph);
12821 + void *data_end = (void *)(long)skb->data_end;
12822 + int ret;
12823 +
12824 + /* single length check */
12825 +- if (data + sizeof(*iph) + sizeof(*tcp) > data_end) {
12826 ++ if (data + sizeof(*iph) > data_end) {
12827 + ERROR(1);
12828 + return TC_ACT_SHOT;
12829 + }
12830 +
12831 +- key.remote_ipv6[0] = bpf_htonl(0x2401db00);
12832 + key.tunnel_ttl = 64;
12833 +-
12834 + if (iph->nexthdr == 58 /* NEXTHDR_ICMP */) {
12835 +- key.remote_ipv6[3] = bpf_htonl(1);
12836 +- } else {
12837 +- if (iph->nexthdr != 6 /* NEXTHDR_TCP */) {
12838 +- ERROR(iph->nexthdr);
12839 +- return TC_ACT_SHOT;
12840 +- }
12841 +-
12842 +- if (tcp->dest == bpf_htons(5200)) {
12843 +- key.remote_ipv6[3] = bpf_htonl(1);
12844 +- } else if (tcp->dest == bpf_htons(5201)) {
12845 +- key.remote_ipv6[3] = bpf_htonl(2);
12846 +- } else {
12847 +- ERROR(tcp->dest);
12848 +- return TC_ACT_SHOT;
12849 +- }
12850 ++ key.remote_ipv6[3] = bpf_htonl(0x11); /* ::11 */
12851 + }
12852 +
12853 + ret = bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
12854 +diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
12855 +index 1afa22c88e42a..8f918847ddf89 100755
12856 +--- a/tools/testing/selftests/bpf/test_offload.py
12857 ++++ b/tools/testing/selftests/bpf/test_offload.py
12858 +@@ -930,6 +930,7 @@ try:
12859 + start_test("Test disabling TC offloads is rejected while filters installed...")
12860 + ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
12861 + fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
12862 ++ sim.set_ethtool_tc_offloads(True)
12863 +
12864 + start_test("Test qdisc removal frees things...")
12865 + sim.tc_flush_filters()
12866 +diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
12867 +index bd12ec97a44df..1ccbe804e8e1c 100755
12868 +--- a/tools/testing/selftests/bpf/test_tunnel.sh
12869 ++++ b/tools/testing/selftests/bpf/test_tunnel.sh
12870 +@@ -24,12 +24,12 @@
12871 + # Root namespace with metadata-mode tunnel + BPF
12872 + # Device names and addresses:
12873 + # veth1 IP: 172.16.1.200, IPv6: 00::22 (underlay)
12874 +-# tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200 (overlay)
12875 ++# tunnel dev <type>11, ex: gre11, IPv4: 10.1.1.200, IPv6: 1::22 (overlay)
12876 + #
12877 + # Namespace at_ns0 with native tunnel
12878 + # Device names and addresses:
12879 + # veth0 IPv4: 172.16.1.100, IPv6: 00::11 (underlay)
12880 +-# tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100 (overlay)
12881 ++# tunnel dev <type>00, ex: gre00, IPv4: 10.1.1.100, IPv6: 1::11 (overlay)
12882 + #
12883 + #
12884 + # End-to-end ping packet flow
12885 +@@ -250,7 +250,7 @@ add_ipip_tunnel()
12886 + ip addr add dev $DEV 10.1.1.200/24
12887 + }
12888 +
12889 +-add_ipip6tnl_tunnel()
12890 ++add_ip6tnl_tunnel()
12891 + {
12892 + ip netns exec at_ns0 ip addr add ::11/96 dev veth0
12893 + ip netns exec at_ns0 ip link set dev veth0 up
12894 +@@ -262,11 +262,13 @@ add_ipip6tnl_tunnel()
12895 + ip link add dev $DEV_NS type $TYPE \
12896 + local ::11 remote ::22
12897 + ip netns exec at_ns0 ip addr add dev $DEV_NS 10.1.1.100/24
12898 ++ ip netns exec at_ns0 ip addr add dev $DEV_NS 1::11/96
12899 + ip netns exec at_ns0 ip link set dev $DEV_NS up
12900 +
12901 + # root namespace
12902 + ip link add dev $DEV type $TYPE external
12903 + ip addr add dev $DEV 10.1.1.200/24
12904 ++ ip addr add dev $DEV 1::22/96
12905 + ip link set dev $DEV up
12906 + }
12907 +
12908 +@@ -534,7 +536,7 @@ test_ipip6()
12909 +
12910 + check $TYPE
12911 + config_device
12912 +- add_ipip6tnl_tunnel
12913 ++ add_ip6tnl_tunnel
12914 + ip link set dev veth1 mtu 1500
12915 + attach_bpf $DEV ipip6_set_tunnel ipip6_get_tunnel
12916 + # underlay
12917 +@@ -553,6 +555,34 @@ test_ipip6()
12918 + echo -e ${GREEN}"PASS: $TYPE"${NC}
12919 + }
12920 +
12921 ++test_ip6ip6()
12922 ++{
12923 ++ TYPE=ip6tnl
12924 ++ DEV_NS=ip6ip6tnl00
12925 ++ DEV=ip6ip6tnl11
12926 ++ ret=0
12927 ++
12928 ++ check $TYPE
12929 ++ config_device
12930 ++ add_ip6tnl_tunnel
12931 ++ ip link set dev veth1 mtu 1500
12932 ++ attach_bpf $DEV ip6ip6_set_tunnel ip6ip6_get_tunnel
12933 ++ # underlay
12934 ++ ping6 $PING_ARG ::11
12935 ++ # ip6 over ip6
12936 ++ ping6 $PING_ARG 1::11
12937 ++ check_err $?
12938 ++ ip netns exec at_ns0 ping6 $PING_ARG 1::22
12939 ++ check_err $?
12940 ++ cleanup
12941 ++
12942 ++ if [ $ret -ne 0 ]; then
12943 ++ echo -e ${RED}"FAIL: ip6$TYPE"${NC}
12944 ++ return 1
12945 ++ fi
12946 ++ echo -e ${GREEN}"PASS: ip6$TYPE"${NC}
12947 ++}
12948 ++
12949 + setup_xfrm_tunnel()
12950 + {
12951 + auth=0x$(printf '1%.0s' {1..40})
12952 +@@ -646,6 +676,7 @@ cleanup()
12953 + ip link del veth1 2> /dev/null
12954 + ip link del ipip11 2> /dev/null
12955 + ip link del ipip6tnl11 2> /dev/null
12956 ++ ip link del ip6ip6tnl11 2> /dev/null
12957 + ip link del gretap11 2> /dev/null
12958 + ip link del ip6gre11 2> /dev/null
12959 + ip link del ip6gretap11 2> /dev/null
12960 +@@ -742,6 +773,10 @@ bpf_tunnel_test()
12961 + test_ipip6
12962 + errors=$(( $errors + $? ))
12963 +
12964 ++ echo "Testing IP6IP6 tunnel..."
12965 ++ test_ip6ip6
12966 ++ errors=$(( $errors + $? ))
12967 ++
12968 + echo "Testing IPSec tunnel..."
12969 + test_xfrm_tunnel
12970 + errors=$(( $errors + $? ))
12971 +diff --git a/tools/testing/selftests/net/udpgso_bench_rx.c b/tools/testing/selftests/net/udpgso_bench_rx.c
12972 +index db3d4a8b5a4c4..76a24052f4b47 100644
12973 +--- a/tools/testing/selftests/net/udpgso_bench_rx.c
12974 ++++ b/tools/testing/selftests/net/udpgso_bench_rx.c
12975 +@@ -113,6 +113,9 @@ static void do_poll(int fd, int timeout_ms)
12976 + interrupted = true;
12977 + break;
12978 + }
12979 ++
12980 ++ /* no events and more time to wait, do poll again */
12981 ++ continue;
12982 + }
12983 + if (pfd.revents != POLLIN)
12984 + error(1, errno, "poll: 0x%x expected 0x%x\n",
12985 +diff --git a/tools/testing/selftests/seccomp/config b/tools/testing/selftests/seccomp/config
12986 +index db1e11b08c8a4..764af1f853f95 100644
12987 +--- a/tools/testing/selftests/seccomp/config
12988 ++++ b/tools/testing/selftests/seccomp/config
12989 +@@ -1,2 +1,3 @@
12990 ++CONFIG_PID_NS=y
12991 + CONFIG_SECCOMP=y
12992 + CONFIG_SECCOMP_FILTER=y