Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Wed, 30 Dec 2020 12:52:10 +0000 (UTC)
Message-Id: 1609332706.69d400b9dc2731690a4900bf1099f8e6426c7e7a.mpagano@gentoo
1 commit: 69d400b9dc2731690a4900bf1099f8e6426c7e7a
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Dec 30 12:51:46 2020 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Dec 30 12:51:46 2020 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=69d400b9dc2731690a4900bf1099f8e6426c7e7a
7
8 Linux patch 4.19.164
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1163_linux-4.19.164.patch | 9937 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 9941 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index e1c0627..5ffa36c 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -691,6 +691,10 @@ Patch: 1162_linux-4.19.163.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.163
23
24 +Patch: 1163_linux-4.19.164.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.164
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1163_linux-4.19.164.patch b/1163_linux-4.19.164.patch
33 new file mode 100644
34 index 0000000..581268c
35 --- /dev/null
36 +++ b/1163_linux-4.19.164.patch
37 @@ -0,0 +1,9937 @@
38 +diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
39 +index 7371643dd8d4f..558332df02a84 100644
40 +--- a/Documentation/admin-guide/kernel-parameters.txt
41 ++++ b/Documentation/admin-guide/kernel-parameters.txt
42 +@@ -4998,6 +4998,7 @@
43 + device);
44 + j = NO_REPORT_LUNS (don't use report luns
45 + command, uas only);
46 ++ k = NO_SAME (do not use WRITE_SAME, uas only)
47 + l = NOT_LOCKABLE (don't try to lock and
48 + unlock ejectable media, not on uas);
49 + m = MAX_SECTORS_64 (don't transfer more
50 +diff --git a/Makefile b/Makefile
51 +index b651d77eb2df9..d02af6881a5fe 100644
52 +--- a/Makefile
53 ++++ b/Makefile
54 +@@ -1,7 +1,7 @@
55 + # SPDX-License-Identifier: GPL-2.0
56 + VERSION = 4
57 + PATCHLEVEL = 19
58 +-SUBLEVEL = 163
59 ++SUBLEVEL = 164
60 + EXTRAVERSION =
61 + NAME = "People's Front"
62 +
63 +@@ -745,8 +745,11 @@ KBUILD_CFLAGS += $(call cc-option, -gsplit-dwarf, -g)
64 + else
65 + KBUILD_CFLAGS += -g
66 + endif
67 ++ifneq ($(LLVM_IAS),1)
68 + KBUILD_AFLAGS += -Wa,-gdwarf-2
69 + endif
70 ++endif
71 ++
72 + ifdef CONFIG_DEBUG_INFO_DWARF4
73 + KBUILD_CFLAGS += $(call cc-option, -gdwarf-4,)
74 + endif
75 +diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
76 +index 0fed32b959232..a211e87aa6d93 100644
77 +--- a/arch/arc/kernel/stacktrace.c
78 ++++ b/arch/arc/kernel/stacktrace.c
79 +@@ -41,15 +41,15 @@
80 +
81 + #ifdef CONFIG_ARC_DW2_UNWIND
82 +
83 +-static void seed_unwind_frame_info(struct task_struct *tsk,
84 +- struct pt_regs *regs,
85 +- struct unwind_frame_info *frame_info)
86 ++static int
87 ++seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
88 ++ struct unwind_frame_info *frame_info)
89 + {
90 + /*
91 + * synchronous unwinding (e.g. dump_stack)
92 + * - uses current values of SP and friends
93 + */
94 +- if (tsk == NULL && regs == NULL) {
95 ++ if (regs == NULL && (tsk == NULL || tsk == current)) {
96 + unsigned long fp, sp, blink, ret;
97 + frame_info->task = current;
98 +
99 +@@ -68,11 +68,15 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
100 + frame_info->call_frame = 0;
101 + } else if (regs == NULL) {
102 + /*
103 +- * Asynchronous unwinding of sleeping task
104 +- * - Gets SP etc from task's pt_regs (saved bottom of kernel
105 +- * mode stack of task)
106 ++ * Asynchronous unwinding of a likely sleeping task
107 ++ * - first ensure it is actually sleeping
108 ++ * - if so, it will be in __switch_to, kernel mode SP of task
109 ++ * is safe-kept and BLINK at a well known location in there
110 + */
111 +
112 ++ if (tsk->state == TASK_RUNNING)
113 ++ return -1;
114 ++
115 + frame_info->task = tsk;
116 +
117 + frame_info->regs.r27 = TSK_K_FP(tsk);
118 +@@ -106,6 +110,8 @@ static void seed_unwind_frame_info(struct task_struct *tsk,
119 + frame_info->regs.r63 = regs->ret;
120 + frame_info->call_frame = 0;
121 + }
122 ++
123 ++ return 0;
124 + }
125 +
126 + #endif
127 +@@ -119,7 +125,8 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
128 + unsigned int address;
129 + struct unwind_frame_info frame_info;
130 +
131 +- seed_unwind_frame_info(tsk, regs, &frame_info);
132 ++ if (seed_unwind_frame_info(tsk, regs, &frame_info))
133 ++ return 0;
134 +
135 + while (1) {
136 + address = UNW_PC(&frame_info);
137 +diff --git a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
138 +index 3e7d093d7a9a2..966d9a6c40fca 100644
139 +--- a/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
140 ++++ b/arch/arm/boot/dts/armada-xp-98dx3236.dtsi
141 +@@ -266,11 +266,6 @@
142 + reg = <0x11000 0x100>;
143 + };
144 +
145 +-&i2c1 {
146 +- compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
147 +- reg = <0x11100 0x100>;
148 +-};
149 +-
150 + &mpic {
151 + reg = <0x20a00 0x2d0>, <0x21070 0x58>;
152 + };
153 +diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
154 +index 22dade6393d06..d1dbe3b6ad5a7 100644
155 +--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
156 ++++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
157 +@@ -22,9 +22,9 @@
158 + #size-cells = <1>;
159 + ranges;
160 +
161 +- vga_memory: framebuffer@7f000000 {
162 ++ vga_memory: framebuffer@9f000000 {
163 + no-map;
164 +- reg = <0x7f000000 0x01000000>;
165 ++ reg = <0x9f000000 0x01000000>; /* 16M */
166 + };
167 + };
168 +
169 +diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
170 +index 02c1d2958d780..74440dad43354 100644
171 +--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
172 ++++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
173 +@@ -243,6 +243,11 @@
174 + atmel,pins =
175 + <AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */
176 + };
177 ++ pinctrl_usb_default: usb_default {
178 ++ atmel,pins =
179 ++ <AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
180 ++ AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
181 ++ };
182 + };
183 + };
184 + };
185 +@@ -260,6 +265,8 @@
186 + &pioE 3 GPIO_ACTIVE_LOW
187 + &pioE 4 GPIO_ACTIVE_LOW
188 + >;
189 ++ pinctrl-names = "default";
190 ++ pinctrl-0 = <&pinctrl_usb_default>;
191 + status = "okay";
192 + };
193 +
194 +diff --git a/arch/arm/boot/dts/at91-sama5d4_xplained.dts b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
195 +index 7d554b9ab27fd..e998d72d8b107 100644
196 +--- a/arch/arm/boot/dts/at91-sama5d4_xplained.dts
197 ++++ b/arch/arm/boot/dts/at91-sama5d4_xplained.dts
198 +@@ -170,6 +170,11 @@
199 + atmel,pins =
200 + <AT91_PIOE 31 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>;
201 + };
202 ++ pinctrl_usb_default: usb_default {
203 ++ atmel,pins =
204 ++ <AT91_PIOE 11 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
205 ++ AT91_PIOE 14 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
206 ++ };
207 + pinctrl_key_gpio: key_gpio_0 {
208 + atmel,pins =
209 + <AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
210 +@@ -195,6 +200,8 @@
211 + &pioE 11 GPIO_ACTIVE_HIGH
212 + &pioE 14 GPIO_ACTIVE_HIGH
213 + >;
214 ++ pinctrl-names = "default";
215 ++ pinctrl-0 = <&pinctrl_usb_default>;
216 + status = "okay";
217 + };
218 +
219 +diff --git a/arch/arm/boot/dts/at91sam9rl.dtsi b/arch/arm/boot/dts/at91sam9rl.dtsi
220 +index ad495f5a5790f..cdf016232fb7d 100644
221 +--- a/arch/arm/boot/dts/at91sam9rl.dtsi
222 ++++ b/arch/arm/boot/dts/at91sam9rl.dtsi
223 +@@ -277,23 +277,26 @@
224 + atmel,adc-use-res = "highres";
225 +
226 + trigger0 {
227 +- trigger-name = "timer-counter-0";
228 ++ trigger-name = "external-rising";
229 + trigger-value = <0x1>;
230 ++ trigger-external;
231 + };
232 ++
233 + trigger1 {
234 +- trigger-name = "timer-counter-1";
235 +- trigger-value = <0x3>;
236 ++ trigger-name = "external-falling";
237 ++ trigger-value = <0x2>;
238 ++ trigger-external;
239 + };
240 +
241 + trigger2 {
242 +- trigger-name = "timer-counter-2";
243 +- trigger-value = <0x5>;
244 ++ trigger-name = "external-any";
245 ++ trigger-value = <0x3>;
246 ++ trigger-external;
247 + };
248 +
249 + trigger3 {
250 +- trigger-name = "external";
251 +- trigger-value = <0x13>;
252 +- trigger-external;
253 ++ trigger-name = "continuous";
254 ++ trigger-value = <0x6>;
255 + };
256 + };
257 +
258 +diff --git a/arch/arm/boot/dts/exynos5410-odroidxu.dts b/arch/arm/boot/dts/exynos5410-odroidxu.dts
259 +index a2046f5f998c1..840a854ee8385 100644
260 +--- a/arch/arm/boot/dts/exynos5410-odroidxu.dts
261 ++++ b/arch/arm/boot/dts/exynos5410-odroidxu.dts
262 +@@ -324,6 +324,8 @@
263 + regulator-name = "vddq_lcd";
264 + regulator-min-microvolt = <1800000>;
265 + regulator-max-microvolt = <1800000>;
266 ++ /* Supplies also GPK and GPJ */
267 ++ regulator-always-on;
268 + };
269 +
270 + ldo8_reg: LDO8 {
271 +@@ -626,11 +628,11 @@
272 + };
273 +
274 + &usbdrd_dwc3_0 {
275 +- dr_mode = "host";
276 ++ dr_mode = "peripheral";
277 + };
278 +
279 + &usbdrd_dwc3_1 {
280 +- dr_mode = "peripheral";
281 ++ dr_mode = "host";
282 + };
283 +
284 + &usbdrd3_0 {
285 +diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
286 +index 369a8a7f21050..481ee99aa9c97 100644
287 +--- a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
288 ++++ b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
289 +@@ -560,6 +560,34 @@
290 + interrupt-controller;
291 + #interrupt-cells = <2>;
292 + };
293 ++
294 ++ usb3_1_oc: usb3-1-oc {
295 ++ samsung,pins = "gpk2-4", "gpk2-5";
296 ++ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
297 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
298 ++ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
299 ++ };
300 ++
301 ++ usb3_1_vbusctrl: usb3-1-vbusctrl {
302 ++ samsung,pins = "gpk2-6", "gpk2-7";
303 ++ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
304 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
305 ++ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
306 ++ };
307 ++
308 ++ usb3_0_oc: usb3-0-oc {
309 ++ samsung,pins = "gpk3-0", "gpk3-1";
310 ++ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
311 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_UP>;
312 ++ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
313 ++ };
314 ++
315 ++ usb3_0_vbusctrl: usb3-0-vbusctrl {
316 ++ samsung,pins = "gpk3-2", "gpk3-3";
317 ++ samsung,pin-function = <EXYNOS_PIN_FUNC_2>;
318 ++ samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
319 ++ samsung,pin-drv = <EXYNOS5420_PIN_DRV_LV1>;
320 ++ };
321 + };
322 +
323 + &pinctrl_2 {
324 +diff --git a/arch/arm/boot/dts/exynos5410.dtsi b/arch/arm/boot/dts/exynos5410.dtsi
325 +index 57fc9c949e54a..95b794b1ea62e 100644
326 +--- a/arch/arm/boot/dts/exynos5410.dtsi
327 ++++ b/arch/arm/boot/dts/exynos5410.dtsi
328 +@@ -392,6 +392,8 @@
329 + &usbdrd3_0 {
330 + clocks = <&clock CLK_USBD300>;
331 + clock-names = "usbdrd30";
332 ++ pinctrl-names = "default";
333 ++ pinctrl-0 = <&usb3_0_oc>, <&usb3_0_vbusctrl>;
334 + };
335 +
336 + &usbdrd_phy0 {
337 +@@ -403,6 +405,8 @@
338 + &usbdrd3_1 {
339 + clocks = <&clock CLK_USBD301>;
340 + clock-names = "usbdrd30";
341 ++ pinctrl-names = "default";
342 ++ pinctrl-0 = <&usb3_1_oc>, <&usb3_1_vbusctrl>;
343 + };
344 +
345 + &usbdrd_dwc3_1 {
346 +diff --git a/arch/arm/boot/dts/omap4-panda-es.dts b/arch/arm/boot/dts/omap4-panda-es.dts
347 +index 19d02df8d8a59..70fd28120c275 100644
348 +--- a/arch/arm/boot/dts/omap4-panda-es.dts
349 ++++ b/arch/arm/boot/dts/omap4-panda-es.dts
350 +@@ -49,7 +49,7 @@
351 +
352 + button_pins: pinmux_button_pins {
353 + pinctrl-single,pins = <
354 +- OMAP4_IOPAD(0x11b, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
355 ++ OMAP4_IOPAD(0x0fc, PIN_INPUT_PULLUP | MUX_MODE3) /* gpio_113 */
356 + >;
357 + };
358 + };
359 +diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
360 +index b405992eb6016..d856c16d00156 100644
361 +--- a/arch/arm/boot/dts/sama5d2.dtsi
362 ++++ b/arch/arm/boot/dts/sama5d2.dtsi
363 +@@ -1247,6 +1247,7 @@
364 + clocks = <&securam_clk>;
365 + #address-cells = <1>;
366 + #size-cells = <1>;
367 ++ no-memory-wc;
368 + ranges = <0 0xf8044000 0x1420>;
369 + };
370 +
371 +@@ -1297,7 +1298,7 @@
372 +
373 + can0: can@f8054000 {
374 + compatible = "bosch,m_can";
375 +- reg = <0xf8054000 0x4000>, <0x210000 0x4000>;
376 ++ reg = <0xf8054000 0x4000>, <0x210000 0x1c00>;
377 + reg-names = "m_can", "message_ram";
378 + interrupts = <56 IRQ_TYPE_LEVEL_HIGH 7>,
379 + <64 IRQ_TYPE_LEVEL_HIGH 7>;
380 +@@ -1490,7 +1491,7 @@
381 +
382 + can1: can@fc050000 {
383 + compatible = "bosch,m_can";
384 +- reg = <0xfc050000 0x4000>, <0x210000 0x4000>;
385 ++ reg = <0xfc050000 0x4000>, <0x210000 0x3800>;
386 + reg-names = "m_can", "message_ram";
387 + interrupts = <57 IRQ_TYPE_LEVEL_HIGH 7>,
388 + <65 IRQ_TYPE_LEVEL_HIGH 7>;
389 +@@ -1500,7 +1501,7 @@
390 + assigned-clocks = <&can1_gclk>;
391 + assigned-clock-parents = <&utmi>;
392 + assigned-clock-rates = <40000000>;
393 +- bosch,mram-cfg = <0x1100 0 0 64 0 0 32 32>;
394 ++ bosch,mram-cfg = <0x1c00 0 0 64 0 0 32 32>;
395 + status = "disabled";
396 + };
397 +
398 +diff --git a/arch/arm/boot/dts/sun8i-v3s.dtsi b/arch/arm/boot/dts/sun8i-v3s.dtsi
399 +index 92fcb756a08a9..97cac6d636923 100644
400 +--- a/arch/arm/boot/dts/sun8i-v3s.dtsi
401 ++++ b/arch/arm/boot/dts/sun8i-v3s.dtsi
402 +@@ -419,7 +419,7 @@
403 + gic: interrupt-controller@1c81000 {
404 + compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
405 + reg = <0x01c81000 0x1000>,
406 +- <0x01c82000 0x1000>,
407 ++ <0x01c82000 0x2000>,
408 + <0x01c84000 0x2000>,
409 + <0x01c86000 0x2000>;
410 + interrupt-controller;
411 +diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
412 +index 6b1148cafffdb..90add5ded3f1f 100644
413 +--- a/arch/arm/kernel/head.S
414 ++++ b/arch/arm/kernel/head.S
415 +@@ -674,12 +674,8 @@ ARM_BE8(rev16 ip, ip)
416 + ldrcc r7, [r4], #4 @ use branch for delay slot
417 + bcc 1b
418 + bx lr
419 +-#else
420 +-#ifdef CONFIG_CPU_ENDIAN_BE8
421 +- moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction
422 + #else
423 + moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
424 +-#endif
425 + b 2f
426 + 1: ldr ip, [r7, r3]
427 + #ifdef CONFIG_CPU_ENDIAN_BE8
428 +@@ -688,7 +684,7 @@ ARM_BE8(rev16 ip, ip)
429 + tst ip, #0x000f0000 @ check the rotation field
430 + orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
431 + biceq ip, ip, #0x00004000 @ clear bit 22
432 +- orreq ip, ip, r0 @ mask in offset bits 7-0
433 ++ orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0
434 + #else
435 + bic ip, ip, #0x000000ff
436 + tst ip, #0xf00 @ check the rotation field
437 +diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
438 +index e348bcfe389da..cb8b02a1abe26 100644
439 +--- a/arch/arm/mach-shmobile/pm-rmobile.c
440 ++++ b/arch/arm/mach-shmobile/pm-rmobile.c
441 +@@ -330,6 +330,7 @@ static int __init rmobile_init_pm_domains(void)
442 +
443 + pmd = of_get_child_by_name(np, "pm-domains");
444 + if (!pmd) {
445 ++ iounmap(base);
446 + pr_warn("%pOF lacks pm-domains node\n", np);
447 + continue;
448 + }
449 +diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
450 +index de4b0e932f22e..aa08b8cb01524 100644
451 +--- a/arch/arm/mach-sunxi/sunxi.c
452 ++++ b/arch/arm/mach-sunxi/sunxi.c
453 +@@ -66,6 +66,7 @@ static const char * const sun8i_board_dt_compat[] = {
454 + "allwinner,sun8i-h2-plus",
455 + "allwinner,sun8i-h3",
456 + "allwinner,sun8i-r40",
457 ++ "allwinner,sun8i-v3",
458 + "allwinner,sun8i-v3s",
459 + NULL,
460 + };
461 +diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
462 +index bdf7c6c5983ce..30fa9302a4dc8 100644
463 +--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
464 ++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
465 +@@ -399,7 +399,7 @@
466 + #size-cells = <1>;
467 + compatible = "winbond,w25q16", "jedec,spi-nor";
468 + reg = <0>;
469 +- spi-max-frequency = <3000000>;
470 ++ spi-max-frequency = <104000000>;
471 + };
472 + };
473 +
474 +diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
475 +index 31b1a606cb664..5c5e57026c275 100644
476 +--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
477 ++++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
478 +@@ -62,8 +62,10 @@
479 + };
480 +
481 + psci {
482 +- compatible = "arm,psci-0.2";
483 ++ compatible = "arm,psci";
484 + method = "smc";
485 ++ cpu_off = <0x84000002>;
486 ++ cpu_on = <0xC4000003>;
487 + };
488 +
489 + soc: soc {
490 +@@ -494,13 +496,6 @@
491 + pmu_system_controller: system-controller@105c0000 {
492 + compatible = "samsung,exynos7-pmu", "syscon";
493 + reg = <0x105c0000 0x5000>;
494 +-
495 +- reboot: syscon-reboot {
496 +- compatible = "syscon-reboot";
497 +- regmap = <&pmu_system_controller>;
498 +- offset = <0x0400>;
499 +- mask = <0x1>;
500 +- };
501 + };
502 +
503 + rtc: rtc@10590000 {
504 +@@ -638,3 +633,4 @@
505 + };
506 +
507 + #include "exynos7-pinctrl.dtsi"
508 ++#include "arm/exynos-syscon-restart.dtsi"
509 +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
510 +index 91061d9cf78bc..5b1ece4a68d67 100644
511 +--- a/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
512 ++++ b/arch/arm64/boot/dts/rockchip/rk3328-roc-cc.dts
513 +@@ -255,6 +255,7 @@
514 + };
515 +
516 + &usb20_otg {
517 ++ dr_mode = "host";
518 + status = "okay";
519 + };
520 +
521 +diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
522 +index 92186edefeb96..6be7c67584ba9 100644
523 +--- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
524 ++++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
525 +@@ -1085,8 +1085,8 @@
526 +
527 + uart0 {
528 + uart0_xfer: uart0-xfer {
529 +- rockchip,pins = <1 RK_PB1 1 &pcfg_pull_up>,
530 +- <1 RK_PB0 1 &pcfg_pull_none>;
531 ++ rockchip,pins = <1 RK_PB1 1 &pcfg_pull_none>,
532 ++ <1 RK_PB0 1 &pcfg_pull_up>;
533 + };
534 +
535 + uart0_cts: uart0-cts {
536 +@@ -1104,8 +1104,8 @@
537 +
538 + uart1 {
539 + uart1_xfer: uart1-xfer {
540 +- rockchip,pins = <3 RK_PA4 4 &pcfg_pull_up>,
541 +- <3 RK_PA6 4 &pcfg_pull_none>;
542 ++ rockchip,pins = <3 RK_PA4 4 &pcfg_pull_none>,
543 ++ <3 RK_PA6 4 &pcfg_pull_up>;
544 + };
545 +
546 + uart1_cts: uart1-cts {
547 +@@ -1123,15 +1123,15 @@
548 +
549 + uart2-0 {
550 + uart2m0_xfer: uart2m0-xfer {
551 +- rockchip,pins = <1 RK_PA0 2 &pcfg_pull_up>,
552 +- <1 RK_PA1 2 &pcfg_pull_none>;
553 ++ rockchip,pins = <1 RK_PA0 2 &pcfg_pull_none>,
554 ++ <1 RK_PA1 2 &pcfg_pull_up>;
555 + };
556 + };
557 +
558 + uart2-1 {
559 + uart2m1_xfer: uart2m1-xfer {
560 +- rockchip,pins = <2 RK_PA0 1 &pcfg_pull_up>,
561 +- <2 RK_PA1 1 &pcfg_pull_none>;
562 ++ rockchip,pins = <2 RK_PA0 1 &pcfg_pull_none>,
563 ++ <2 RK_PA1 1 &pcfg_pull_up>;
564 + };
565 + };
566 +
567 +diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
568 +index f14e8c5c41acc..f4ee7c4f83b8b 100644
569 +--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
570 ++++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
571 +@@ -29,6 +29,9 @@
572 + i2c6 = &i2c6;
573 + i2c7 = &i2c7;
574 + i2c8 = &i2c8;
575 ++ mmc0 = &sdio0;
576 ++ mmc1 = &sdmmc;
577 ++ mmc2 = &sdhci;
578 + serial0 = &uart0;
579 + serial1 = &uart1;
580 + serial2 = &uart2;
581 +diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
582 +index f9b0b09153e0e..eab3de4f2ad25 100644
583 +--- a/arch/arm64/include/asm/atomic_lse.h
584 ++++ b/arch/arm64/include/asm/atomic_lse.h
585 +@@ -32,7 +32,9 @@ static inline void atomic_##op(int i, atomic_t *v) \
586 + register int w0 asm ("w0") = i; \
587 + register atomic_t *x1 asm ("x1") = v; \
588 + \
589 +- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
590 ++ asm volatile( \
591 ++ __LSE_PREAMBLE \
592 ++ ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \
593 + " " #asm_op " %w[i], %[v]\n") \
594 + : [i] "+r" (w0), [v] "+Q" (v->counter) \
595 + : "r" (x1) \
596 +@@ -52,7 +54,9 @@ static inline int atomic_fetch_##op##name(int i, atomic_t *v) \
597 + register int w0 asm ("w0") = i; \
598 + register atomic_t *x1 asm ("x1") = v; \
599 + \
600 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
601 ++ asm volatile( \
602 ++ __LSE_PREAMBLE \
603 ++ ARM64_LSE_ATOMIC_INSN( \
604 + /* LL/SC */ \
605 + __LL_SC_ATOMIC(fetch_##op##name), \
606 + /* LSE atomics */ \
607 +@@ -84,7 +88,9 @@ static inline int atomic_add_return##name(int i, atomic_t *v) \
608 + register int w0 asm ("w0") = i; \
609 + register atomic_t *x1 asm ("x1") = v; \
610 + \
611 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
612 ++ asm volatile( \
613 ++ __LSE_PREAMBLE \
614 ++ ARM64_LSE_ATOMIC_INSN( \
615 + /* LL/SC */ \
616 + __LL_SC_ATOMIC(add_return##name) \
617 + __nops(1), \
618 +@@ -110,7 +116,9 @@ static inline void atomic_and(int i, atomic_t *v)
619 + register int w0 asm ("w0") = i;
620 + register atomic_t *x1 asm ("x1") = v;
621 +
622 +- asm volatile(ARM64_LSE_ATOMIC_INSN(
623 ++ asm volatile(
624 ++ __LSE_PREAMBLE
625 ++ ARM64_LSE_ATOMIC_INSN(
626 + /* LL/SC */
627 + __LL_SC_ATOMIC(and)
628 + __nops(1),
629 +@@ -128,7 +136,9 @@ static inline int atomic_fetch_and##name(int i, atomic_t *v) \
630 + register int w0 asm ("w0") = i; \
631 + register atomic_t *x1 asm ("x1") = v; \
632 + \
633 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
634 ++ asm volatile( \
635 ++ __LSE_PREAMBLE \
636 ++ ARM64_LSE_ATOMIC_INSN( \
637 + /* LL/SC */ \
638 + __LL_SC_ATOMIC(fetch_and##name) \
639 + __nops(1), \
640 +@@ -154,7 +164,9 @@ static inline void atomic_sub(int i, atomic_t *v)
641 + register int w0 asm ("w0") = i;
642 + register atomic_t *x1 asm ("x1") = v;
643 +
644 +- asm volatile(ARM64_LSE_ATOMIC_INSN(
645 ++ asm volatile(
646 ++ __LSE_PREAMBLE
647 ++ ARM64_LSE_ATOMIC_INSN(
648 + /* LL/SC */
649 + __LL_SC_ATOMIC(sub)
650 + __nops(1),
651 +@@ -172,7 +184,9 @@ static inline int atomic_sub_return##name(int i, atomic_t *v) \
652 + register int w0 asm ("w0") = i; \
653 + register atomic_t *x1 asm ("x1") = v; \
654 + \
655 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
656 ++ asm volatile( \
657 ++ __LSE_PREAMBLE \
658 ++ ARM64_LSE_ATOMIC_INSN( \
659 + /* LL/SC */ \
660 + __LL_SC_ATOMIC(sub_return##name) \
661 + __nops(2), \
662 +@@ -200,7 +214,9 @@ static inline int atomic_fetch_sub##name(int i, atomic_t *v) \
663 + register int w0 asm ("w0") = i; \
664 + register atomic_t *x1 asm ("x1") = v; \
665 + \
666 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
667 ++ asm volatile( \
668 ++ __LSE_PREAMBLE \
669 ++ ARM64_LSE_ATOMIC_INSN( \
670 + /* LL/SC */ \
671 + __LL_SC_ATOMIC(fetch_sub##name) \
672 + __nops(1), \
673 +@@ -229,7 +245,9 @@ static inline void atomic64_##op(long i, atomic64_t *v) \
674 + register long x0 asm ("x0") = i; \
675 + register atomic64_t *x1 asm ("x1") = v; \
676 + \
677 +- asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
678 ++ asm volatile( \
679 ++ __LSE_PREAMBLE \
680 ++ ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \
681 + " " #asm_op " %[i], %[v]\n") \
682 + : [i] "+r" (x0), [v] "+Q" (v->counter) \
683 + : "r" (x1) \
684 +@@ -249,7 +267,9 @@ static inline long atomic64_fetch_##op##name(long i, atomic64_t *v) \
685 + register long x0 asm ("x0") = i; \
686 + register atomic64_t *x1 asm ("x1") = v; \
687 + \
688 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
689 ++ asm volatile( \
690 ++ __LSE_PREAMBLE \
691 ++ ARM64_LSE_ATOMIC_INSN( \
692 + /* LL/SC */ \
693 + __LL_SC_ATOMIC64(fetch_##op##name), \
694 + /* LSE atomics */ \
695 +@@ -281,7 +301,9 @@ static inline long atomic64_add_return##name(long i, atomic64_t *v) \
696 + register long x0 asm ("x0") = i; \
697 + register atomic64_t *x1 asm ("x1") = v; \
698 + \
699 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
700 ++ asm volatile( \
701 ++ __LSE_PREAMBLE \
702 ++ ARM64_LSE_ATOMIC_INSN( \
703 + /* LL/SC */ \
704 + __LL_SC_ATOMIC64(add_return##name) \
705 + __nops(1), \
706 +@@ -307,7 +329,9 @@ static inline void atomic64_and(long i, atomic64_t *v)
707 + register long x0 asm ("x0") = i;
708 + register atomic64_t *x1 asm ("x1") = v;
709 +
710 +- asm volatile(ARM64_LSE_ATOMIC_INSN(
711 ++ asm volatile(
712 ++ __LSE_PREAMBLE
713 ++ ARM64_LSE_ATOMIC_INSN(
714 + /* LL/SC */
715 + __LL_SC_ATOMIC64(and)
716 + __nops(1),
717 +@@ -325,7 +349,9 @@ static inline long atomic64_fetch_and##name(long i, atomic64_t *v) \
718 + register long x0 asm ("x0") = i; \
719 + register atomic64_t *x1 asm ("x1") = v; \
720 + \
721 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
722 ++ asm volatile( \
723 ++ __LSE_PREAMBLE \
724 ++ ARM64_LSE_ATOMIC_INSN( \
725 + /* LL/SC */ \
726 + __LL_SC_ATOMIC64(fetch_and##name) \
727 + __nops(1), \
728 +@@ -351,7 +377,9 @@ static inline void atomic64_sub(long i, atomic64_t *v)
729 + register long x0 asm ("x0") = i;
730 + register atomic64_t *x1 asm ("x1") = v;
731 +
732 +- asm volatile(ARM64_LSE_ATOMIC_INSN(
733 ++ asm volatile(
734 ++ __LSE_PREAMBLE
735 ++ ARM64_LSE_ATOMIC_INSN(
736 + /* LL/SC */
737 + __LL_SC_ATOMIC64(sub)
738 + __nops(1),
739 +@@ -369,7 +397,9 @@ static inline long atomic64_sub_return##name(long i, atomic64_t *v) \
740 + register long x0 asm ("x0") = i; \
741 + register atomic64_t *x1 asm ("x1") = v; \
742 + \
743 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
744 ++ asm volatile( \
745 ++ __LSE_PREAMBLE \
746 ++ ARM64_LSE_ATOMIC_INSN( \
747 + /* LL/SC */ \
748 + __LL_SC_ATOMIC64(sub_return##name) \
749 + __nops(2), \
750 +@@ -397,7 +427,9 @@ static inline long atomic64_fetch_sub##name(long i, atomic64_t *v) \
751 + register long x0 asm ("x0") = i; \
752 + register atomic64_t *x1 asm ("x1") = v; \
753 + \
754 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
755 ++ asm volatile( \
756 ++ __LSE_PREAMBLE \
757 ++ ARM64_LSE_ATOMIC_INSN( \
758 + /* LL/SC */ \
759 + __LL_SC_ATOMIC64(fetch_sub##name) \
760 + __nops(1), \
761 +@@ -422,7 +454,9 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
762 + {
763 + register long x0 asm ("x0") = (long)v;
764 +
765 +- asm volatile(ARM64_LSE_ATOMIC_INSN(
766 ++ asm volatile(
767 ++ __LSE_PREAMBLE
768 ++ ARM64_LSE_ATOMIC_INSN(
769 + /* LL/SC */
770 + __LL_SC_ATOMIC64(dec_if_positive)
771 + __nops(6),
772 +@@ -455,7 +489,9 @@ static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
773 + register unsigned long x1 asm ("x1") = old; \
774 + register unsigned long x2 asm ("x2") = new; \
775 + \
776 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
777 ++ asm volatile( \
778 ++ __LSE_PREAMBLE \
779 ++ ARM64_LSE_ATOMIC_INSN( \
780 + /* LL/SC */ \
781 + __LL_SC_CMPXCHG(name) \
782 + __nops(2), \
783 +@@ -507,7 +543,9 @@ static inline long __cmpxchg_double##name(unsigned long old1, \
784 + register unsigned long x3 asm ("x3") = new2; \
785 + register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
786 + \
787 +- asm volatile(ARM64_LSE_ATOMIC_INSN( \
788 ++ asm volatile( \
789 ++ __LSE_PREAMBLE \
790 ++ ARM64_LSE_ATOMIC_INSN( \
791 + /* LL/SC */ \
792 + __LL_SC_CMPXCHG_DBL(name) \
793 + __nops(3), \
794 +diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
795 +index c67cae9d52293..151e69a93e341 100644
796 +--- a/arch/arm64/include/asm/kvm_host.h
797 ++++ b/arch/arm64/include/asm/kvm_host.h
798 +@@ -165,6 +165,7 @@ enum vcpu_sysreg {
799 + #define c2_TTBR1 (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
800 + #define c2_TTBR1_high (c2_TTBR1 + 1) /* TTBR1 top 32 bits */
801 + #define c2_TTBCR (TCR_EL1 * 2) /* Translation Table Base Control R. */
802 ++#define c2_TTBCR2 (c2_TTBCR + 1) /* Translation Table Base Control R. 2 */
803 + #define c3_DACR (DACR32_EL2 * 2)/* Domain Access Control Register */
804 + #define c5_DFSR (ESR_EL1 * 2) /* Data Fault Status Register */
805 + #define c5_IFSR (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
806 +diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
807 +index 8262325e2fc66..13536c4da2c26 100644
808 +--- a/arch/arm64/include/asm/lse.h
809 ++++ b/arch/arm64/include/asm/lse.h
810 +@@ -4,6 +4,8 @@
811 +
812 + #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
813 +
814 ++#define __LSE_PREAMBLE ".arch_extension lse\n"
815 ++
816 + #include <linux/compiler_types.h>
817 + #include <linux/export.h>
818 + #include <linux/stringify.h>
819 +@@ -20,8 +22,6 @@
820 +
821 + #else /* __ASSEMBLER__ */
822 +
823 +-__asm__(".arch_extension lse");
824 +-
825 + /* Move the ll/sc atomics out-of-line */
826 + #define __LL_SC_INLINE notrace
827 + #define __LL_SC_PREFIX(x) __ll_sc_##x
828 +@@ -33,7 +33,7 @@ __asm__(".arch_extension lse");
829 +
830 + /* In-line patching at runtime */
831 + #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
832 +- ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
833 ++ ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
834 +
835 + #endif /* __ASSEMBLER__ */
836 + #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
837 +diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
838 +index 1457a0ba83dbc..f2d2dbbbfca20 100644
839 +--- a/arch/arm64/kernel/syscall.c
840 ++++ b/arch/arm64/kernel/syscall.c
841 +@@ -102,8 +102,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
842 + regs->syscallno = scno;
843 +
844 + cortex_a76_erratum_1463225_svc_handler();
845 ++ user_exit_irqoff();
846 + local_daif_restore(DAIF_PROCCTX);
847 +- user_exit();
848 +
849 + if (has_syscall_work(flags)) {
850 + /* set default errno for user-issued syscall(-1) */
851 +diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
852 +index b53d0ebb87fcc..847b2d80ce870 100644
853 +--- a/arch/arm64/kvm/sys_regs.c
854 ++++ b/arch/arm64/kvm/sys_regs.c
855 +@@ -1661,6 +1661,7 @@ static const struct sys_reg_desc cp15_regs[] = {
856 + { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
857 + { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
858 + { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
859 ++ { Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 },
860 + { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
861 + { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
862 + { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
863 +diff --git a/arch/arm64/lib/memcpy.S b/arch/arm64/lib/memcpy.S
864 +index 67613937711f1..dfedd4ab1a766 100644
865 +--- a/arch/arm64/lib/memcpy.S
866 ++++ b/arch/arm64/lib/memcpy.S
867 +@@ -68,9 +68,8 @@
868 + stp \ptr, \regB, [\regC], \val
869 + .endm
870 +
871 +- .weak memcpy
872 + ENTRY(__memcpy)
873 +-ENTRY(memcpy)
874 ++WEAK(memcpy)
875 + #include "copy_template.S"
876 + ret
877 + ENDPIPROC(memcpy)
878 +diff --git a/arch/arm64/lib/memmove.S b/arch/arm64/lib/memmove.S
879 +index a5a4459013b1a..e3de8f05c21a8 100644
880 +--- a/arch/arm64/lib/memmove.S
881 ++++ b/arch/arm64/lib/memmove.S
882 +@@ -57,9 +57,8 @@ C_h .req x12
883 + D_l .req x13
884 + D_h .req x14
885 +
886 +- .weak memmove
887 + ENTRY(__memmove)
888 +-ENTRY(memmove)
889 ++WEAK(memmove)
890 + cmp dstin, src
891 + b.lo __memcpy
892 + add tmp1, src, count
893 +diff --git a/arch/arm64/lib/memset.S b/arch/arm64/lib/memset.S
894 +index f2670a9f218c9..316263c47c006 100644
895 +--- a/arch/arm64/lib/memset.S
896 ++++ b/arch/arm64/lib/memset.S
897 +@@ -54,9 +54,8 @@ dst .req x8
898 + tmp3w .req w9
899 + tmp3 .req x9
900 +
901 +- .weak memset
902 + ENTRY(__memset)
903 +-ENTRY(memset)
904 ++WEAK(memset)
905 + mov dst, dstin /* Preserve return value. */
906 + and A_lw, val, #255
907 + orr A_lw, A_lw, A_lw, lsl #8
908 +diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
909 +index 29471038d817e..c6b99845fb377 100644
910 +--- a/arch/mips/bcm47xx/Kconfig
911 ++++ b/arch/mips/bcm47xx/Kconfig
912 +@@ -27,6 +27,7 @@ config BCM47XX_BCMA
913 + select BCMA
914 + select BCMA_HOST_SOC
915 + select BCMA_DRIVER_MIPS
916 ++ select BCMA_DRIVER_PCI if PCI
917 + select BCMA_DRIVER_PCI_HOSTMODE if PCI
918 + select BCMA_DRIVER_GPIO
919 + default y
920 +diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
921 +index 8954108df4570..f51e21ea53492 100644
922 +--- a/arch/powerpc/Makefile
923 ++++ b/arch/powerpc/Makefile
924 +@@ -251,7 +251,6 @@ endif
925 +
926 + cpu-as-$(CONFIG_4xx) += -Wa,-m405
927 + cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
928 +-cpu-as-$(CONFIG_E200) += -Wa,-me200
929 + cpu-as-$(CONFIG_E500) += -Wa,-me500
930 +
931 + # When using '-many -mpower4' gas will first try and find a matching power4
932 +diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
933 +index 751cf931bb3f4..a4f4820826b74 100644
934 +--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
935 ++++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
936 +@@ -434,9 +434,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
937 + if (pte_val(*ptep) & _PAGE_HASHPTE)
938 + flush_hash_entry(mm, ptep, addr);
939 + __asm__ __volatile__("\
940 +- stw%U0%X0 %2,%0\n\
941 ++ stw%X0 %2,%0\n\
942 + eieio\n\
943 +- stw%U0%X0 %L2,%1"
944 ++ stw%X1 %L2,%1"
945 + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
946 + : "r" (pte) : "memory");
947 +
948 +diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
949 +index 59b35b93eadec..d90093a88e096 100644
950 +--- a/arch/powerpc/include/asm/cputable.h
951 ++++ b/arch/powerpc/include/asm/cputable.h
952 +@@ -411,7 +411,6 @@ static inline void cpu_feature_keys_init(void) { }
953 + CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
954 + CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV | CPU_FTR_ALTIVEC_COMP | \
955 + CPU_FTR_CELL_TB_BUG | CPU_FTR_SMT)
956 +-#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
957 +
958 + /* 64-bit CPUs */
959 + #define CPU_FTRS_PPC970 (CPU_FTR_LWSYNC | \
960 +@@ -509,8 +508,6 @@ enum {
961 + CPU_FTRS_7447 | CPU_FTRS_7447A | CPU_FTRS_82XX |
962 + CPU_FTRS_G2_LE | CPU_FTRS_E300 | CPU_FTRS_E300C2 |
963 + CPU_FTRS_CLASSIC32 |
964 +-#else
965 +- CPU_FTRS_GENERIC_32 |
966 + #endif
967 + #ifdef CONFIG_PPC_8xx
968 + CPU_FTRS_8XX |
969 +@@ -585,8 +582,6 @@ enum {
970 + CPU_FTRS_7447 & CPU_FTRS_7447A & CPU_FTRS_82XX &
971 + CPU_FTRS_G2_LE & CPU_FTRS_E300 & CPU_FTRS_E300C2 &
972 + CPU_FTRS_CLASSIC32 &
973 +-#else
974 +- CPU_FTRS_GENERIC_32 &
975 + #endif
976 + #ifdef CONFIG_PPC_8xx
977 + CPU_FTRS_8XX &
978 +diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
979 +index b321c82b36247..913878d8e3bec 100644
980 +--- a/arch/powerpc/include/asm/nohash/pgtable.h
981 ++++ b/arch/powerpc/include/asm/nohash/pgtable.h
982 +@@ -151,9 +151,9 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
983 + */
984 + if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
985 + __asm__ __volatile__("\
986 +- stw%U0%X0 %2,%0\n\
987 ++ stw%X0 %2,%0\n\
988 + eieio\n\
989 +- stw%U0%X0 %L2,%1"
990 ++ stw%X1 %L2,%1"
991 + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
992 + : "r" (pte) : "memory");
993 + return;
994 +diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
995 +index 389da790c1296..4f7b225d78cff 100644
996 +--- a/arch/powerpc/kernel/head_64.S
997 ++++ b/arch/powerpc/kernel/head_64.S
998 +@@ -423,6 +423,10 @@ generic_secondary_common_init:
999 + /* From now on, r24 is expected to be logical cpuid */
1000 + mr r24,r5
1001 +
1002 ++ /* Create a temp kernel stack for use before relocation is on. */
1003 ++ ld r1,PACAEMERGSP(r13)
1004 ++ subi r1,r1,STACK_FRAME_OVERHEAD
1005 ++
1006 + /* See if we need to call a cpu state restore handler */
1007 + LOAD_REG_ADDR(r23, cur_cpu_spec)
1008 + ld r23,0(r23)
1009 +@@ -451,10 +455,6 @@ generic_secondary_common_init:
1010 + sync /* order paca.run and cur_cpu_spec */
1011 + isync /* In case code patching happened */
1012 +
1013 +- /* Create a temp kernel stack for use before relocation is on. */
1014 +- ld r1,PACAEMERGSP(r13)
1015 +- subi r1,r1,STACK_FRAME_OVERHEAD
1016 +-
1017 + b __secondary_start
1018 + #endif /* SMP */
1019 +
1020 +diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
1021 +index 7e0722b62cae9..b3aa0cea6283f 100644
1022 +--- a/arch/powerpc/kernel/rtas.c
1023 ++++ b/arch/powerpc/kernel/rtas.c
1024 +@@ -1095,7 +1095,7 @@ static struct rtas_filter rtas_filters[] __ro_after_init = {
1025 + { "ibm,display-message", -1, 0, -1, -1, -1 },
1026 + { "ibm,errinjct", -1, 2, -1, -1, -1, 1024 },
1027 + { "ibm,close-errinjct", -1, -1, -1, -1, -1 },
1028 +- { "ibm,open-errinct", -1, -1, -1, -1, -1 },
1029 ++ { "ibm,open-errinjct", -1, -1, -1, -1, -1 },
1030 + { "ibm,get-config-addr-info2", -1, -1, -1, -1, -1 },
1031 + { "ibm,get-dynamic-sensor-state", -1, 1, -1, -1, -1 },
1032 + { "ibm,get-indices", -1, 2, 3, -1, -1 },
1033 +diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
1034 +index 508244bcf19c2..7787a26d47777 100644
1035 +--- a/arch/powerpc/kernel/setup-common.c
1036 ++++ b/arch/powerpc/kernel/setup-common.c
1037 +@@ -929,8 +929,6 @@ void __init setup_arch(char **cmdline_p)
1038 +
1039 + /* On BookE, setup per-core TLB data structures. */
1040 + setup_tlb_core_data();
1041 +-
1042 +- smp_release_cpus();
1043 + #endif
1044 +
1045 + /* Print various info about the machine that has been gathered so far. */
1046 +@@ -964,6 +962,8 @@ void __init setup_arch(char **cmdline_p)
1047 + exc_lvl_early_init();
1048 + emergency_stack_init();
1049 +
1050 ++ smp_release_cpus();
1051 ++
1052 + initmem_init();
1053 +
1054 + #ifdef CONFIG_DUMMY_CONSOLE
1055 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
1056 +index d407b73298171..70de138228286 100644
1057 +--- a/arch/powerpc/perf/core-book3s.c
1058 ++++ b/arch/powerpc/perf/core-book3s.c
1059 +@@ -2058,6 +2058,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
1060 + local64_set(&event->hw.period_left, left);
1061 + perf_event_update_userpage(event);
1062 +
1063 ++ /*
1064 ++ * Due to hardware limitation, sometimes SIAR could sample a kernel
1065 ++ * address even when freeze on supervisor state (kernel) is set in
1066 ++ * MMCR2. Check attr.exclude_kernel and address to drop the sample in
1067 ++ * these cases.
1068 ++ */
1069 ++ if (event->attr.exclude_kernel && record)
1070 ++ if (is_kernel_addr(mfspr(SPRN_SIAR)))
1071 ++ record = 0;
1072 ++
1073 + /*
1074 + * Finally record data if requested.
1075 + */
1076 +diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
1077 +index 84d038ed3882a..ce6597a29bc9f 100644
1078 +--- a/arch/powerpc/platforms/powernv/memtrace.c
1079 ++++ b/arch/powerpc/platforms/powernv/memtrace.c
1080 +@@ -33,6 +33,7 @@ struct memtrace_entry {
1081 + char name[16];
1082 + };
1083 +
1084 ++static DEFINE_MUTEX(memtrace_mutex);
1085 + static u64 memtrace_size;
1086 +
1087 + static struct memtrace_entry *memtrace_array;
1088 +@@ -70,6 +71,23 @@ static int change_memblock_state(struct memory_block *mem, void *arg)
1089 + return 0;
1090 + }
1091 +
1092 ++static void memtrace_clear_range(unsigned long start_pfn,
1093 ++ unsigned long nr_pages)
1094 ++{
1095 ++ unsigned long pfn;
1096 ++
1097 ++ /*
1098 ++ * As pages are offline, we cannot trust the memmap anymore. As HIGHMEM
1099 ++ * does not apply, avoid passing around "struct page" and use
1100 ++ * clear_page() instead directly.
1101 ++ */
1102 ++ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
1103 ++ if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
1104 ++ cond_resched();
1105 ++ clear_page(__va(PFN_PHYS(pfn)));
1106 ++ }
1107 ++}
1108 ++
1109 + /* called with device_hotplug_lock held */
1110 + static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
1111 + {
1112 +@@ -114,6 +132,11 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
1113 + lock_device_hotplug();
1114 + for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
1115 + if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
1116 ++ /*
1117 ++ * Clear the range while we still have a linear
1118 ++ * mapping.
1119 ++ */
1120 ++ memtrace_clear_range(base_pfn, nr_pages);
1121 + /*
1122 + * Remove memory in memory block size chunks so that
1123 + * iomem resources are always split to the same size and
1124 +@@ -272,6 +295,7 @@ static int memtrace_online(void)
1125 +
1126 + static int memtrace_enable_set(void *data, u64 val)
1127 + {
1128 ++ int rc = -EAGAIN;
1129 + u64 bytes;
1130 +
1131 + /*
1132 +@@ -284,25 +308,31 @@ static int memtrace_enable_set(void *data, u64 val)
1133 + return -EINVAL;
1134 + }
1135 +
1136 ++ mutex_lock(&memtrace_mutex);
1137 ++
1138 + /* Re-add/online previously removed/offlined memory */
1139 + if (memtrace_size) {
1140 + if (memtrace_online())
1141 +- return -EAGAIN;
1142 ++ goto out_unlock;
1143 + }
1144 +
1145 +- if (!val)
1146 +- return 0;
1147 ++ if (!val) {
1148 ++ rc = 0;
1149 ++ goto out_unlock;
1150 ++ }
1151 +
1152 + /* Offline and remove memory */
1153 + if (memtrace_init_regions_runtime(val))
1154 +- return -EINVAL;
1155 ++ goto out_unlock;
1156 +
1157 + if (memtrace_init_debugfs())
1158 +- return -EINVAL;
1159 ++ goto out_unlock;
1160 +
1161 + memtrace_size = val;
1162 +-
1163 +- return 0;
1164 ++ rc = 0;
1165 ++out_unlock:
1166 ++ mutex_unlock(&memtrace_mutex);
1167 ++ return rc;
1168 + }
1169 +
1170 + static int memtrace_enable_get(void *data, u64 *val)
1171 +diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
1172 +index 52a021e1f86bf..5414d3295e0a1 100644
1173 +--- a/arch/powerpc/platforms/pseries/suspend.c
1174 ++++ b/arch/powerpc/platforms/pseries/suspend.c
1175 +@@ -26,7 +26,6 @@
1176 + #include <asm/mmu.h>
1177 + #include <asm/rtas.h>
1178 + #include <asm/topology.h>
1179 +-#include "../../kernel/cacheinfo.h"
1180 +
1181 + static u64 stream_id;
1182 + static struct device suspend_dev;
1183 +@@ -91,9 +90,7 @@ static void pseries_suspend_enable_irqs(void)
1184 + * Update configuration which can be modified based on device tree
1185 + * changes during resume.
1186 + */
1187 +- cacheinfo_cpu_offline(smp_processor_id());
1188 + post_mobility_fixup();
1189 +- cacheinfo_cpu_online(smp_processor_id());
1190 + }
1191 +
1192 + /**
1193 +@@ -223,7 +220,6 @@ static struct bus_type suspend_subsys = {
1194 +
1195 + static const struct platform_suspend_ops pseries_suspend_ops = {
1196 + .valid = suspend_valid_only_mem,
1197 +- .begin = pseries_suspend_begin,
1198 + .prepare_late = pseries_prepare_late,
1199 + .enter = pseries_suspend_enter,
1200 + };
1201 +diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
1202 +index d00123421e007..eefe1b94e0aad 100644
1203 +--- a/arch/powerpc/xmon/nonstdio.c
1204 ++++ b/arch/powerpc/xmon/nonstdio.c
1205 +@@ -182,7 +182,7 @@ void xmon_printf(const char *format, ...)
1206 +
1207 + if (n && rc == 0) {
1208 + /* No udbg hooks, fallback to printk() - dangerous */
1209 +- printk("%s", xmon_outbuf);
1210 ++ pr_cont("%s", xmon_outbuf);
1211 + }
1212 + }
1213 +
1214 +diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
1215 +index 888f247c9261a..c47bd581a08a9 100644
1216 +--- a/arch/s390/kernel/smp.c
1217 ++++ b/arch/s390/kernel/smp.c
1218 +@@ -863,24 +863,12 @@ static void smp_start_secondary(void *cpuvoid)
1219 + /* Upping and downing of CPUs */
1220 + int __cpu_up(unsigned int cpu, struct task_struct *tidle)
1221 + {
1222 +- struct pcpu *pcpu;
1223 +- int base, i, rc;
1224 ++ struct pcpu *pcpu = pcpu_devices + cpu;
1225 ++ int rc;
1226 +
1227 +- pcpu = pcpu_devices + cpu;
1228 + if (pcpu->state != CPU_STATE_CONFIGURED)
1229 + return -EIO;
1230 +- base = smp_get_base_cpu(cpu);
1231 +- for (i = 0; i <= smp_cpu_mtid; i++) {
1232 +- if (base + i < nr_cpu_ids)
1233 +- if (cpu_online(base + i))
1234 +- break;
1235 +- }
1236 +- /*
1237 +- * If this is the first CPU of the core to get online
1238 +- * do an initial CPU reset.
1239 +- */
1240 +- if (i > smp_cpu_mtid &&
1241 +- pcpu_sigp_retry(pcpu_devices + base, SIGP_INITIAL_CPU_RESET, 0) !=
1242 ++ if (pcpu_sigp_retry(pcpu, SIGP_INITIAL_CPU_RESET, 0) !=
1243 + SIGP_CC_ORDER_CODE_ACCEPTED)
1244 + return -EIO;
1245 +
1246 +diff --git a/arch/s390/purgatory/head.S b/arch/s390/purgatory/head.S
1247 +index 2e3707b12eddb..9b2d7a71fd1bd 100644
1248 +--- a/arch/s390/purgatory/head.S
1249 ++++ b/arch/s390/purgatory/head.S
1250 +@@ -61,14 +61,15 @@
1251 + jh 10b
1252 + .endm
1253 +
1254 +-.macro START_NEXT_KERNEL base
1255 ++.macro START_NEXT_KERNEL base subcode
1256 + lg %r4,kernel_entry-\base(%r13)
1257 + lg %r5,load_psw_mask-\base(%r13)
1258 + ogr %r4,%r5
1259 + stg %r4,0(%r0)
1260 +
1261 + xgr %r0,%r0
1262 +- diag %r0,%r0,0x308
1263 ++ lghi %r1,\subcode
1264 ++ diag %r0,%r1,0x308
1265 + .endm
1266 +
1267 + .text
1268 +@@ -123,7 +124,7 @@ ENTRY(purgatory_start)
1269 + je .start_crash_kernel
1270 +
1271 + /* start normal kernel */
1272 +- START_NEXT_KERNEL .base_crash
1273 ++ START_NEXT_KERNEL .base_crash 0
1274 +
1275 + .return_old_kernel:
1276 + lmg %r6,%r15,gprregs-.base_crash(%r13)
1277 +@@ -227,7 +228,7 @@ ENTRY(purgatory_start)
1278 + MEMCPY %r9,%r10,%r11
1279 +
1280 + /* start crash kernel */
1281 +- START_NEXT_KERNEL .base_dst
1282 ++ START_NEXT_KERNEL .base_dst 1
1283 +
1284 +
1285 + load_psw_mask:
1286 +diff --git a/arch/um/drivers/chan_user.c b/arch/um/drivers/chan_user.c
1287 +index 3fd7c3efdb18d..9cffbbb15c569 100644
1288 +--- a/arch/um/drivers/chan_user.c
1289 ++++ b/arch/um/drivers/chan_user.c
1290 +@@ -26,10 +26,10 @@ int generic_read(int fd, char *c_out, void *unused)
1291 + n = read(fd, c_out, sizeof(*c_out));
1292 + if (n > 0)
1293 + return n;
1294 +- else if (errno == EAGAIN)
1295 +- return 0;
1296 + else if (n == 0)
1297 + return -EIO;
1298 ++ else if (errno == EAGAIN)
1299 ++ return 0;
1300 + return -errno;
1301 + }
1302 +
1303 +diff --git a/arch/um/drivers/xterm.c b/arch/um/drivers/xterm.c
1304 +index 20e30be44795b..e3b422ebce09f 100644
1305 +--- a/arch/um/drivers/xterm.c
1306 ++++ b/arch/um/drivers/xterm.c
1307 +@@ -18,6 +18,7 @@
1308 + struct xterm_chan {
1309 + int pid;
1310 + int helper_pid;
1311 ++ int chan_fd;
1312 + char *title;
1313 + int device;
1314 + int raw;
1315 +@@ -33,6 +34,7 @@ static void *xterm_init(char *str, int device, const struct chan_opts *opts)
1316 + return NULL;
1317 + *data = ((struct xterm_chan) { .pid = -1,
1318 + .helper_pid = -1,
1319 ++ .chan_fd = -1,
1320 + .device = device,
1321 + .title = opts->xterm_title,
1322 + .raw = opts->raw } );
1323 +@@ -149,6 +151,7 @@ static int xterm_open(int input, int output, int primary, void *d,
1324 + goto out_kill;
1325 + }
1326 +
1327 ++ data->chan_fd = fd;
1328 + new = xterm_fd(fd, &data->helper_pid);
1329 + if (new < 0) {
1330 + err = new;
1331 +@@ -206,6 +209,8 @@ static void xterm_close(int fd, void *d)
1332 + os_kill_process(data->helper_pid, 0);
1333 + data->helper_pid = -1;
1334 +
1335 ++ if (data->chan_fd != -1)
1336 ++ os_close_file(data->chan_fd);
1337 + os_close_file(fd);
1338 + }
1339 +
1340 +diff --git a/arch/um/os-Linux/irq.c b/arch/um/os-Linux/irq.c
1341 +index 365823010346a..90ef404622805 100644
1342 +--- a/arch/um/os-Linux/irq.c
1343 ++++ b/arch/um/os-Linux/irq.c
1344 +@@ -48,7 +48,7 @@ int os_epoll_triggered(int index, int events)
1345 + int os_event_mask(int irq_type)
1346 + {
1347 + if (irq_type == IRQ_READ)
1348 +- return EPOLLIN | EPOLLPRI;
1349 ++ return EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP | EPOLLRDHUP;
1350 + if (irq_type == IRQ_WRITE)
1351 + return EPOLLOUT;
1352 + return 0;
1353 +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
1354 +index 3c1e51ead0722..cd2aa72e21239 100644
1355 +--- a/arch/x86/include/asm/apic.h
1356 ++++ b/arch/x86/include/asm/apic.h
1357 +@@ -252,6 +252,7 @@ static inline u64 native_x2apic_icr_read(void)
1358 +
1359 + extern int x2apic_mode;
1360 + extern int x2apic_phys;
1361 ++extern void __init x2apic_set_max_apicid(u32 apicid);
1362 + extern void __init check_x2apic(void);
1363 + extern void x2apic_setup(void);
1364 + static inline int x2apic_enabled(void)
1365 +diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
1366 +index 71ea49e7db747..02806d95ad6ee 100644
1367 +--- a/arch/x86/include/asm/pgtable_types.h
1368 ++++ b/arch/x86/include/asm/pgtable_types.h
1369 +@@ -148,6 +148,7 @@ enum page_cache_mode {
1370 + #endif
1371 +
1372 + #define _PAGE_CACHE_MASK (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)
1373 ++#define _PAGE_LARGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)
1374 + #define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC))
1375 + #define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP))
1376 +
1377 +diff --git a/arch/x86/include/asm/sync_core.h b/arch/x86/include/asm/sync_core.h
1378 +index c67caafd33817..43b5e02a7b4b9 100644
1379 +--- a/arch/x86/include/asm/sync_core.h
1380 ++++ b/arch/x86/include/asm/sync_core.h
1381 +@@ -16,12 +16,13 @@ static inline void sync_core_before_usermode(void)
1382 + /* With PTI, we unconditionally serialize before running user code. */
1383 + if (static_cpu_has(X86_FEATURE_PTI))
1384 + return;
1385 ++
1386 + /*
1387 +- * Return from interrupt and NMI is done through iret, which is core
1388 +- * serializing.
1389 ++ * Even if we're in an interrupt, we might reschedule before returning,
1390 ++ * in which case we could switch to a different thread in the same mm
1391 ++ * and return using SYSRET or SYSEXIT. Instead of trying to keep
1392 ++ * track of our need to sync the core, just sync right away.
1393 + */
1394 +- if (in_irq() || in_nmi())
1395 +- return;
1396 + sync_core();
1397 + }
1398 +
1399 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
1400 +index e9456a2eef585..ab8187271d470 100644
1401 +--- a/arch/x86/kernel/apic/apic.c
1402 ++++ b/arch/x86/kernel/apic/apic.c
1403 +@@ -1813,20 +1813,22 @@ static __init void try_to_enable_x2apic(int remap_mode)
1404 + return;
1405 +
1406 + if (remap_mode != IRQ_REMAP_X2APIC_MODE) {
1407 +- /* IR is required if there is APIC ID > 255 even when running
1408 +- * under KVM
1409 ++ /*
1410 ++ * Using X2APIC without IR is not architecturally supported
1411 ++ * on bare metal but may be supported in guests.
1412 + */
1413 +- if (max_physical_apicid > 255 ||
1414 +- !x86_init.hyper.x2apic_available()) {
1415 ++ if (!x86_init.hyper.x2apic_available()) {
1416 + pr_info("x2apic: IRQ remapping doesn't support X2APIC mode\n");
1417 + x2apic_disable();
1418 + return;
1419 + }
1420 +
1421 + /*
1422 +- * without IR all CPUs can be addressed by IOAPIC/MSI
1423 +- * only in physical mode
1424 ++ * Without IR, all CPUs can be addressed by IOAPIC/MSI only
1425 ++ * in physical mode, and CPUs with an APIC ID that cannnot
1426 ++ * be addressed must not be brought online.
1427 + */
1428 ++ x2apic_set_max_apicid(255);
1429 + x2apic_phys = 1;
1430 + }
1431 + x2apic_enable();
1432 +diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
1433 +index 8b7e0b46e86ea..f0d0535e8f345 100644
1434 +--- a/arch/x86/kernel/apic/vector.c
1435 ++++ b/arch/x86/kernel/apic/vector.c
1436 +@@ -274,20 +274,24 @@ static int assign_irq_vector_any_locked(struct irq_data *irqd)
1437 + const struct cpumask *affmsk = irq_data_get_affinity_mask(irqd);
1438 + int node = irq_data_get_node(irqd);
1439 +
1440 +- if (node == NUMA_NO_NODE)
1441 +- goto all;
1442 +- /* Try the intersection of @affmsk and node mask */
1443 +- cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
1444 +- if (!assign_vector_locked(irqd, vector_searchmask))
1445 +- return 0;
1446 +- /* Try the node mask */
1447 +- if (!assign_vector_locked(irqd, cpumask_of_node(node)))
1448 +- return 0;
1449 +-all:
1450 ++ if (node != NUMA_NO_NODE) {
1451 ++ /* Try the intersection of @affmsk and node mask */
1452 ++ cpumask_and(vector_searchmask, cpumask_of_node(node), affmsk);
1453 ++ if (!assign_vector_locked(irqd, vector_searchmask))
1454 ++ return 0;
1455 ++ }
1456 ++
1457 + /* Try the full affinity mask */
1458 + cpumask_and(vector_searchmask, affmsk, cpu_online_mask);
1459 + if (!assign_vector_locked(irqd, vector_searchmask))
1460 + return 0;
1461 ++
1462 ++ if (node != NUMA_NO_NODE) {
1463 ++ /* Try the node mask */
1464 ++ if (!assign_vector_locked(irqd, cpumask_of_node(node)))
1465 ++ return 0;
1466 ++ }
1467 ++
1468 + /* Try the full online mask */
1469 + return assign_vector_locked(irqd, cpu_online_mask);
1470 + }
1471 +diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
1472 +index b5cf9e7b3830c..ed56d2850e96a 100644
1473 +--- a/arch/x86/kernel/apic/x2apic_phys.c
1474 ++++ b/arch/x86/kernel/apic/x2apic_phys.c
1475 +@@ -13,6 +13,12 @@
1476 + int x2apic_phys;
1477 +
1478 + static struct apic apic_x2apic_phys;
1479 ++static u32 x2apic_max_apicid __ro_after_init;
1480 ++
1481 ++void __init x2apic_set_max_apicid(u32 apicid)
1482 ++{
1483 ++ x2apic_max_apicid = apicid;
1484 ++}
1485 +
1486 + static int __init set_x2apic_phys_mode(char *arg)
1487 + {
1488 +@@ -103,6 +109,9 @@ static int x2apic_phys_probe(void)
1489 + /* Common x2apic functions, also used by x2apic_cluster */
1490 + int x2apic_apic_id_valid(u32 apicid)
1491 + {
1492 ++ if (x2apic_max_apicid && apicid > x2apic_max_apicid)
1493 ++ return 0;
1494 ++
1495 + return 1;
1496 + }
1497 +
1498 +diff --git a/arch/x86/kernel/cpu/intel_rdt.h b/arch/x86/kernel/cpu/intel_rdt.h
1499 +index 2b483b739cf1b..8412234eabd35 100644
1500 +--- a/arch/x86/kernel/cpu/intel_rdt.h
1501 ++++ b/arch/x86/kernel/cpu/intel_rdt.h
1502 +@@ -251,7 +251,6 @@ struct rftype {
1503 + * struct mbm_state - status for each MBM counter in each domain
1504 + * @chunks: Total data moved (multiply by rdt_group.mon_scale to get bytes)
1505 + * @prev_msr Value of IA32_QM_CTR for this RMID last time we read it
1506 +- * @chunks_bw Total local data moved. Used for bandwidth calculation
1507 + * @prev_bw_msr:Value of previous IA32_QM_CTR for bandwidth counting
1508 + * @prev_bw The most recent bandwidth in MBps
1509 + * @delta_bw Difference between the current and previous bandwidth
1510 +@@ -260,7 +259,6 @@ struct rftype {
1511 + struct mbm_state {
1512 + u64 chunks;
1513 + u64 prev_msr;
1514 +- u64 chunks_bw;
1515 + u64 prev_bw_msr;
1516 + u32 prev_bw;
1517 + u32 delta_bw;
1518 +diff --git a/arch/x86/kernel/cpu/intel_rdt_monitor.c b/arch/x86/kernel/cpu/intel_rdt_monitor.c
1519 +index 3d4ec80a6bb96..5dfa5ab9a5ae2 100644
1520 +--- a/arch/x86/kernel/cpu/intel_rdt_monitor.c
1521 ++++ b/arch/x86/kernel/cpu/intel_rdt_monitor.c
1522 +@@ -290,8 +290,6 @@ static void mbm_bw_count(u32 rmid, struct rmid_read *rr)
1523 + return;
1524 +
1525 + chunks = mbm_overflow_count(m->prev_bw_msr, tval);
1526 +- m->chunks_bw += chunks;
1527 +- m->chunks = m->chunks_bw;
1528 + cur_bw = (chunks * r->mon_scale) >> 20;
1529 +
1530 + if (m->delta_comp)
1531 +@@ -461,15 +459,14 @@ static void mbm_update(struct rdt_domain *d, int rmid)
1532 + }
1533 + if (is_mbm_local_enabled()) {
1534 + rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
1535 ++ __mon_event_count(rmid, &rr);
1536 +
1537 + /*
1538 + * Call the MBA software controller only for the
1539 + * control groups and when user has enabled
1540 + * the software controller explicitly.
1541 + */
1542 +- if (!is_mba_sc(NULL))
1543 +- __mon_event_count(rmid, &rr);
1544 +- else
1545 ++ if (is_mba_sc(NULL))
1546 + mbm_bw_count(rmid, &rr);
1547 + }
1548 + }
1549 +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
1550 +index 07e290244ca94..dfc3ab44bc5d3 100644
1551 +--- a/arch/x86/kernel/kprobes/core.c
1552 ++++ b/arch/x86/kernel/kprobes/core.c
1553 +@@ -1041,6 +1041,11 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
1554 + * So clear it by resetting the current kprobe:
1555 + */
1556 + regs->flags &= ~X86_EFLAGS_TF;
1557 ++ /*
1558 ++ * Since the single step (trap) has been cancelled,
1559 ++ * we need to restore BTF here.
1560 ++ */
1561 ++ restore_btf();
1562 +
1563 + /*
1564 + * If the TF flag was set before the kprobe hit,
1565 +diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
1566 +index 9d05572370edc..84b0078272d1d 100644
1567 +--- a/arch/x86/lib/memcpy_64.S
1568 ++++ b/arch/x86/lib/memcpy_64.S
1569 +@@ -14,8 +14,6 @@
1570 + * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
1571 + */
1572 +
1573 +-.weak memcpy
1574 +-
1575 + /*
1576 + * memcpy - Copy a memory block.
1577 + *
1578 +@@ -28,7 +26,9 @@
1579 + * rax original destination
1580 + */
1581 + ENTRY(__memcpy)
1582 +-ENTRY(memcpy)
1583 ++.weak memcpy
1584 ++.p2align 4, 0x90
1585 ++memcpy:
1586 + ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
1587 + "jmp memcpy_erms", X86_FEATURE_ERMS
1588 +
1589 +diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
1590 +index bbec69d8223bf..e1cfc880f42df 100644
1591 +--- a/arch/x86/lib/memmove_64.S
1592 ++++ b/arch/x86/lib/memmove_64.S
1593 +@@ -25,8 +25,8 @@
1594 + * rax: dest
1595 + */
1596 + .weak memmove
1597 +-
1598 +-ENTRY(memmove)
1599 ++.p2align 4, 0x90
1600 ++memmove:
1601 + ENTRY(__memmove)
1602 +
1603 + /* Handle more 32 bytes in loop */
1604 +diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
1605 +index 9bc861c71e754..084189acdcd0d 100644
1606 +--- a/arch/x86/lib/memset_64.S
1607 ++++ b/arch/x86/lib/memset_64.S
1608 +@@ -6,8 +6,6 @@
1609 + #include <asm/alternative-asm.h>
1610 + #include <asm/export.h>
1611 +
1612 +-.weak memset
1613 +-
1614 + /*
1615 + * ISO C memset - set a memory block to a byte value. This function uses fast
1616 + * string to get better performance than the original function. The code is
1617 +@@ -19,7 +17,9 @@
1618 + *
1619 + * rax original destination
1620 + */
1621 +-ENTRY(memset)
1622 ++.weak memset
1623 ++.p2align 4, 0x90
1624 ++memset:
1625 + ENTRY(__memset)
1626 + /*
1627 + * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
1628 +diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
1629 +index fe7a12599d8eb..968d7005f4a72 100644
1630 +--- a/arch/x86/mm/ident_map.c
1631 ++++ b/arch/x86/mm/ident_map.c
1632 +@@ -62,6 +62,7 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
1633 + unsigned long addr, unsigned long end)
1634 + {
1635 + unsigned long next;
1636 ++ int result;
1637 +
1638 + for (; addr < end; addr = next) {
1639 + p4d_t *p4d = p4d_page + p4d_index(addr);
1640 +@@ -73,13 +74,20 @@ static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
1641 +
1642 + if (p4d_present(*p4d)) {
1643 + pud = pud_offset(p4d, 0);
1644 +- ident_pud_init(info, pud, addr, next);
1645 ++ result = ident_pud_init(info, pud, addr, next);
1646 ++ if (result)
1647 ++ return result;
1648 ++
1649 + continue;
1650 + }
1651 + pud = (pud_t *)info->alloc_pgt_page(info->context);
1652 + if (!pud)
1653 + return -ENOMEM;
1654 +- ident_pud_init(info, pud, addr, next);
1655 ++
1656 ++ result = ident_pud_init(info, pud, addr, next);
1657 ++ if (result)
1658 ++ return result;
1659 ++
1660 + set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag));
1661 + }
1662 +
1663 +diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
1664 +index c9faf34cbb62e..1f25201de0afb 100644
1665 +--- a/arch/x86/mm/mem_encrypt_identity.c
1666 ++++ b/arch/x86/mm/mem_encrypt_identity.c
1667 +@@ -47,8 +47,8 @@
1668 + #define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
1669 +
1670 + #define PMD_FLAGS_DEC PMD_FLAGS_LARGE
1671 +-#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
1672 +- (_PAGE_PAT | _PAGE_PWT))
1673 ++#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
1674 ++ (_PAGE_PAT_LARGE | _PAGE_PWT))
1675 +
1676 + #define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
1677 +
1678 +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
1679 +index b72296bd04a29..2f41a34c8f574 100644
1680 +--- a/arch/x86/mm/tlb.c
1681 ++++ b/arch/x86/mm/tlb.c
1682 +@@ -321,8 +321,14 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
1683 + /*
1684 + * The membarrier system call requires a full memory barrier and
1685 + * core serialization before returning to user-space, after
1686 +- * storing to rq->curr. Writing to CR3 provides that full
1687 +- * memory barrier and core serializing instruction.
1688 ++ * storing to rq->curr, when changing mm. This is because
1689 ++ * membarrier() sends IPIs to all CPUs that are in the target mm
1690 ++ * to make them issue memory barriers. However, if another CPU
1691 ++ * switches to/from the target mm concurrently with
1692 ++ * membarrier(), it can cause that CPU not to receive an IPI
1693 ++ * when it really should issue a memory barrier. Writing to CR3
1694 ++ * provides that full memory barrier and core serializing
1695 ++ * instruction.
1696 + */
1697 + if (real_prev == next) {
1698 + VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
1699 +diff --git a/block/blk-mq.c b/block/blk-mq.c
1700 +index db2db0b70d34f..0df43515ff949 100644
1701 +--- a/block/blk-mq.c
1702 ++++ b/block/blk-mq.c
1703 +@@ -1118,6 +1118,23 @@ static void blk_mq_update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
1704 +
1705 + #define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
1706 +
1707 ++static void blk_mq_handle_dev_resource(struct request *rq,
1708 ++ struct list_head *list)
1709 ++{
1710 ++ struct request *next =
1711 ++ list_first_entry_or_null(list, struct request, queuelist);
1712 ++
1713 ++ /*
1714 ++ * If an I/O scheduler has been configured and we got a driver tag for
1715 ++ * the next request already, free it.
1716 ++ */
1717 ++ if (next)
1718 ++ blk_mq_put_driver_tag(next);
1719 ++
1720 ++ list_add(&rq->queuelist, list);
1721 ++ __blk_mq_requeue_request(rq);
1722 ++}
1723 ++
1724 + /*
1725 + * Returns true if we did some work AND can potentially do more.
1726 + */
1727 +@@ -1185,17 +1202,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
1728 +
1729 + ret = q->mq_ops->queue_rq(hctx, &bd);
1730 + if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
1731 +- /*
1732 +- * If an I/O scheduler has been configured and we got a
1733 +- * driver tag for the next request already, free it
1734 +- * again.
1735 +- */
1736 +- if (!list_empty(list)) {
1737 +- nxt = list_first_entry(list, struct request, queuelist);
1738 +- blk_mq_put_driver_tag(nxt);
1739 +- }
1740 +- list_add(&rq->queuelist, list);
1741 +- __blk_mq_requeue_request(rq);
1742 ++ blk_mq_handle_dev_resource(rq, list);
1743 + break;
1744 + }
1745 +
1746 +diff --git a/crypto/af_alg.c b/crypto/af_alg.c
1747 +index 272879d7b0d1f..d0276a4ed9876 100644
1748 +--- a/crypto/af_alg.c
1749 ++++ b/crypto/af_alg.c
1750 +@@ -151,7 +151,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1751 + const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
1752 + struct sock *sk = sock->sk;
1753 + struct alg_sock *ask = alg_sk(sk);
1754 +- struct sockaddr_alg *sa = (void *)uaddr;
1755 ++ struct sockaddr_alg_new *sa = (void *)uaddr;
1756 + const struct af_alg_type *type;
1757 + void *private;
1758 + int err;
1759 +@@ -159,7 +159,11 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1760 + if (sock->state == SS_CONNECTED)
1761 + return -EINVAL;
1762 +
1763 +- if (addr_len < sizeof(*sa))
1764 ++ BUILD_BUG_ON(offsetof(struct sockaddr_alg_new, salg_name) !=
1765 ++ offsetof(struct sockaddr_alg, salg_name));
1766 ++ BUILD_BUG_ON(offsetof(struct sockaddr_alg, salg_name) != sizeof(*sa));
1767 ++
1768 ++ if (addr_len < sizeof(*sa) + 1)
1769 + return -EINVAL;
1770 +
1771 + /* If caller uses non-allowed flag, return error. */
1772 +@@ -167,7 +171,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1773 + return -EINVAL;
1774 +
1775 + sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
1776 +- sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
1777 ++ sa->salg_name[addr_len - sizeof(*sa) - 1] = 0;
1778 +
1779 + type = alg_get_type(sa->salg_type);
1780 + if (IS_ERR(type) && PTR_ERR(type) == -ENOENT) {
1781 +diff --git a/crypto/ecdh.c b/crypto/ecdh.c
1782 +index bf6300175b9cd..a6e1a5d43fa7a 100644
1783 +--- a/crypto/ecdh.c
1784 ++++ b/crypto/ecdh.c
1785 +@@ -57,12 +57,13 @@ static int ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
1786 + return ecc_gen_privkey(ctx->curve_id, ctx->ndigits,
1787 + ctx->private_key);
1788 +
1789 +- if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
1790 +- (const u64 *)params.key, params.key_size) < 0)
1791 +- return -EINVAL;
1792 +-
1793 + memcpy(ctx->private_key, params.key, params.key_size);
1794 +
1795 ++ if (ecc_is_key_valid(ctx->curve_id, ctx->ndigits,
1796 ++ ctx->private_key, params.key_size) < 0) {
1797 ++ memzero_explicit(ctx->private_key, params.key_size);
1798 ++ return -EINVAL;
1799 ++ }
1800 + return 0;
1801 + }
1802 +
1803 +diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
1804 +index 67d97c0090a27..5d72baf60ac83 100644
1805 +--- a/drivers/acpi/acpi_pnp.c
1806 ++++ b/drivers/acpi/acpi_pnp.c
1807 +@@ -320,6 +320,9 @@ static bool matching_id(const char *idstr, const char *list_id)
1808 + {
1809 + int i;
1810 +
1811 ++ if (strlen(idstr) != strlen(list_id))
1812 ++ return false;
1813 ++
1814 + if (memcmp(idstr, list_id, 3))
1815 + return false;
1816 +
1817 +diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
1818 +index ca735dc24d377..9617e58832719 100644
1819 +--- a/drivers/acpi/device_pm.c
1820 ++++ b/drivers/acpi/device_pm.c
1821 +@@ -702,7 +702,7 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
1822 + static DEFINE_MUTEX(acpi_wakeup_lock);
1823 +
1824 + static int __acpi_device_wakeup_enable(struct acpi_device *adev,
1825 +- u32 target_state, int max_count)
1826 ++ u32 target_state)
1827 + {
1828 + struct acpi_device_wakeup *wakeup = &adev->wakeup;
1829 + acpi_status status;
1830 +@@ -710,9 +710,10 @@ static int __acpi_device_wakeup_enable(struct acpi_device *adev,
1831 +
1832 + mutex_lock(&acpi_wakeup_lock);
1833 +
1834 +- if (wakeup->enable_count >= max_count)
1835 ++ if (wakeup->enable_count >= INT_MAX) {
1836 ++ acpi_handle_info(adev->handle, "Wakeup enable count out of bounds!\n");
1837 + goto out;
1838 +-
1839 ++ }
1840 + if (wakeup->enable_count > 0)
1841 + goto inc;
1842 +
1843 +@@ -749,7 +750,7 @@ out:
1844 + */
1845 + static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state)
1846 + {
1847 +- return __acpi_device_wakeup_enable(adev, target_state, 1);
1848 ++ return __acpi_device_wakeup_enable(adev, target_state);
1849 + }
1850 +
1851 + /**
1852 +@@ -779,8 +780,12 @@ out:
1853 + mutex_unlock(&acpi_wakeup_lock);
1854 + }
1855 +
1856 +-static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
1857 +- int max_count)
1858 ++/**
1859 ++ * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
1860 ++ * @dev: Device to enable/disable to generate wakeup events.
1861 ++ * @enable: Whether to enable or disable the wakeup functionality.
1862 ++ */
1863 ++int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
1864 + {
1865 + struct acpi_device *adev;
1866 + int error;
1867 +@@ -800,36 +805,14 @@ static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
1868 + return 0;
1869 + }
1870 +
1871 +- error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(),
1872 +- max_count);
1873 ++ error = __acpi_device_wakeup_enable(adev, acpi_target_system_state());
1874 + if (!error)
1875 + dev_dbg(dev, "Wakeup enabled by ACPI\n");
1876 +
1877 + return error;
1878 + }
1879 +-
1880 +-/**
1881 +- * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
1882 +- * @dev: Device to enable/disable to generate wakeup events.
1883 +- * @enable: Whether to enable or disable the wakeup functionality.
1884 +- */
1885 +-int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
1886 +-{
1887 +- return __acpi_pm_set_device_wakeup(dev, enable, 1);
1888 +-}
1889 + EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup);
1890 +
1891 +-/**
1892 +- * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge.
1893 +- * @dev: Bridge device to enable/disable to generate wakeup events.
1894 +- * @enable: Whether to enable or disable the wakeup functionality.
1895 +- */
1896 +-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
1897 +-{
1898 +- return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX);
1899 +-}
1900 +-EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup);
1901 +-
1902 + /**
1903 + * acpi_dev_pm_low_power - Put ACPI device into a low-power state.
1904 + * @dev: Device to put into a low-power state.
1905 +diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
1906 +index 316a0fc785e36..d3f9a320e880e 100644
1907 +--- a/drivers/acpi/resource.c
1908 ++++ b/drivers/acpi/resource.c
1909 +@@ -549,7 +549,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
1910 + ret = c->preproc(ares, c->preproc_data);
1911 + if (ret < 0) {
1912 + c->error = ret;
1913 +- return AE_CTRL_TERMINATE;
1914 ++ return AE_ABORT_METHOD;
1915 + } else if (ret > 0) {
1916 + return AE_OK;
1917 + }
1918 +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
1919 +index 93896c992245b..42af2f37ba4e1 100644
1920 +--- a/drivers/block/xen-blkback/xenbus.c
1921 ++++ b/drivers/block/xen-blkback/xenbus.c
1922 +@@ -264,6 +264,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
1923 +
1924 + if (ring->xenblkd) {
1925 + kthread_stop(ring->xenblkd);
1926 ++ ring->xenblkd = NULL;
1927 + wake_up(&ring->shutdown_wq);
1928 + }
1929 +
1930 +@@ -651,7 +652,8 @@ static int xen_blkbk_probe(struct xenbus_device *dev,
1931 + /* setup back pointer */
1932 + be->blkif->be = be;
1933 +
1934 +- err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
1935 ++ err = xenbus_watch_pathfmt(dev, &be->backend_watch, NULL,
1936 ++ backend_changed,
1937 + "%s/%s", dev->nodename, "physical-device");
1938 + if (err)
1939 + goto fail;
1940 +diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
1941 +index 5a68cd4dd71cb..7ffeb37e8f202 100644
1942 +--- a/drivers/bluetooth/hci_h5.c
1943 ++++ b/drivers/bluetooth/hci_h5.c
1944 +@@ -257,6 +257,9 @@ static int h5_close(struct hci_uart *hu)
1945 + skb_queue_purge(&h5->rel);
1946 + skb_queue_purge(&h5->unrel);
1947 +
1948 ++ kfree_skb(h5->rx_skb);
1949 ++ h5->rx_skb = NULL;
1950 ++
1951 + if (h5->vnd && h5->vnd->close)
1952 + h5->vnd->close(h5);
1953 +
1954 +diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
1955 +index e906ecfe23dd8..9cb0733a03991 100644
1956 +--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
1957 ++++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
1958 +@@ -292,8 +292,10 @@ int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev,
1959 + goto error;
1960 +
1961 + mc_adev = resource->data;
1962 +- if (!mc_adev)
1963 ++ if (!mc_adev) {
1964 ++ error = -EINVAL;
1965 + goto error;
1966 ++ }
1967 +
1968 + *new_mc_adev = mc_adev;
1969 + return 0;
1970 +diff --git a/drivers/bus/mips_cdmm.c b/drivers/bus/mips_cdmm.c
1971 +index 1b14256376d24..7c1da45be166e 100644
1972 +--- a/drivers/bus/mips_cdmm.c
1973 ++++ b/drivers/bus/mips_cdmm.c
1974 +@@ -544,10 +544,8 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
1975 + dev_set_name(&dev->dev, "cdmm%u-%u", cpu, id);
1976 + ++id;
1977 + ret = device_register(&dev->dev);
1978 +- if (ret) {
1979 ++ if (ret)
1980 + put_device(&dev->dev);
1981 +- kfree(dev);
1982 +- }
1983 + }
1984 + }
1985 +
1986 +diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
1987 +index 4080d4e78e8e4..f3aaefafba893 100644
1988 +--- a/drivers/clk/clk-s2mps11.c
1989 ++++ b/drivers/clk/clk-s2mps11.c
1990 +@@ -211,6 +211,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
1991 + return ret;
1992 +
1993 + err_reg:
1994 ++ of_node_put(s2mps11_clks[0].clk_np);
1995 + while (--i >= 0)
1996 + clkdev_drop(s2mps11_clks[i].lookup);
1997 +
1998 +diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
1999 +index 612d65ede10a0..5370514959e15 100644
2000 +--- a/drivers/clk/mvebu/armada-37xx-xtal.c
2001 ++++ b/drivers/clk/mvebu/armada-37xx-xtal.c
2002 +@@ -15,8 +15,8 @@
2003 + #include <linux/platform_device.h>
2004 + #include <linux/regmap.h>
2005 +
2006 +-#define NB_GPIO1_LATCH 0xC
2007 +-#define XTAL_MODE BIT(31)
2008 ++#define NB_GPIO1_LATCH 0x8
2009 ++#define XTAL_MODE BIT(9)
2010 +
2011 + static int armada_3700_xtal_clock_probe(struct platform_device *pdev)
2012 + {
2013 +diff --git a/drivers/clk/renesas/r9a06g032-clocks.c b/drivers/clk/renesas/r9a06g032-clocks.c
2014 +index 6d2b568915597..6e03b467395b2 100644
2015 +--- a/drivers/clk/renesas/r9a06g032-clocks.c
2016 ++++ b/drivers/clk/renesas/r9a06g032-clocks.c
2017 +@@ -51,7 +51,7 @@ struct r9a06g032_clkdesc {
2018 + u16 sel, g1, r1, g2, r2;
2019 + } dual;
2020 + };
2021 +-} __packed;
2022 ++};
2023 +
2024 + #define I_GATE(_clk, _rst, _rdy, _midle, _scon, _mirack, _mistat) \
2025 + { .gate = _clk, .reset = _rst, \
2026 +diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2027 +index 9ac6c299e0744..19304d6b2c05d 100644
2028 +--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2029 ++++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
2030 +@@ -381,6 +381,7 @@ static struct clk_div_table ths_div_table[] = {
2031 + { .val = 1, .div = 2 },
2032 + { .val = 2, .div = 4 },
2033 + { .val = 3, .div = 6 },
2034 ++ { /* Sentinel */ },
2035 + };
2036 + static const char * const ths_parents[] = { "osc24M" };
2037 + static struct ccu_div ths_clk = {
2038 +diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
2039 +index 61e3ba12773ea..d9789378caf55 100644
2040 +--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
2041 ++++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
2042 +@@ -328,6 +328,7 @@ static struct clk_div_table ths_div_table[] = {
2043 + { .val = 1, .div = 2 },
2044 + { .val = 2, .div = 4 },
2045 + { .val = 3, .div = 6 },
2046 ++ { /* Sentinel */ },
2047 + };
2048 + static SUNXI_CCU_DIV_TABLE_WITH_GATE(ths_clk, "ths", "osc24M",
2049 + 0x074, 0, 2, ths_div_table, BIT(31), 0);
2050 +diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
2051 +index de466b4446da9..0efcb200dde5a 100644
2052 +--- a/drivers/clk/tegra/clk-id.h
2053 ++++ b/drivers/clk/tegra/clk-id.h
2054 +@@ -233,6 +233,7 @@ enum clk_id {
2055 + tegra_clk_sdmmc4,
2056 + tegra_clk_sdmmc4_8,
2057 + tegra_clk_se,
2058 ++ tegra_clk_se_10,
2059 + tegra_clk_soc_therm,
2060 + tegra_clk_soc_therm_8,
2061 + tegra_clk_sor0,
2062 +diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
2063 +index b137c5d34eec4..9d05fb48686db 100644
2064 +--- a/drivers/clk/tegra/clk-tegra-periph.c
2065 ++++ b/drivers/clk/tegra/clk-tegra-periph.c
2066 +@@ -650,7 +650,7 @@ static struct tegra_periph_init_data periph_clks[] = {
2067 + INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8),
2068 + INT8("host1x", mux_pllc4_out1_pllc_pllc4_out2_pllp_clkm_plla_pllc4_out0, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_9),
2069 + INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
2070 +- INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
2071 ++ INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se_10),
2072 + INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8),
2073 + INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8),
2074 + INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03),
2075 +diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
2076 +index 071af44b1ba85..e33ce851837e4 100644
2077 +--- a/drivers/clk/ti/fapll.c
2078 ++++ b/drivers/clk/ti/fapll.c
2079 +@@ -497,6 +497,7 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
2080 + {
2081 + struct clk_init_data *init;
2082 + struct fapll_synth *synth;
2083 ++ struct clk *clk = ERR_PTR(-ENOMEM);
2084 +
2085 + init = kzalloc(sizeof(*init), GFP_KERNEL);
2086 + if (!init)
2087 +@@ -519,13 +520,19 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
2088 + synth->hw.init = init;
2089 + synth->clk_pll = pll_clk;
2090 +
2091 +- return clk_register(NULL, &synth->hw);
2092 ++ clk = clk_register(NULL, &synth->hw);
2093 ++ if (IS_ERR(clk)) {
2094 ++ pr_err("failed to register clock\n");
2095 ++ goto free;
2096 ++ }
2097 ++
2098 ++ return clk;
2099 +
2100 + free:
2101 + kfree(synth);
2102 + kfree(init);
2103 +
2104 +- return ERR_PTR(-ENOMEM);
2105 ++ return clk;
2106 + }
2107 +
2108 + static void __init ti_fapll_setup(struct device_node *node)
2109 +diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
2110 +index 0445ad7e559e5..e67ab217eef41 100644
2111 +--- a/drivers/clocksource/arm_arch_timer.c
2112 ++++ b/drivers/clocksource/arm_arch_timer.c
2113 +@@ -827,15 +827,24 @@ static void arch_timer_evtstrm_enable(int divider)
2114 +
2115 + static void arch_timer_configure_evtstream(void)
2116 + {
2117 +- int evt_stream_div, pos;
2118 ++ int evt_stream_div, lsb;
2119 ++
2120 ++ /*
2121 ++ * As the event stream can at most be generated at half the frequency
2122 ++ * of the counter, use half the frequency when computing the divider.
2123 ++ */
2124 ++ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ / 2;
2125 ++
2126 ++ /*
2127 ++ * Find the closest power of two to the divisor. If the adjacent bit
2128 ++ * of lsb (last set bit, starts from 0) is set, then we use (lsb + 1).
2129 ++ */
2130 ++ lsb = fls(evt_stream_div) - 1;
2131 ++ if (lsb > 0 && (evt_stream_div & BIT(lsb - 1)))
2132 ++ lsb++;
2133 +
2134 +- /* Find the closest power of two to the divisor */
2135 +- evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
2136 +- pos = fls(evt_stream_div);
2137 +- if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
2138 +- pos--;
2139 + /* enable event stream */
2140 +- arch_timer_evtstrm_enable(min(pos, 15));
2141 ++ arch_timer_evtstrm_enable(max(0, min(lsb, 15)));
2142 + }
2143 +
2144 + static void arch_counter_set_user_access(void)
2145 +diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
2146 +index 29d51755e18b2..a7eb858a84a0f 100644
2147 +--- a/drivers/clocksource/cadence_ttc_timer.c
2148 ++++ b/drivers/clocksource/cadence_ttc_timer.c
2149 +@@ -419,10 +419,8 @@ static int __init ttc_setup_clockevent(struct clk *clk,
2150 + ttcce->ttc.clk = clk;
2151 +
2152 + err = clk_prepare_enable(ttcce->ttc.clk);
2153 +- if (err) {
2154 +- kfree(ttcce);
2155 +- return err;
2156 +- }
2157 ++ if (err)
2158 ++ goto out_kfree;
2159 +
2160 + ttcce->ttc.clk_rate_change_nb.notifier_call =
2161 + ttc_rate_change_clockevent_cb;
2162 +@@ -432,7 +430,7 @@ static int __init ttc_setup_clockevent(struct clk *clk,
2163 + &ttcce->ttc.clk_rate_change_nb);
2164 + if (err) {
2165 + pr_warn("Unable to register clock notifier.\n");
2166 +- return err;
2167 ++ goto out_kfree;
2168 + }
2169 +
2170 + ttcce->ttc.freq = clk_get_rate(ttcce->ttc.clk);
2171 +@@ -461,15 +459,17 @@ static int __init ttc_setup_clockevent(struct clk *clk,
2172 +
2173 + err = request_irq(irq, ttc_clock_event_interrupt,
2174 + IRQF_TIMER, ttcce->ce.name, ttcce);
2175 +- if (err) {
2176 +- kfree(ttcce);
2177 +- return err;
2178 +- }
2179 ++ if (err)
2180 ++ goto out_kfree;
2181 +
2182 + clockevents_config_and_register(&ttcce->ce,
2183 + ttcce->ttc.freq / PRESCALE, 1, 0xfffe);
2184 +
2185 + return 0;
2186 ++
2187 ++out_kfree:
2188 ++ kfree(ttcce);
2189 ++ return err;
2190 + }
2191 +
2192 + /**
2193 +diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
2194 +index 1608f7105c9f8..ad743f2f31e78 100644
2195 +--- a/drivers/cpufreq/highbank-cpufreq.c
2196 ++++ b/drivers/cpufreq/highbank-cpufreq.c
2197 +@@ -104,6 +104,13 @@ out_put_node:
2198 + }
2199 + module_init(hb_cpufreq_driver_init);
2200 +
2201 ++static const struct of_device_id __maybe_unused hb_cpufreq_of_match[] = {
2202 ++ { .compatible = "calxeda,highbank" },
2203 ++ { .compatible = "calxeda,ecx-2000" },
2204 ++ { },
2205 ++};
2206 ++MODULE_DEVICE_TABLE(of, hb_cpufreq_of_match);
2207 ++
2208 + MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@×××××××.com>");
2209 + MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver");
2210 + MODULE_LICENSE("GPL");
2211 +diff --git a/drivers/cpufreq/loongson1-cpufreq.c b/drivers/cpufreq/loongson1-cpufreq.c
2212 +index be89416e2358f..9d902f67f8716 100644
2213 +--- a/drivers/cpufreq/loongson1-cpufreq.c
2214 ++++ b/drivers/cpufreq/loongson1-cpufreq.c
2215 +@@ -217,6 +217,7 @@ static struct platform_driver ls1x_cpufreq_platdrv = {
2216 +
2217 + module_platform_driver(ls1x_cpufreq_platdrv);
2218 +
2219 ++MODULE_ALIAS("platform:ls1x-cpufreq");
2220 + MODULE_AUTHOR("Kelvin Cheung <keguang.zhang@×××××.com>");
2221 + MODULE_DESCRIPTION("Loongson1 CPUFreq driver");
2222 + MODULE_LICENSE("GPL");
2223 +diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c
2224 +index eb8920d398181..5a81e20f02824 100644
2225 +--- a/drivers/cpufreq/mediatek-cpufreq.c
2226 ++++ b/drivers/cpufreq/mediatek-cpufreq.c
2227 +@@ -554,6 +554,7 @@ static const struct of_device_id mtk_cpufreq_machines[] __initconst = {
2228 +
2229 + { }
2230 + };
2231 ++MODULE_DEVICE_TABLE(of, mtk_cpufreq_machines);
2232 +
2233 + static int __init mtk_cpufreq_driver_init(void)
2234 + {
2235 +diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
2236 +index 87a98ec77773a..0338885332a75 100644
2237 +--- a/drivers/cpufreq/scpi-cpufreq.c
2238 ++++ b/drivers/cpufreq/scpi-cpufreq.c
2239 +@@ -246,6 +246,7 @@ static struct platform_driver scpi_cpufreq_platdrv = {
2240 + };
2241 + module_platform_driver(scpi_cpufreq_platdrv);
2242 +
2243 ++MODULE_ALIAS("platform:scpi-cpufreq");
2244 + MODULE_AUTHOR("Sudeep Holla <sudeep.holla@×××.com>");
2245 + MODULE_DESCRIPTION("ARM SCPI CPUFreq interface driver");
2246 + MODULE_LICENSE("GPL v2");
2247 +diff --git a/drivers/cpufreq/sti-cpufreq.c b/drivers/cpufreq/sti-cpufreq.c
2248 +index 6b5d241c30b70..2d09960afa591 100644
2249 +--- a/drivers/cpufreq/sti-cpufreq.c
2250 ++++ b/drivers/cpufreq/sti-cpufreq.c
2251 +@@ -295,6 +295,13 @@ register_cpufreq_dt:
2252 + }
2253 + module_init(sti_cpufreq_init);
2254 +
2255 ++static const struct of_device_id __maybe_unused sti_cpufreq_of_match[] = {
2256 ++ { .compatible = "st,stih407" },
2257 ++ { .compatible = "st,stih410" },
2258 ++ { },
2259 ++};
2260 ++MODULE_DEVICE_TABLE(of, sti_cpufreq_of_match);
2261 ++
2262 + MODULE_DESCRIPTION("STMicroelectronics CPUFreq/OPP driver");
2263 + MODULE_AUTHOR("Ajitpal Singh <ajitpal.singh@××.com>");
2264 + MODULE_AUTHOR("Lee Jones <lee.jones@××××××.org>");
2265 +diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
2266 +index 68d5ea818b6c0..cd00afb5786e8 100644
2267 +--- a/drivers/crypto/amcc/crypto4xx_core.c
2268 ++++ b/drivers/crypto/amcc/crypto4xx_core.c
2269 +@@ -926,7 +926,7 @@ int crypto4xx_build_pd(struct crypto_async_request *req,
2270 + }
2271 +
2272 + pd->pd_ctl.w = PD_CTL_HOST_READY |
2273 +- ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
2274 ++ ((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
2275 + (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
2276 + PD_CTL_HASH_FINAL : 0);
2277 + pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
2278 +diff --git a/drivers/crypto/inside-secure/safexcel.c b/drivers/crypto/inside-secure/safexcel.c
2279 +index 86c699c14f849..bc6c5cb7de239 100644
2280 +--- a/drivers/crypto/inside-secure/safexcel.c
2281 ++++ b/drivers/crypto/inside-secure/safexcel.c
2282 +@@ -1066,7 +1066,7 @@ static int safexcel_probe(struct platform_device *pdev)
2283 +
2284 + priv->ring[i].rdr_req = devm_kcalloc(dev,
2285 + EIP197_DEFAULT_RING_SIZE,
2286 +- sizeof(priv->ring[i].rdr_req),
2287 ++ sizeof(*priv->ring[i].rdr_req),
2288 + GFP_KERNEL);
2289 + if (!priv->ring[i].rdr_req) {
2290 + ret = -ENOMEM;
2291 +diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
2292 +index 9019f6b67986b..a5d6e1a0192bc 100644
2293 +--- a/drivers/crypto/omap-aes.c
2294 ++++ b/drivers/crypto/omap-aes.c
2295 +@@ -1163,7 +1163,7 @@ static int omap_aes_probe(struct platform_device *pdev)
2296 + if (err < 0) {
2297 + dev_err(dev, "%s: failed to get_sync(%d)\n",
2298 + __func__, err);
2299 +- goto err_res;
2300 ++ goto err_pm_disable;
2301 + }
2302 +
2303 + omap_aes_dma_stop(dd);
2304 +@@ -1276,6 +1276,7 @@ err_engine:
2305 + omap_aes_dma_cleanup(dd);
2306 + err_irq:
2307 + tasklet_kill(&dd->done_task);
2308 ++err_pm_disable:
2309 + pm_runtime_disable(dev);
2310 + err_res:
2311 + dd = NULL;
2312 +diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
2313 +index ff149e176f649..dac130bb807ae 100644
2314 +--- a/drivers/crypto/qat/qat_common/qat_hal.c
2315 ++++ b/drivers/crypto/qat/qat_common/qat_hal.c
2316 +@@ -1189,7 +1189,7 @@ static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
2317 + unsigned short mask;
2318 + unsigned short dr_offset = 0x10;
2319 +
2320 +- status = ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
2321 ++ ctx_enables = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES);
2322 + if (CE_INUSE_CONTEXTS & ctx_enables) {
2323 + if (ctx & 0x1) {
2324 + pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
2325 +diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
2326 +index ea16308fae0a5..c70a7c4f5b739 100644
2327 +--- a/drivers/crypto/talitos.c
2328 ++++ b/drivers/crypto/talitos.c
2329 +@@ -474,7 +474,7 @@ DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
2330 + /*
2331 + * locate current (offending) descriptor
2332 + */
2333 +-static u32 current_desc_hdr(struct device *dev, int ch)
2334 ++static __be32 current_desc_hdr(struct device *dev, int ch)
2335 + {
2336 + struct talitos_private *priv = dev_get_drvdata(dev);
2337 + int tail, iter;
2338 +@@ -492,7 +492,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
2339 +
2340 + iter = tail;
2341 + while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
2342 +- priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
2343 ++ priv->chan[ch].fifo[iter].desc->next_desc != cpu_to_be32(cur_desc)) {
2344 + iter = (iter + 1) & (priv->fifo_len - 1);
2345 + if (iter == tail) {
2346 + dev_err(dev, "couldn't locate current descriptor\n");
2347 +@@ -500,7 +500,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
2348 + }
2349 + }
2350 +
2351 +- if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
2352 ++ if (priv->chan[ch].fifo[iter].desc->next_desc == cpu_to_be32(cur_desc)) {
2353 + struct talitos_edesc *edesc;
2354 +
2355 + edesc = container_of(priv->chan[ch].fifo[iter].desc,
2356 +@@ -515,13 +515,13 @@ static u32 current_desc_hdr(struct device *dev, int ch)
2357 + /*
2358 + * user diagnostics; report root cause of error based on execution unit status
2359 + */
2360 +-static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
2361 ++static void report_eu_error(struct device *dev, int ch, __be32 desc_hdr)
2362 + {
2363 + struct talitos_private *priv = dev_get_drvdata(dev);
2364 + int i;
2365 +
2366 + if (!desc_hdr)
2367 +- desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
2368 ++ desc_hdr = cpu_to_be32(in_be32(priv->chan[ch].reg + TALITOS_DESCBUF));
2369 +
2370 + switch (desc_hdr & DESC_HDR_SEL0_MASK) {
2371 + case DESC_HDR_SEL0_AFEU:
2372 +diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
2373 +index 8dc0aa4d73ab8..462adf7e4e952 100644
2374 +--- a/drivers/dma/mv_xor_v2.c
2375 ++++ b/drivers/dma/mv_xor_v2.c
2376 +@@ -777,8 +777,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
2377 + goto disable_clk;
2378 +
2379 + msi_desc = first_msi_entry(&pdev->dev);
2380 +- if (!msi_desc)
2381 ++ if (!msi_desc) {
2382 ++ ret = -ENODEV;
2383 + goto free_msi_irqs;
2384 ++ }
2385 + xor_dev->msi_desc = msi_desc;
2386 +
2387 + ret = devm_request_irq(&pdev->dev, msi_desc->irq,
2388 +diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
2389 +index cbe4158531979..fe25c98380ad0 100644
2390 +--- a/drivers/edac/amd64_edac.c
2391 ++++ b/drivers/edac/amd64_edac.c
2392 +@@ -18,6 +18,9 @@ static struct msr __percpu *msrs;
2393 + /* Per-node stuff */
2394 + static struct ecc_settings **ecc_stngs;
2395 +
2396 ++/* Device for the PCI component */
2397 ++static struct device *pci_ctl_dev;
2398 ++
2399 + /*
2400 + * Valid scrub rates for the K8 hardware memory scrubber. We map the scrubbing
2401 + * bandwidth to a valid bit pattern. The 'set' operation finds the 'matching-
2402 +@@ -2563,6 +2566,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2403 + return -ENODEV;
2404 + }
2405 +
2406 ++ if (!pci_ctl_dev)
2407 ++ pci_ctl_dev = &pvt->F0->dev;
2408 ++
2409 + edac_dbg(1, "F0: %s\n", pci_name(pvt->F0));
2410 + edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2411 + edac_dbg(1, "F6: %s\n", pci_name(pvt->F6));
2412 +@@ -2587,6 +2593,9 @@ reserve_mc_sibling_devs(struct amd64_pvt *pvt, u16 pci_id1, u16 pci_id2)
2413 + return -ENODEV;
2414 + }
2415 +
2416 ++ if (!pci_ctl_dev)
2417 ++ pci_ctl_dev = &pvt->F2->dev;
2418 ++
2419 + edac_dbg(1, "F1: %s\n", pci_name(pvt->F1));
2420 + edac_dbg(1, "F2: %s\n", pci_name(pvt->F2));
2421 + edac_dbg(1, "F3: %s\n", pci_name(pvt->F3));
2422 +@@ -3441,21 +3450,10 @@ static void remove_one_instance(unsigned int nid)
2423 +
2424 + static void setup_pci_device(void)
2425 + {
2426 +- struct mem_ctl_info *mci;
2427 +- struct amd64_pvt *pvt;
2428 +-
2429 + if (pci_ctl)
2430 + return;
2431 +
2432 +- mci = edac_mc_find(0);
2433 +- if (!mci)
2434 +- return;
2435 +-
2436 +- pvt = mci->pvt_info;
2437 +- if (pvt->umc)
2438 +- pci_ctl = edac_pci_create_generic_ctl(&pvt->F0->dev, EDAC_MOD_STR);
2439 +- else
2440 +- pci_ctl = edac_pci_create_generic_ctl(&pvt->F2->dev, EDAC_MOD_STR);
2441 ++ pci_ctl = edac_pci_create_generic_ctl(pci_ctl_dev, EDAC_MOD_STR);
2442 + if (!pci_ctl) {
2443 + pr_warn("%s(): Unable to create PCI control\n", __func__);
2444 + pr_warn("%s(): PCI error report via EDAC not set\n", __func__);
2445 +@@ -3535,6 +3533,8 @@ static int __init amd64_edac_init(void)
2446 + return 0;
2447 +
2448 + err_pci:
2449 ++ pci_ctl_dev = NULL;
2450 ++
2451 + msrs_free(msrs);
2452 + msrs = NULL;
2453 +
2454 +@@ -3566,6 +3566,8 @@ static void __exit amd64_edac_exit(void)
2455 + kfree(ecc_stngs);
2456 + ecc_stngs = NULL;
2457 +
2458 ++ pci_ctl_dev = NULL;
2459 ++
2460 + msrs_free(msrs);
2461 + msrs = NULL;
2462 + }
2463 +diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
2464 +index 227651ff9666a..c221a0aec0f37 100644
2465 +--- a/drivers/extcon/extcon-max77693.c
2466 ++++ b/drivers/extcon/extcon-max77693.c
2467 +@@ -1275,4 +1275,4 @@ module_platform_driver(max77693_muic_driver);
2468 + MODULE_DESCRIPTION("Maxim MAX77693 Extcon driver");
2469 + MODULE_AUTHOR("Chanwoo Choi <cw00.choi@×××××××.com>");
2470 + MODULE_LICENSE("GPL");
2471 +-MODULE_ALIAS("platform:extcon-max77693");
2472 ++MODULE_ALIAS("platform:max77693-muic");
2473 +diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c
2474 +index 4935cda5301ea..4f1af323ec03b 100644
2475 +--- a/drivers/gpio/gpio-eic-sprd.c
2476 ++++ b/drivers/gpio/gpio-eic-sprd.c
2477 +@@ -599,7 +599,7 @@ static int sprd_eic_probe(struct platform_device *pdev)
2478 + */
2479 + res = platform_get_resource(pdev, IORESOURCE_MEM, i);
2480 + if (!res)
2481 +- continue;
2482 ++ break;
2483 +
2484 + sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res);
2485 + if (IS_ERR(sprd_eic->base[i]))
2486 +diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
2487 +index adc768f908f1a..3b78dcda47364 100644
2488 +--- a/drivers/gpio/gpio-mvebu.c
2489 ++++ b/drivers/gpio/gpio-mvebu.c
2490 +@@ -1191,6 +1191,13 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
2491 +
2492 + devm_gpiochip_add_data(&pdev->dev, &mvchip->chip, mvchip);
2493 +
2494 ++ /* Some MVEBU SoCs have simple PWM support for GPIO lines */
2495 ++ if (IS_ENABLED(CONFIG_PWM)) {
2496 ++ err = mvebu_pwm_probe(pdev, mvchip, id);
2497 ++ if (err)
2498 ++ return err;
2499 ++ }
2500 ++
2501 + /* Some gpio controllers do not provide irq support */
2502 + if (!have_irqs)
2503 + return 0;
2504 +@@ -1200,7 +1207,8 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
2505 + if (!mvchip->domain) {
2506 + dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
2507 + mvchip->chip.label);
2508 +- return -ENODEV;
2509 ++ err = -ENODEV;
2510 ++ goto err_pwm;
2511 + }
2512 +
2513 + err = irq_alloc_domain_generic_chips(
2514 +@@ -1248,14 +1256,12 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
2515 + mvchip);
2516 + }
2517 +
2518 +- /* Some MVEBU SoCs have simple PWM support for GPIO lines */
2519 +- if (IS_ENABLED(CONFIG_PWM))
2520 +- return mvebu_pwm_probe(pdev, mvchip, id);
2521 +-
2522 + return 0;
2523 +
2524 + err_domain:
2525 + irq_domain_remove(mvchip->domain);
2526 ++err_pwm:
2527 ++ pwmchip_remove(&mvchip->mvpwm->chip);
2528 +
2529 + return err;
2530 + }
2531 +diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
2532 +index 0e4f25d63fd2d..0b11210c882ee 100644
2533 +--- a/drivers/gpu/drm/drm_dp_aux_dev.c
2534 ++++ b/drivers/gpu/drm/drm_dp_aux_dev.c
2535 +@@ -60,7 +60,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
2536 +
2537 + mutex_lock(&aux_idr_mutex);
2538 + aux_dev = idr_find(&aux_idr, index);
2539 +- if (!kref_get_unless_zero(&aux_dev->refcount))
2540 ++ if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
2541 + aux_dev = NULL;
2542 + mutex_unlock(&aux_idr_mutex);
2543 +
2544 +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
2545 +index a0aafd9a37e60..c50fe915f5c84 100644
2546 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c
2547 ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
2548 +@@ -2706,11 +2706,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2549 + {
2550 + int ret;
2551 +
2552 +- port = drm_dp_get_validated_port_ref(mgr, port);
2553 +- if (!port)
2554 ++ if (slots < 0)
2555 + return false;
2556 +
2557 +- if (slots < 0)
2558 ++ port = drm_dp_get_validated_port_ref(mgr, port);
2559 ++ if (!port)
2560 + return false;
2561 +
2562 + if (port->vcpi.vcpi > 0) {
2563 +@@ -2725,6 +2725,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2564 + if (ret) {
2565 + DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
2566 + DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
2567 ++ drm_dp_put_port(port);
2568 + goto out;
2569 + }
2570 + DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
2571 +diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
2572 +index 05eba6dec5ebf..3e8b804cf7e7e 100644
2573 +--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
2574 ++++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
2575 +@@ -2124,7 +2124,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
2576 + DRM_INFO("failed to retrieve link info, disabling eDP\n");
2577 + cdv_intel_dp_encoder_destroy(encoder);
2578 + cdv_intel_dp_destroy(connector);
2579 +- goto err_priv;
2580 ++ goto err_connector;
2581 + } else {
2582 + DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
2583 + intel_dp->dpcd[0], intel_dp->dpcd[1],
2584 +diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
2585 +index 21a69b046625a..d15511b521cb7 100644
2586 +--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
2587 ++++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
2588 +@@ -554,6 +554,7 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
2589 + struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
2590 + void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
2591 + u32 val;
2592 ++ int ret;
2593 +
2594 + val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
2595 + val &= ~0x3;
2596 +@@ -568,6 +569,13 @@ static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
2597 + val |= cached->pll_mux;
2598 + pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
2599 +
2600 ++ ret = dsi_pll_10nm_vco_set_rate(&pll->clk_hw, pll_10nm->vco_current_rate, pll_10nm->vco_ref_clk_rate);
2601 ++ if (ret) {
2602 ++ DRM_DEV_ERROR(&pll_10nm->pdev->dev,
2603 ++ "restore vco rate failed. ret=%d\n", ret);
2604 ++ return ret;
2605 ++ }
2606 ++
2607 + DBG("DSI PLL%d", pll_10nm->id);
2608 +
2609 + return 0;
2610 +diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
2611 +index e884183c018ac..cb5ce73f72694 100644
2612 +--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
2613 ++++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
2614 +@@ -763,6 +763,7 @@ static int omap_dmm_probe(struct platform_device *dev)
2615 + &omap_dmm->refill_pa, GFP_KERNEL);
2616 + if (!omap_dmm->refill_va) {
2617 + dev_err(&dev->dev, "could not allocate refill memory\n");
2618 ++ ret = -ENOMEM;
2619 + goto fail;
2620 + }
2621 +
2622 +diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
2623 +index a2bd5876c6335..00808a3d67832 100644
2624 +--- a/drivers/gpu/drm/tegra/drm.c
2625 ++++ b/drivers/gpu/drm/tegra/drm.c
2626 +@@ -242,7 +242,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
2627 + if (!fpriv)
2628 + return -ENOMEM;
2629 +
2630 +- idr_init(&fpriv->contexts);
2631 ++ idr_init_base(&fpriv->contexts, 1);
2632 + mutex_init(&fpriv->lock);
2633 + filp->driver_priv = fpriv;
2634 +
2635 +diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
2636 +index 89cb70da2bfe6..83108e2430501 100644
2637 +--- a/drivers/gpu/drm/tegra/sor.c
2638 ++++ b/drivers/gpu/drm/tegra/sor.c
2639 +@@ -2668,17 +2668,23 @@ static int tegra_sor_init(struct host1x_client *client)
2640 + if (err < 0) {
2641 + dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
2642 + err);
2643 ++ clk_disable_unprepare(sor->clk);
2644 + return err;
2645 + }
2646 + }
2647 +
2648 + err = clk_prepare_enable(sor->clk_safe);
2649 +- if (err < 0)
2650 ++ if (err < 0) {
2651 ++ clk_disable_unprepare(sor->clk);
2652 + return err;
2653 ++ }
2654 +
2655 + err = clk_prepare_enable(sor->clk_dp);
2656 +- if (err < 0)
2657 ++ if (err < 0) {
2658 ++ clk_disable_unprepare(sor->clk_safe);
2659 ++ clk_disable_unprepare(sor->clk);
2660 + return err;
2661 ++ }
2662 +
2663 + return 0;
2664 + }
2665 +diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
2666 +index ac344ddb23bc8..f93384c232066 100644
2667 +--- a/drivers/gpu/drm/tve200/tve200_drv.c
2668 ++++ b/drivers/gpu/drm/tve200/tve200_drv.c
2669 +@@ -223,8 +223,8 @@ static int tve200_probe(struct platform_device *pdev)
2670 + }
2671 +
2672 + irq = platform_get_irq(pdev, 0);
2673 +- if (!irq) {
2674 +- ret = -EINVAL;
2675 ++ if (irq < 0) {
2676 ++ ret = irq;
2677 + goto clk_disable;
2678 + }
2679 +
2680 +diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
2681 +index 6b6d5ab82ec3f..1f6c91496d93a 100644
2682 +--- a/drivers/gpu/drm/xen/xen_drm_front.c
2683 ++++ b/drivers/gpu/drm/xen/xen_drm_front.c
2684 +@@ -410,7 +410,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
2685 + args->size = args->pitch * args->height;
2686 +
2687 + obj = xen_drm_front_gem_create(dev, args->size);
2688 +- if (IS_ERR_OR_NULL(obj)) {
2689 ++ if (IS_ERR(obj)) {
2690 + ret = PTR_ERR(obj);
2691 + goto fail;
2692 + }
2693 +diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
2694 +index 802662839e7ed..cba7852123d66 100644
2695 +--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
2696 ++++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
2697 +@@ -85,7 +85,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
2698 +
2699 + size = round_up(size, PAGE_SIZE);
2700 + xen_obj = gem_create_obj(dev, size);
2701 +- if (IS_ERR_OR_NULL(xen_obj))
2702 ++ if (IS_ERR(xen_obj))
2703 + return xen_obj;
2704 +
2705 + if (drm_info->front_info->cfg.be_alloc) {
2706 +@@ -119,7 +119,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
2707 + */
2708 + xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
2709 + xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
2710 +- if (IS_ERR_OR_NULL(xen_obj->pages)) {
2711 ++ if (IS_ERR(xen_obj->pages)) {
2712 + ret = PTR_ERR(xen_obj->pages);
2713 + xen_obj->pages = NULL;
2714 + goto fail;
2715 +@@ -138,7 +138,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
2716 + struct xen_gem_object *xen_obj;
2717 +
2718 + xen_obj = gem_create(dev, size);
2719 +- if (IS_ERR_OR_NULL(xen_obj))
2720 ++ if (IS_ERR(xen_obj))
2721 + return ERR_CAST(xen_obj);
2722 +
2723 + return &xen_obj->base;
2724 +@@ -196,7 +196,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
2725 +
2726 + size = attach->dmabuf->size;
2727 + xen_obj = gem_create_obj(dev, size);
2728 +- if (IS_ERR_OR_NULL(xen_obj))
2729 ++ if (IS_ERR(xen_obj))
2730 + return ERR_CAST(xen_obj);
2731 +
2732 + ret = gem_alloc_pages_array(xen_obj, size);
2733 +diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
2734 +index a3479eb72d794..d9700c69e5b7f 100644
2735 +--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
2736 ++++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
2737 +@@ -59,7 +59,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
2738 + int ret;
2739 +
2740 + fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
2741 +- if (IS_ERR_OR_NULL(fb))
2742 ++ if (IS_ERR(fb))
2743 + return fb;
2744 +
2745 + gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
2746 +diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
2747 +index f98c1e1b1dbdc..58a753ef27175 100644
2748 +--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
2749 ++++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
2750 +@@ -397,6 +397,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
2751 + },
2752 + .driver_data = (void *)&sipodev_desc
2753 + },
2754 ++ {
2755 ++ .ident = "Vero K147",
2756 ++ .matches = {
2757 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VERO"),
2758 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "K147"),
2759 ++ },
2760 ++ .driver_data = (void *)&sipodev_desc
2761 ++ },
2762 + { } /* Terminate list */
2763 + };
2764 +
2765 +diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
2766 +index 41a09f506803d..129c5e6bc6547 100644
2767 +--- a/drivers/hsi/controllers/omap_ssi_core.c
2768 ++++ b/drivers/hsi/controllers/omap_ssi_core.c
2769 +@@ -389,7 +389,7 @@ static int ssi_add_controller(struct hsi_controller *ssi,
2770 +
2771 + err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL);
2772 + if (err < 0)
2773 +- goto out_err;
2774 ++ return err;
2775 + ssi->id = err;
2776 +
2777 + ssi->owner = THIS_MODULE;
2778 +diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
2779 +index 3b684687b5a7d..9a3cb07555e3b 100644
2780 +--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
2781 ++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
2782 +@@ -183,6 +183,8 @@ static int tmc_pages_alloc(struct tmc_pages *tmc_pages,
2783 + } else {
2784 + page = alloc_pages_node(node,
2785 + GFP_KERNEL | __GFP_ZERO, 0);
2786 ++ if (!page)
2787 ++ goto err;
2788 + }
2789 + paddr = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
2790 + if (dma_mapping_error(dev, paddr))
2791 +diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
2792 +index 1f98566d5b3c1..5ae3ce60a33f7 100644
2793 +--- a/drivers/iio/adc/rockchip_saradc.c
2794 ++++ b/drivers/iio/adc/rockchip_saradc.c
2795 +@@ -383,7 +383,7 @@ static int rockchip_saradc_resume(struct device *dev)
2796 +
2797 + ret = clk_prepare_enable(info->clk);
2798 + if (ret)
2799 +- return ret;
2800 ++ clk_disable_unprepare(info->pclk);
2801 +
2802 + return ret;
2803 + }
2804 +diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
2805 +index c85659ca95078..e95d817c83905 100644
2806 +--- a/drivers/iio/imu/bmi160/bmi160_core.c
2807 ++++ b/drivers/iio/imu/bmi160/bmi160_core.c
2808 +@@ -385,8 +385,8 @@ static irqreturn_t bmi160_trigger_handler(int irq, void *p)
2809 + struct iio_poll_func *pf = p;
2810 + struct iio_dev *indio_dev = pf->indio_dev;
2811 + struct bmi160_data *data = iio_priv(indio_dev);
2812 +- __le16 buf[16];
2813 +- /* 3 sens x 3 axis x __le16 + 3 x __le16 pad + 4 x __le16 tstamp */
2814 ++ __le16 buf[12];
2815 ++ /* 2 sens x 3 axis x __le16 + 2 x __le16 pad + 4 x __le16 tstamp */
2816 + int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
2817 + __le16 sample;
2818 +
2819 +diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
2820 +index a0d089afa1a21..61b9e1bbcc318 100644
2821 +--- a/drivers/iio/industrialio-buffer.c
2822 ++++ b/drivers/iio/industrialio-buffer.c
2823 +@@ -850,12 +850,12 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
2824 + indio_dev->masklength,
2825 + in_ind + 1);
2826 + while (in_ind != out_ind) {
2827 +- in_ind = find_next_bit(indio_dev->active_scan_mask,
2828 +- indio_dev->masklength,
2829 +- in_ind + 1);
2830 + length = iio_storage_bytes_for_si(indio_dev, in_ind);
2831 + /* Make sure we are aligned */
2832 + in_loc = roundup(in_loc, length) + length;
2833 ++ in_ind = find_next_bit(indio_dev->active_scan_mask,
2834 ++ indio_dev->masklength,
2835 ++ in_ind + 1);
2836 + }
2837 + length = iio_storage_bytes_for_si(indio_dev, in_ind);
2838 + out_loc = roundup(out_loc, length);
2839 +diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
2840 +index ffe9ce798ea2d..d61c0645244ed 100644
2841 +--- a/drivers/iio/light/rpr0521.c
2842 ++++ b/drivers/iio/light/rpr0521.c
2843 +@@ -197,6 +197,17 @@ struct rpr0521_data {
2844 + bool pxs_need_dis;
2845 +
2846 + struct regmap *regmap;
2847 ++
2848 ++ /*
2849 ++ * Ensure correct naturally aligned timestamp.
2850 ++ * Note that the read will put garbage data into
2851 ++ * the padding but this should not be a problem
2852 ++ */
2853 ++ struct {
2854 ++ __le16 channels[3];
2855 ++ u8 garbage;
2856 ++ s64 ts __aligned(8);
2857 ++ } scan;
2858 + };
2859 +
2860 + static IIO_CONST_ATTR(in_intensity_scale_available, RPR0521_ALS_SCALE_AVAIL);
2861 +@@ -452,8 +463,6 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
2862 + struct rpr0521_data *data = iio_priv(indio_dev);
2863 + int err;
2864 +
2865 +- u8 buffer[16]; /* 3 16-bit channels + padding + ts */
2866 +-
2867 + /* Use irq timestamp when reasonable. */
2868 + if (iio_trigger_using_own(indio_dev) && data->irq_timestamp) {
2869 + pf->timestamp = data->irq_timestamp;
2870 +@@ -464,11 +473,11 @@ static irqreturn_t rpr0521_trigger_consumer_handler(int irq, void *p)
2871 + pf->timestamp = iio_get_time_ns(indio_dev);
2872 +
2873 + err = regmap_bulk_read(data->regmap, RPR0521_REG_PXS_DATA,
2874 +- &buffer,
2875 ++ data->scan.channels,
2876 + (3 * 2) + 1); /* 3 * 16-bit + (discarded) int clear reg. */
2877 + if (!err)
2878 + iio_push_to_buffers_with_timestamp(indio_dev,
2879 +- buffer, pf->timestamp);
2880 ++ &data->scan, pf->timestamp);
2881 + else
2882 + dev_err(&data->client->dev,
2883 + "Trigger consumer can't read from sensor.\n");
2884 +diff --git a/drivers/iio/light/st_uvis25.h b/drivers/iio/light/st_uvis25.h
2885 +index 5e970ab480cda..f9bb7a5755dda 100644
2886 +--- a/drivers/iio/light/st_uvis25.h
2887 ++++ b/drivers/iio/light/st_uvis25.h
2888 +@@ -28,6 +28,11 @@ struct st_uvis25_hw {
2889 + struct iio_trigger *trig;
2890 + bool enabled;
2891 + int irq;
2892 ++ /* Ensure timestamp is naturally aligned */
2893 ++ struct {
2894 ++ u8 chan;
2895 ++ s64 ts __aligned(8);
2896 ++ } scan;
2897 + };
2898 +
2899 + extern const struct dev_pm_ops st_uvis25_pm_ops;
2900 +diff --git a/drivers/iio/light/st_uvis25_core.c b/drivers/iio/light/st_uvis25_core.c
2901 +index 302635836e6ba..815211e024a8b 100644
2902 +--- a/drivers/iio/light/st_uvis25_core.c
2903 ++++ b/drivers/iio/light/st_uvis25_core.c
2904 +@@ -235,17 +235,19 @@ static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = {
2905 +
2906 + static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p)
2907 + {
2908 +- u8 buffer[ALIGN(sizeof(u8), sizeof(s64)) + sizeof(s64)];
2909 + struct iio_poll_func *pf = p;
2910 + struct iio_dev *iio_dev = pf->indio_dev;
2911 + struct st_uvis25_hw *hw = iio_priv(iio_dev);
2912 ++ unsigned int val;
2913 + int err;
2914 +
2915 +- err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, (int *)buffer);
2916 ++ err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, &val);
2917 + if (err < 0)
2918 + goto out;
2919 +
2920 +- iio_push_to_buffers_with_timestamp(iio_dev, buffer,
2921 ++ hw->scan.chan = val;
2922 ++
2923 ++ iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan,
2924 + iio_get_time_ns(iio_dev));
2925 +
2926 + out:
2927 +diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
2928 +index 7537547fb7eeb..bd001062dc65e 100644
2929 +--- a/drivers/iio/pressure/mpl3115.c
2930 ++++ b/drivers/iio/pressure/mpl3115.c
2931 +@@ -147,7 +147,14 @@ static irqreturn_t mpl3115_trigger_handler(int irq, void *p)
2932 + struct iio_poll_func *pf = p;
2933 + struct iio_dev *indio_dev = pf->indio_dev;
2934 + struct mpl3115_data *data = iio_priv(indio_dev);
2935 +- u8 buffer[16]; /* 32-bit channel + 16-bit channel + padding + ts */
2936 ++ /*
2937 ++ * 32-bit channel + 16-bit channel + padding + ts
2938 ++ * Note that it is possible for only one of the first 2
2939 ++ * channels to be enabled. If that happens, the first element
2940 ++ * of the buffer may be either 16 or 32-bits. As such we cannot
2941 ++ * use a simple structure definition to express this data layout.
2942 ++ */
2943 ++ u8 buffer[16] __aligned(8);
2944 + int ret, pos = 0;
2945 +
2946 + mutex_lock(&data->lock);
2947 +diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
2948 +index 4ebf63360a697..9bdb3fd97d264 100644
2949 +--- a/drivers/infiniband/core/cm.c
2950 ++++ b/drivers/infiniband/core/cm.c
2951 +@@ -1443,6 +1443,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
2952 + id.local_id);
2953 + if (IS_ERR(cm_id_priv->timewait_info)) {
2954 + ret = PTR_ERR(cm_id_priv->timewait_info);
2955 ++ cm_id_priv->timewait_info = NULL;
2956 + goto out;
2957 + }
2958 +
2959 +@@ -1969,6 +1970,7 @@ static int cm_req_handler(struct cm_work *work)
2960 + id.local_id);
2961 + if (IS_ERR(cm_id_priv->timewait_info)) {
2962 + ret = PTR_ERR(cm_id_priv->timewait_info);
2963 ++ cm_id_priv->timewait_info = NULL;
2964 + goto destroy;
2965 + }
2966 + cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
2967 +diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
2968 +index 957da3ffe593c..f8c9caa8aad6d 100644
2969 +--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
2970 ++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
2971 +@@ -1838,6 +1838,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
2972 + goto out;
2973 + }
2974 + qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
2975 ++ qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
2976 + qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
2977 + qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
2978 + qp_attr->pkey_index = qplib_qp->pkey_index;
2979 +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
2980 +index 1fd8798d91a73..43c611aa068c7 100644
2981 +--- a/drivers/infiniband/hw/cxgb4/cq.c
2982 ++++ b/drivers/infiniband/hw/cxgb4/cq.c
2983 +@@ -1012,6 +1012,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
2984 +
2985 + rhp = to_c4iw_dev(ibdev);
2986 +
2987 ++ if (entries < 1 || entries > ibdev->attrs.max_cqe)
2988 ++ return ERR_PTR(-EINVAL);
2989 ++
2990 + if (vector >= rhp->rdev.lldi.nciq)
2991 + return ERR_PTR(-EINVAL);
2992 +
2993 +diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
2994 +index a5694dec3f2ee..098653b8157ed 100644
2995 +--- a/drivers/infiniband/hw/mthca/mthca_cq.c
2996 ++++ b/drivers/infiniband/hw/mthca/mthca_cq.c
2997 +@@ -609,7 +609,7 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
2998 + entry->byte_len = MTHCA_ATOMIC_BYTE_LEN;
2999 + break;
3000 + default:
3001 +- entry->opcode = MTHCA_OPCODE_INVALID;
3002 ++ entry->opcode = 0xFF;
3003 + break;
3004 + }
3005 + } else {
3006 +diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
3007 +index 220a3e4717a35..e23575861f287 100644
3008 +--- a/drivers/infiniband/hw/mthca/mthca_dev.h
3009 ++++ b/drivers/infiniband/hw/mthca/mthca_dev.h
3010 +@@ -105,7 +105,6 @@ enum {
3011 + MTHCA_OPCODE_ATOMIC_CS = 0x11,
3012 + MTHCA_OPCODE_ATOMIC_FA = 0x12,
3013 + MTHCA_OPCODE_BIND_MW = 0x18,
3014 +- MTHCA_OPCODE_INVALID = 0xff
3015 + };
3016 +
3017 + enum {
3018 +diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
3019 +index 1c1eae0ef8c28..63db49144f62b 100644
3020 +--- a/drivers/infiniband/sw/rxe/rxe_req.c
3021 ++++ b/drivers/infiniband/sw/rxe/rxe_req.c
3022 +@@ -664,7 +664,8 @@ next_wqe:
3023 + }
3024 +
3025 + if (unlikely(qp_type(qp) == IB_QPT_RC &&
3026 +- qp->req.psn > (qp->comp.psn + RXE_MAX_UNACKED_PSNS))) {
3027 ++ psn_compare(qp->req.psn, (qp->comp.psn +
3028 ++ RXE_MAX_UNACKED_PSNS)) > 0)) {
3029 + qp->req.wait_psn = 1;
3030 + goto exit;
3031 + }
3032 +diff --git a/drivers/input/keyboard/cros_ec_keyb.c b/drivers/input/keyboard/cros_ec_keyb.c
3033 +index d560011815983..1edf0e8322ccc 100644
3034 +--- a/drivers/input/keyboard/cros_ec_keyb.c
3035 ++++ b/drivers/input/keyboard/cros_ec_keyb.c
3036 +@@ -183,6 +183,7 @@ static void cros_ec_keyb_process(struct cros_ec_keyb *ckdev,
3037 + "changed: [r%d c%d]: byte %02x\n",
3038 + row, col, new_state);
3039 +
3040 ++ input_event(idev, EV_MSC, MSC_SCAN, pos);
3041 + input_report_key(idev, keycodes[pos],
3042 + new_state);
3043 + }
3044 +diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
3045 +index aeeef50cef9bb..adb1ecc969eeb 100644
3046 +--- a/drivers/input/keyboard/omap4-keypad.c
3047 ++++ b/drivers/input/keyboard/omap4-keypad.c
3048 +@@ -199,12 +199,8 @@ static int omap4_keypad_open(struct input_dev *input)
3049 + return 0;
3050 + }
3051 +
3052 +-static void omap4_keypad_close(struct input_dev *input)
3053 ++static void omap4_keypad_stop(struct omap4_keypad *keypad_data)
3054 + {
3055 +- struct omap4_keypad *keypad_data = input_get_drvdata(input);
3056 +-
3057 +- disable_irq(keypad_data->irq);
3058 +-
3059 + /* Disable interrupts and wake-up events */
3060 + kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQENABLE,
3061 + OMAP4_VAL_IRQDISABLE);
3062 +@@ -213,7 +209,15 @@ static void omap4_keypad_close(struct input_dev *input)
3063 + /* clear pending interrupts */
3064 + kbd_write_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS,
3065 + kbd_read_irqreg(keypad_data, OMAP4_KBD_IRQSTATUS));
3066 ++}
3067 ++
3068 ++static void omap4_keypad_close(struct input_dev *input)
3069 ++{
3070 ++ struct omap4_keypad *keypad_data;
3071 +
3072 ++ keypad_data = input_get_drvdata(input);
3073 ++ disable_irq(keypad_data->irq);
3074 ++ omap4_keypad_stop(keypad_data);
3075 + enable_irq(keypad_data->irq);
3076 +
3077 + pm_runtime_put_sync(input->dev.parent);
3078 +@@ -236,13 +240,37 @@ static int omap4_keypad_parse_dt(struct device *dev,
3079 + return 0;
3080 + }
3081 +
3082 ++static int omap4_keypad_check_revision(struct device *dev,
3083 ++ struct omap4_keypad *keypad_data)
3084 ++{
3085 ++ unsigned int rev;
3086 ++
3087 ++ rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION);
3088 ++ rev &= 0x03 << 30;
3089 ++ rev >>= 30;
3090 ++ switch (rev) {
3091 ++ case KBD_REVISION_OMAP4:
3092 ++ keypad_data->reg_offset = 0x00;
3093 ++ keypad_data->irqreg_offset = 0x00;
3094 ++ break;
3095 ++ case KBD_REVISION_OMAP5:
3096 ++ keypad_data->reg_offset = 0x10;
3097 ++ keypad_data->irqreg_offset = 0x0c;
3098 ++ break;
3099 ++ default:
3100 ++ dev_err(dev, "Keypad reports unsupported revision %d", rev);
3101 ++ return -EINVAL;
3102 ++ }
3103 ++
3104 ++ return 0;
3105 ++}
3106 ++
3107 + static int omap4_keypad_probe(struct platform_device *pdev)
3108 + {
3109 + struct omap4_keypad *keypad_data;
3110 + struct input_dev *input_dev;
3111 + struct resource *res;
3112 + unsigned int max_keys;
3113 +- int rev;
3114 + int irq;
3115 + int error;
3116 +
3117 +@@ -282,41 +310,33 @@ static int omap4_keypad_probe(struct platform_device *pdev)
3118 + goto err_release_mem;
3119 + }
3120 +
3121 ++ pm_runtime_enable(&pdev->dev);
3122 +
3123 + /*
3124 + * Enable clocks for the keypad module so that we can read
3125 + * revision register.
3126 + */
3127 +- pm_runtime_enable(&pdev->dev);
3128 + error = pm_runtime_get_sync(&pdev->dev);
3129 + if (error) {
3130 + dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
3131 +- goto err_unmap;
3132 +- }
3133 +- rev = __raw_readl(keypad_data->base + OMAP4_KBD_REVISION);
3134 +- rev &= 0x03 << 30;
3135 +- rev >>= 30;
3136 +- switch (rev) {
3137 +- case KBD_REVISION_OMAP4:
3138 +- keypad_data->reg_offset = 0x00;
3139 +- keypad_data->irqreg_offset = 0x00;
3140 +- break;
3141 +- case KBD_REVISION_OMAP5:
3142 +- keypad_data->reg_offset = 0x10;
3143 +- keypad_data->irqreg_offset = 0x0c;
3144 +- break;
3145 +- default:
3146 +- dev_err(&pdev->dev,
3147 +- "Keypad reports unsupported revision %d", rev);
3148 +- error = -EINVAL;
3149 +- goto err_pm_put_sync;
3150 ++ pm_runtime_put_noidle(&pdev->dev);
3151 ++ } else {
3152 ++ error = omap4_keypad_check_revision(&pdev->dev,
3153 ++ keypad_data);
3154 ++ if (!error) {
3155 ++ /* Ensure device does not raise interrupts */
3156 ++ omap4_keypad_stop(keypad_data);
3157 ++ }
3158 ++ pm_runtime_put_sync(&pdev->dev);
3159 + }
3160 ++ if (error)
3161 ++ goto err_pm_disable;
3162 +
3163 + /* input device allocation */
3164 + keypad_data->input = input_dev = input_allocate_device();
3165 + if (!input_dev) {
3166 + error = -ENOMEM;
3167 +- goto err_pm_put_sync;
3168 ++ goto err_pm_disable;
3169 + }
3170 +
3171 + input_dev->name = pdev->name;
3172 +@@ -362,28 +382,25 @@ static int omap4_keypad_probe(struct platform_device *pdev)
3173 + goto err_free_keymap;
3174 + }
3175 +
3176 +- device_init_wakeup(&pdev->dev, true);
3177 +- pm_runtime_put_sync(&pdev->dev);
3178 +-
3179 + error = input_register_device(keypad_data->input);
3180 + if (error < 0) {
3181 + dev_err(&pdev->dev, "failed to register input device\n");
3182 +- goto err_pm_disable;
3183 ++ goto err_free_irq;
3184 + }
3185 +
3186 ++ device_init_wakeup(&pdev->dev, true);
3187 + platform_set_drvdata(pdev, keypad_data);
3188 ++
3189 + return 0;
3190 +
3191 +-err_pm_disable:
3192 +- pm_runtime_disable(&pdev->dev);
3193 ++err_free_irq:
3194 + free_irq(keypad_data->irq, keypad_data);
3195 + err_free_keymap:
3196 + kfree(keypad_data->keymap);
3197 + err_free_input:
3198 + input_free_device(input_dev);
3199 +-err_pm_put_sync:
3200 +- pm_runtime_put_sync(&pdev->dev);
3201 +-err_unmap:
3202 ++err_pm_disable:
3203 ++ pm_runtime_disable(&pdev->dev);
3204 + iounmap(keypad_data->base);
3205 + err_release_mem:
3206 + release_mem_region(res->start, resource_size(res));
3207 +diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
3208 +index 23c191a2a0715..cf4d507efaf6d 100644
3209 +--- a/drivers/input/misc/cm109.c
3210 ++++ b/drivers/input/misc/cm109.c
3211 +@@ -571,12 +571,15 @@ static int cm109_input_open(struct input_dev *idev)
3212 + dev->ctl_data->byte[HID_OR2] = dev->keybit;
3213 + dev->ctl_data->byte[HID_OR3] = 0x00;
3214 +
3215 ++ dev->ctl_urb_pending = 1;
3216 + error = usb_submit_urb(dev->urb_ctl, GFP_KERNEL);
3217 +- if (error)
3218 ++ if (error) {
3219 ++ dev->ctl_urb_pending = 0;
3220 + dev_err(&dev->intf->dev, "%s: usb_submit_urb (urb_ctl) failed %d\n",
3221 + __func__, error);
3222 +- else
3223 ++ } else {
3224 + dev->open = 1;
3225 ++ }
3226 +
3227 + mutex_unlock(&dev->pm_mutex);
3228 +
3229 +diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c
3230 +index c1b524ab46232..ba50f57134239 100644
3231 +--- a/drivers/input/mouse/cyapa_gen6.c
3232 ++++ b/drivers/input/mouse/cyapa_gen6.c
3233 +@@ -573,7 +573,7 @@ static int cyapa_pip_retrieve_data_structure(struct cyapa *cyapa,
3234 +
3235 + memset(&cmd, 0, sizeof(cmd));
3236 + put_unaligned_le16(PIP_OUTPUT_REPORT_ADDR, &cmd.head.addr);
3237 +- put_unaligned_le16(sizeof(cmd), &cmd.head.length - 2);
3238 ++ put_unaligned_le16(sizeof(cmd) - 2, &cmd.head.length);
3239 + cmd.head.report_id = PIP_APP_CMD_REPORT_ID;
3240 + cmd.head.cmd_code = PIP_RETRIEVE_DATA_STRUCTURE;
3241 + put_unaligned_le16(read_offset, &cmd.read_offset);
3242 +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
3243 +index adb8b23a63934..b256e3006a6fb 100644
3244 +--- a/drivers/input/serio/i8042-x86ia64io.h
3245 ++++ b/drivers/input/serio/i8042-x86ia64io.h
3246 +@@ -615,6 +615,48 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
3247 + DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
3248 + },
3249 + },
3250 ++ {
3251 ++ .matches = {
3252 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3253 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A114-31"),
3254 ++ },
3255 ++ },
3256 ++ {
3257 ++ .matches = {
3258 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3259 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A314-31"),
3260 ++ },
3261 ++ },
3262 ++ {
3263 ++ .matches = {
3264 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3265 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-31"),
3266 ++ },
3267 ++ },
3268 ++ {
3269 ++ .matches = {
3270 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3271 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-132"),
3272 ++ },
3273 ++ },
3274 ++ {
3275 ++ .matches = {
3276 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3277 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-332"),
3278 ++ },
3279 ++ },
3280 ++ {
3281 ++ .matches = {
3282 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3283 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire ES1-432"),
3284 ++ },
3285 ++ },
3286 ++ {
3287 ++ .matches = {
3288 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
3289 ++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate Spin B118-RN"),
3290 ++ },
3291 ++ },
3292 + {
3293 + /* Advent 4211 */
3294 + .matches = {
3295 +diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
3296 +index a2f45aefce08a..b536768234b7c 100644
3297 +--- a/drivers/input/touchscreen/ads7846.c
3298 ++++ b/drivers/input/touchscreen/ads7846.c
3299 +@@ -35,6 +35,7 @@
3300 + #include <linux/regulator/consumer.h>
3301 + #include <linux/module.h>
3302 + #include <asm/irq.h>
3303 ++#include <asm/unaligned.h>
3304 +
3305 + /*
3306 + * This code has been heavily tested on a Nokia 770, and lightly
3307 +@@ -199,6 +200,26 @@ struct ads7846 {
3308 + #define REF_ON (READ_12BIT_DFR(x, 1, 1))
3309 + #define REF_OFF (READ_12BIT_DFR(y, 0, 0))
3310 +
3311 ++static int get_pendown_state(struct ads7846 *ts)
3312 ++{
3313 ++ if (ts->get_pendown_state)
3314 ++ return ts->get_pendown_state();
3315 ++
3316 ++ return !gpio_get_value(ts->gpio_pendown);
3317 ++}
3318 ++
3319 ++static void ads7846_report_pen_up(struct ads7846 *ts)
3320 ++{
3321 ++ struct input_dev *input = ts->input;
3322 ++
3323 ++ input_report_key(input, BTN_TOUCH, 0);
3324 ++ input_report_abs(input, ABS_PRESSURE, 0);
3325 ++ input_sync(input);
3326 ++
3327 ++ ts->pendown = false;
3328 ++ dev_vdbg(&ts->spi->dev, "UP\n");
3329 ++}
3330 ++
3331 + /* Must be called with ts->lock held */
3332 + static void ads7846_stop(struct ads7846 *ts)
3333 + {
3334 +@@ -215,6 +236,10 @@ static void ads7846_stop(struct ads7846 *ts)
3335 + static void ads7846_restart(struct ads7846 *ts)
3336 + {
3337 + if (!ts->disabled && !ts->suspended) {
3338 ++ /* Check if pen was released since last stop */
3339 ++ if (ts->pendown && !get_pendown_state(ts))
3340 ++ ads7846_report_pen_up(ts);
3341 ++
3342 + /* Tell IRQ thread that it may poll the device. */
3343 + ts->stopped = false;
3344 + mb();
3345 +@@ -410,7 +435,7 @@ static int ads7845_read12_ser(struct device *dev, unsigned command)
3346 +
3347 + if (status == 0) {
3348 + /* BE12 value, then padding */
3349 +- status = be16_to_cpu(*((u16 *)&req->sample[1]));
3350 ++ status = get_unaligned_be16(&req->sample[1]);
3351 + status = status >> 3;
3352 + status &= 0x0fff;
3353 + }
3354 +@@ -605,14 +630,6 @@ static const struct attribute_group ads784x_attr_group = {
3355 +
3356 + /*--------------------------------------------------------------------------*/
3357 +
3358 +-static int get_pendown_state(struct ads7846 *ts)
3359 +-{
3360 +- if (ts->get_pendown_state)
3361 +- return ts->get_pendown_state();
3362 +-
3363 +- return !gpio_get_value(ts->gpio_pendown);
3364 +-}
3365 +-
3366 + static void null_wait_for_sync(void)
3367 + {
3368 + }
3369 +@@ -785,10 +802,11 @@ static void ads7846_report_state(struct ads7846 *ts)
3370 + /* compute touch pressure resistance using equation #2 */
3371 + Rt = z2;
3372 + Rt -= z1;
3373 +- Rt *= x;
3374 + Rt *= ts->x_plate_ohms;
3375 ++ Rt = DIV_ROUND_CLOSEST(Rt, 16);
3376 ++ Rt *= x;
3377 + Rt /= z1;
3378 +- Rt = (Rt + 2047) >> 12;
3379 ++ Rt = DIV_ROUND_CLOSEST(Rt, 256);
3380 + } else {
3381 + Rt = 0;
3382 + }
3383 +@@ -871,16 +889,8 @@ static irqreturn_t ads7846_irq(int irq, void *handle)
3384 + msecs_to_jiffies(TS_POLL_PERIOD));
3385 + }
3386 +
3387 +- if (ts->pendown && !ts->stopped) {
3388 +- struct input_dev *input = ts->input;
3389 +-
3390 +- input_report_key(input, BTN_TOUCH, 0);
3391 +- input_report_abs(input, ABS_PRESSURE, 0);
3392 +- input_sync(input);
3393 +-
3394 +- ts->pendown = false;
3395 +- dev_vdbg(&ts->spi->dev, "UP\n");
3396 +- }
3397 ++ if (ts->pendown && !ts->stopped)
3398 ++ ads7846_report_pen_up(ts);
3399 +
3400 + return IRQ_HANDLED;
3401 + }
3402 +diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
3403 +index b20ba65992735..7e480e2364216 100644
3404 +--- a/drivers/input/touchscreen/goodix.c
3405 ++++ b/drivers/input/touchscreen/goodix.c
3406 +@@ -136,6 +136,18 @@ static const struct dmi_system_id rotated_screen[] = {
3407 + DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
3408 + },
3409 + },
3410 ++ {
3411 ++ .ident = "Teclast X98 Pro",
3412 ++ .matches = {
3413 ++ /*
3414 ++ * Only match BIOS date, because the manufacturers
3415 ++ * BIOS does not report the board name at all
3416 ++ * (sometimes)...
3417 ++ */
3418 ++ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
3419 ++ DMI_MATCH(DMI_BIOS_DATE, "10/28/2015"),
3420 ++ },
3421 ++ },
3422 + {
3423 + .ident = "WinBook TW100",
3424 + .matches = {
3425 +diff --git a/drivers/irqchip/irq-alpine-msi.c b/drivers/irqchip/irq-alpine-msi.c
3426 +index 23a3b877f7f1d..ede02dc2bcd0b 100644
3427 +--- a/drivers/irqchip/irq-alpine-msi.c
3428 ++++ b/drivers/irqchip/irq-alpine-msi.c
3429 +@@ -165,8 +165,7 @@ static int alpine_msix_middle_domain_alloc(struct irq_domain *domain,
3430 + return 0;
3431 +
3432 + err_sgi:
3433 +- while (--i >= 0)
3434 +- irq_domain_free_irqs_parent(domain, virq, i);
3435 ++ irq_domain_free_irqs_parent(domain, virq, i - 1);
3436 + alpine_msix_free_sgi(priv, sgi, nr_irqs);
3437 + return err;
3438 + }
3439 +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
3440 +index d5cc32e80f5e2..cd58c123f547e 100644
3441 +--- a/drivers/irqchip/irq-gic-v3-its.c
3442 ++++ b/drivers/irqchip/irq-gic-v3-its.c
3443 +@@ -49,7 +49,6 @@
3444 + #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
3445 + #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
3446 + #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
3447 +-#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
3448 +
3449 + #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
3450 +
3451 +@@ -3240,9 +3239,6 @@ static int its_save_disable(void)
3452 + list_for_each_entry(its, &its_nodes, entry) {
3453 + void __iomem *base;
3454 +
3455 +- if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3456 +- continue;
3457 +-
3458 + base = its->base;
3459 + its->ctlr_save = readl_relaxed(base + GITS_CTLR);
3460 + err = its_force_quiescent(base);
3461 +@@ -3261,9 +3257,6 @@ err:
3462 + list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
3463 + void __iomem *base;
3464 +
3465 +- if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3466 +- continue;
3467 +-
3468 + base = its->base;
3469 + writel_relaxed(its->ctlr_save, base + GITS_CTLR);
3470 + }
3471 +@@ -3283,9 +3276,6 @@ static void its_restore_enable(void)
3472 + void __iomem *base;
3473 + int i;
3474 +
3475 +- if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
3476 +- continue;
3477 +-
3478 + base = its->base;
3479 +
3480 + /*
3481 +@@ -3293,7 +3283,10 @@ static void its_restore_enable(void)
3482 + * don't restore it since writing to CBASER or BASER<n>
3483 + * registers is undefined according to the GIC v3 ITS
3484 + * Specification.
3485 ++ *
3486 ++ * Firmware resuming with the ITS enabled is terminally broken.
3487 + */
3488 ++ WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE);
3489 + ret = its_force_quiescent(base);
3490 + if (ret) {
3491 + pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
3492 +@@ -3558,9 +3551,6 @@ static int __init its_probe_one(struct resource *res,
3493 + ctlr |= GITS_CTLR_ImDe;
3494 + writel_relaxed(ctlr, its->base + GITS_CTLR);
3495 +
3496 +- if (GITS_TYPER_HCC(typer))
3497 +- its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;
3498 +-
3499 + err = its_init_domain(handle, its);
3500 + if (err)
3501 + goto out_free_tables;
3502 +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
3503 +index f666778ad2372..439277f48ff8b 100644
3504 +--- a/drivers/md/dm-ioctl.c
3505 ++++ b/drivers/md/dm-ioctl.c
3506 +@@ -1575,6 +1575,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
3507 +
3508 + if (!argc) {
3509 + DMWARN("Empty message received.");
3510 ++ r = -EINVAL;
3511 + goto out_argv;
3512 + }
3513 +
3514 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
3515 +index 36275c59e4e7b..f849db3035a05 100644
3516 +--- a/drivers/md/dm-table.c
3517 ++++ b/drivers/md/dm-table.c
3518 +@@ -1336,12 +1336,6 @@ void dm_table_event_callback(struct dm_table *t,
3519 +
3520 + void dm_table_event(struct dm_table *t)
3521 + {
3522 +- /*
3523 +- * You can no longer call dm_table_event() from interrupt
3524 +- * context, use a bottom half instead.
3525 +- */
3526 +- BUG_ON(in_interrupt());
3527 +-
3528 + mutex_lock(&_event_lock);
3529 + if (t->event_fn)
3530 + t->event_fn(t->event_context);
3531 +diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
3532 +index 4522e87d9d68d..107f36b9155fa 100644
3533 +--- a/drivers/md/md-cluster.c
3534 ++++ b/drivers/md/md-cluster.c
3535 +@@ -669,9 +669,27 @@ out:
3536 + * Takes the lock on the TOKEN lock resource so no other
3537 + * node can communicate while the operation is underway.
3538 + */
3539 +-static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
3540 ++static int lock_token(struct md_cluster_info *cinfo)
3541 + {
3542 +- int error, set_bit = 0;
3543 ++ int error;
3544 ++
3545 ++ error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
3546 ++ if (error) {
3547 ++ pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
3548 ++ __func__, __LINE__, error);
3549 ++ } else {
3550 ++ /* Lock the receive sequence */
3551 ++ mutex_lock(&cinfo->recv_mutex);
3552 ++ }
3553 ++ return error;
3554 ++}
3555 ++
3556 ++/* lock_comm()
3557 ++ * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
3558 ++ */
3559 ++static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
3560 ++{
3561 ++ int rv, set_bit = 0;
3562 + struct mddev *mddev = cinfo->mddev;
3563 +
3564 + /*
3565 +@@ -682,34 +700,19 @@ static int lock_token(struct md_cluster_info *cinfo, bool mddev_locked)
3566 + */
3567 + if (mddev_locked && !test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
3568 + &cinfo->state)) {
3569 +- error = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
3570 ++ rv = test_and_set_bit_lock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD,
3571 + &cinfo->state);
3572 +- WARN_ON_ONCE(error);
3573 ++ WARN_ON_ONCE(rv);
3574 + md_wakeup_thread(mddev->thread);
3575 + set_bit = 1;
3576 + }
3577 +- error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
3578 +- if (set_bit)
3579 +- clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
3580 +
3581 +- if (error)
3582 +- pr_err("md-cluster(%s:%d): failed to get EX on TOKEN (%d)\n",
3583 +- __func__, __LINE__, error);
3584 +-
3585 +- /* Lock the receive sequence */
3586 +- mutex_lock(&cinfo->recv_mutex);
3587 +- return error;
3588 +-}
3589 +-
3590 +-/* lock_comm()
3591 +- * Sets the MD_CLUSTER_SEND_LOCK bit to lock the send channel.
3592 +- */
3593 +-static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
3594 +-{
3595 + wait_event(cinfo->wait,
3596 + !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
3597 +-
3598 +- return lock_token(cinfo, mddev_locked);
3599 ++ rv = lock_token(cinfo);
3600 ++ if (set_bit)
3601 ++ clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
3602 ++ return rv;
3603 + }
3604 +
3605 + static void unlock_comm(struct md_cluster_info *cinfo)
3606 +@@ -789,9 +792,11 @@ static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg,
3607 + {
3608 + int ret;
3609 +
3610 +- lock_comm(cinfo, mddev_locked);
3611 +- ret = __sendmsg(cinfo, cmsg);
3612 +- unlock_comm(cinfo);
3613 ++ ret = lock_comm(cinfo, mddev_locked);
3614 ++ if (!ret) {
3615 ++ ret = __sendmsg(cinfo, cmsg);
3616 ++ unlock_comm(cinfo);
3617 ++ }
3618 + return ret;
3619 + }
3620 +
3621 +@@ -1063,7 +1068,7 @@ static int metadata_update_start(struct mddev *mddev)
3622 + return 0;
3623 + }
3624 +
3625 +- ret = lock_token(cinfo, 1);
3626 ++ ret = lock_token(cinfo);
3627 + clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
3628 + return ret;
3629 + }
3630 +@@ -1181,7 +1186,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
3631 + int raid_slot = -1;
3632 +
3633 + md_update_sb(mddev, 1);
3634 +- lock_comm(cinfo, 1);
3635 ++ if (lock_comm(cinfo, 1)) {
3636 ++ pr_err("%s: lock_comm failed\n", __func__);
3637 ++ return;
3638 ++ }
3639 +
3640 + memset(&cmsg, 0, sizeof(cmsg));
3641 + cmsg.type = cpu_to_le32(METADATA_UPDATED);
3642 +@@ -1330,7 +1338,8 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev)
3643 + cmsg.type = cpu_to_le32(NEWDISK);
3644 + memcpy(cmsg.uuid, uuid, 16);
3645 + cmsg.raid_slot = cpu_to_le32(rdev->desc_nr);
3646 +- lock_comm(cinfo, 1);
3647 ++ if (lock_comm(cinfo, 1))
3648 ++ return -EAGAIN;
3649 + ret = __sendmsg(cinfo, &cmsg);
3650 + if (ret) {
3651 + unlock_comm(cinfo);
3652 +diff --git a/drivers/md/md.c b/drivers/md/md.c
3653 +index a4e7e6c025d9c..80ca13594c182 100644
3654 +--- a/drivers/md/md.c
3655 ++++ b/drivers/md/md.c
3656 +@@ -6564,8 +6564,10 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev)
3657 + goto busy;
3658 +
3659 + kick_rdev:
3660 +- if (mddev_is_clustered(mddev))
3661 +- md_cluster_ops->remove_disk(mddev, rdev);
3662 ++ if (mddev_is_clustered(mddev)) {
3663 ++ if (md_cluster_ops->remove_disk(mddev, rdev))
3664 ++ goto busy;
3665 ++ }
3666 +
3667 + md_kick_rdev_from_array(rdev);
3668 + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
3669 +@@ -6895,6 +6897,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
3670 + return -EINVAL;
3671 + if (mddev->sync_thread ||
3672 + test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
3673 ++ test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
3674 + mddev->reshape_position != MaxSector)
3675 + return -EBUSY;
3676 +
3677 +@@ -7214,8 +7217,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
3678 + err = -EBUSY;
3679 + goto out;
3680 + }
3681 +- WARN_ON_ONCE(test_bit(MD_CLOSING, &mddev->flags));
3682 +- set_bit(MD_CLOSING, &mddev->flags);
3683 ++ if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
3684 ++ mutex_unlock(&mddev->open_mutex);
3685 ++ err = -EBUSY;
3686 ++ goto out;
3687 ++ }
3688 + did_set_md_closing = true;
3689 + mutex_unlock(&mddev->open_mutex);
3690 + sync_blockdev(bdev);
3691 +@@ -9238,8 +9244,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
3692 + }
3693 + }
3694 +
3695 +- if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
3696 +- update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
3697 ++ if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) {
3698 ++ ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
3699 ++ if (ret)
3700 ++ pr_warn("md: updating array disks failed. %d\n", ret);
3701 ++ }
3702 +
3703 + /* Finally set the event to be up to date */
3704 + mddev->events = le64_to_cpu(sb->events);
3705 +diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
3706 +index 43cfd1dbda014..afca47b97c2a2 100644
3707 +--- a/drivers/media/common/siano/smsdvb-main.c
3708 ++++ b/drivers/media/common/siano/smsdvb-main.c
3709 +@@ -1180,12 +1180,15 @@ static int smsdvb_hotplug(struct smscore_device_t *coredev,
3710 + rc = dvb_create_media_graph(&client->adapter, true);
3711 + if (rc < 0) {
3712 + pr_err("dvb_create_media_graph failed %d\n", rc);
3713 +- goto client_error;
3714 ++ goto media_graph_error;
3715 + }
3716 +
3717 + pr_info("DVB interface registered.\n");
3718 + return 0;
3719 +
3720 ++media_graph_error:
3721 ++ smsdvb_debugfs_release(client);
3722 ++
3723 + client_error:
3724 + dvb_unregister_frontend(&client->frontend);
3725 +
3726 +diff --git a/drivers/media/i2c/max2175.c b/drivers/media/i2c/max2175.c
3727 +index 008a082cb8ad7..dddc5ef50dd4e 100644
3728 +--- a/drivers/media/i2c/max2175.c
3729 ++++ b/drivers/media/i2c/max2175.c
3730 +@@ -511,7 +511,7 @@ static void max2175_set_bbfilter(struct max2175 *ctx)
3731 + }
3732 + }
3733 +
3734 +-static bool max2175_set_csm_mode(struct max2175 *ctx,
3735 ++static int max2175_set_csm_mode(struct max2175 *ctx,
3736 + enum max2175_csm_mode new_mode)
3737 + {
3738 + int ret = max2175_poll_csm_ready(ctx);
3739 +diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
3740 +index ca1a4d8e972ec..2ad2870c03ae2 100644
3741 +--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
3742 ++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
3743 +@@ -801,6 +801,7 @@ static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
3744 + atomic_dec(&q->bufs_queued);
3745 + vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
3746 + state);
3747 ++ q->bufs[i] = NULL;
3748 + }
3749 + }
3750 + }
3751 +@@ -1245,29 +1246,15 @@ static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
3752 + struct v4l2_subdev_format *fmt)
3753 + {
3754 + struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
3755 +- struct v4l2_subdev_format format;
3756 +- int ret;
3757 +-
3758 +- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
3759 +- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
3760 +- return 0;
3761 +- }
3762 +
3763 +- if (fmt->pad == CIO2_PAD_SINK) {
3764 +- format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
3765 +- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
3766 +- &format);
3767 ++ mutex_lock(&q->subdev_lock);
3768 +
3769 +- if (ret)
3770 +- return ret;
3771 +- /* update colorspace etc */
3772 +- q->subdev_fmt.colorspace = format.format.colorspace;
3773 +- q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
3774 +- q->subdev_fmt.quantization = format.format.quantization;
3775 +- q->subdev_fmt.xfer_func = format.format.xfer_func;
3776 +- }
3777 ++ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
3778 ++ fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
3779 ++ else
3780 ++ fmt->format = q->subdev_fmt;
3781 +
3782 +- fmt->format = q->subdev_fmt;
3783 ++ mutex_unlock(&q->subdev_lock);
3784 +
3785 + return 0;
3786 + }
3787 +@@ -1284,6 +1271,9 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
3788 + struct v4l2_subdev_format *fmt)
3789 + {
3790 + struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
3791 ++ struct v4l2_mbus_framefmt *mbus;
3792 ++ u32 mbus_code = fmt->format.code;
3793 ++ unsigned int i;
3794 +
3795 + /*
3796 + * Only allow setting sink pad format;
3797 +@@ -1292,16 +1282,29 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
3798 + if (fmt->pad == CIO2_PAD_SOURCE)
3799 + return cio2_subdev_get_fmt(sd, cfg, fmt);
3800 +
3801 +- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
3802 +- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
3803 +- } else {
3804 +- /* It's the sink, allow changing frame size */
3805 +- q->subdev_fmt.width = fmt->format.width;
3806 +- q->subdev_fmt.height = fmt->format.height;
3807 +- q->subdev_fmt.code = fmt->format.code;
3808 +- fmt->format = q->subdev_fmt;
3809 ++ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
3810 ++ mbus = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
3811 ++ else
3812 ++ mbus = &q->subdev_fmt;
3813 ++
3814 ++ fmt->format.code = formats[0].mbus_code;
3815 ++
3816 ++ for (i = 0; i < ARRAY_SIZE(formats); i++) {
3817 ++ if (formats[i].mbus_code == fmt->format.code) {
3818 ++ fmt->format.code = mbus_code;
3819 ++ break;
3820 ++ }
3821 + }
3822 +
3823 ++ fmt->format.width = min_t(u32, fmt->format.width, CIO2_IMAGE_MAX_WIDTH);
3824 ++ fmt->format.height = min_t(u32, fmt->format.height,
3825 ++ CIO2_IMAGE_MAX_LENGTH);
3826 ++ fmt->format.field = V4L2_FIELD_NONE;
3827 ++
3828 ++ mutex_lock(&q->subdev_lock);
3829 ++ *mbus = fmt->format;
3830 ++ mutex_unlock(&q->subdev_lock);
3831 ++
3832 + return 0;
3833 + }
3834 +
3835 +@@ -1549,6 +1552,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
3836 +
3837 + /* Initialize miscellaneous variables */
3838 + mutex_init(&q->lock);
3839 ++ mutex_init(&q->subdev_lock);
3840 +
3841 + /* Initialize formats to default values */
3842 + fmt = &q->subdev_fmt;
3843 +@@ -1666,6 +1670,7 @@ fail_vdev_media_entity:
3844 + fail_subdev_media_entity:
3845 + cio2_fbpt_exit(q, &cio2->pci_dev->dev);
3846 + fail_fbpt:
3847 ++ mutex_destroy(&q->subdev_lock);
3848 + mutex_destroy(&q->lock);
3849 +
3850 + return r;
3851 +@@ -1679,6 +1684,7 @@ static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
3852 + v4l2_device_unregister_subdev(&q->subdev);
3853 + media_entity_cleanup(&q->subdev.entity);
3854 + cio2_fbpt_exit(q, &cio2->pci_dev->dev);
3855 ++ mutex_destroy(&q->subdev_lock);
3856 + mutex_destroy(&q->lock);
3857 + }
3858 +
3859 +diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
3860 +index 240635be7a317..b73c016d8a1bc 100644
3861 +--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
3862 ++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
3863 +@@ -334,6 +334,7 @@ struct cio2_queue {
3864 +
3865 + /* Subdev, /dev/v4l-subdevX */
3866 + struct v4l2_subdev subdev;
3867 ++ struct mutex subdev_lock; /* Serialise acces to subdev_fmt field */
3868 + struct media_pad subdev_pads[CIO2_PADS];
3869 + struct v4l2_mbus_framefmt subdev_fmt;
3870 + atomic_t frame_sequence;
3871 +diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
3872 +index f33c0de3e8490..019bbc18cede6 100644
3873 +--- a/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
3874 ++++ b/drivers/media/pci/netup_unidvb/netup_unidvb_spi.c
3875 +@@ -184,7 +184,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev)
3876 + struct spi_master *master;
3877 + struct netup_spi *nspi;
3878 +
3879 +- master = spi_alloc_master(&ndev->pci_dev->dev,
3880 ++ master = devm_spi_alloc_master(&ndev->pci_dev->dev,
3881 + sizeof(struct netup_spi));
3882 + if (!master) {
3883 + dev_err(&ndev->pci_dev->dev,
3884 +@@ -217,6 +217,7 @@ int netup_spi_init(struct netup_unidvb_dev *ndev)
3885 + ndev->pci_slot,
3886 + ndev->pci_func);
3887 + if (!spi_new_device(master, &netup_spi_board)) {
3888 ++ spi_unregister_master(master);
3889 + ndev->spi = NULL;
3890 + dev_err(&ndev->pci_dev->dev,
3891 + "%s(): unable to create SPI device\n", __func__);
3892 +@@ -235,13 +236,13 @@ void netup_spi_release(struct netup_unidvb_dev *ndev)
3893 + if (!spi)
3894 + return;
3895 +
3896 ++ spi_unregister_master(spi->master);
3897 + spin_lock_irqsave(&spi->lock, flags);
3898 + reg = readw(&spi->regs->control_stat);
3899 + writew(reg | NETUP_SPI_CTRL_IRQ, &spi->regs->control_stat);
3900 + reg = readw(&spi->regs->control_stat);
3901 + writew(reg & ~NETUP_SPI_CTRL_IMASK, &spi->regs->control_stat);
3902 + spin_unlock_irqrestore(&spi->lock, flags);
3903 +- spi_unregister_master(spi->master);
3904 + ndev->spi = NULL;
3905 + }
3906 +
3907 +diff --git a/drivers/media/pci/saa7146/mxb.c b/drivers/media/pci/saa7146/mxb.c
3908 +index 6b5582b7c5955..6e25654da2567 100644
3909 +--- a/drivers/media/pci/saa7146/mxb.c
3910 ++++ b/drivers/media/pci/saa7146/mxb.c
3911 +@@ -653,16 +653,17 @@ static int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *
3912 + struct mxb *mxb = (struct mxb *)dev->ext_priv;
3913 +
3914 + DEB_D("VIDIOC_S_AUDIO %d\n", a->index);
3915 +- if (mxb_inputs[mxb->cur_input].audioset & (1 << a->index)) {
3916 +- if (mxb->cur_audinput != a->index) {
3917 +- mxb->cur_audinput = a->index;
3918 +- tea6420_route(mxb, a->index);
3919 +- if (mxb->cur_audinput == 0)
3920 +- mxb_update_audmode(mxb);
3921 +- }
3922 +- return 0;
3923 ++ if (a->index >= 32 ||
3924 ++ !(mxb_inputs[mxb->cur_input].audioset & (1 << a->index)))
3925 ++ return -EINVAL;
3926 ++
3927 ++ if (mxb->cur_audinput != a->index) {
3928 ++ mxb->cur_audinput = a->index;
3929 ++ tea6420_route(mxb, a->index);
3930 ++ if (mxb->cur_audinput == 0)
3931 ++ mxb_update_audmode(mxb);
3932 + }
3933 +- return -EINVAL;
3934 ++ return 0;
3935 + }
3936 +
3937 + #ifdef CONFIG_VIDEO_ADV_DEBUG
3938 +diff --git a/drivers/media/pci/solo6x10/solo6x10-g723.c b/drivers/media/pci/solo6x10/solo6x10-g723.c
3939 +index 2ac33b5cc4546..f06e6d35d846c 100644
3940 +--- a/drivers/media/pci/solo6x10/solo6x10-g723.c
3941 ++++ b/drivers/media/pci/solo6x10/solo6x10-g723.c
3942 +@@ -410,7 +410,7 @@ int solo_g723_init(struct solo_dev *solo_dev)
3943 +
3944 + ret = snd_ctl_add(card, snd_ctl_new1(&kctl, solo_dev));
3945 + if (ret < 0)
3946 +- return ret;
3947 ++ goto snd_error;
3948 +
3949 + ret = solo_snd_pcm_init(solo_dev);
3950 + if (ret < 0)
3951 +diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
3952 +index 79ca03ac449c3..3f64119e8c082 100644
3953 +--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
3954 ++++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_pm.c
3955 +@@ -103,6 +103,7 @@ int mtk_vcodec_init_dec_pm(struct mtk_vcodec_dev *mtkdev)
3956 + void mtk_vcodec_release_dec_pm(struct mtk_vcodec_dev *dev)
3957 + {
3958 + pm_runtime_disable(dev->pm.dev);
3959 ++ put_device(dev->pm.larbvdec);
3960 + }
3961 +
3962 + void mtk_vcodec_dec_pw_on(struct mtk_vcodec_pm *pm)
3963 +diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
3964 +index f500cea228a95..0114e81fa6fa2 100644
3965 +--- a/drivers/media/rc/sunxi-cir.c
3966 ++++ b/drivers/media/rc/sunxi-cir.c
3967 +@@ -129,6 +129,8 @@ static irqreturn_t sunxi_ir_irq(int irqno, void *dev_id)
3968 + } else if (status & REG_RXINT_RPEI_EN) {
3969 + ir_raw_event_set_idle(ir->rc, true);
3970 + ir_raw_event_handle(ir->rc);
3971 ++ } else {
3972 ++ ir_raw_event_handle(ir->rc);
3973 + }
3974 +
3975 + spin_unlock(&ir->ir_lock);
3976 +diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
3977 +index c9a2b29a60a57..93212ed80bf85 100644
3978 +--- a/drivers/media/usb/gspca/gspca.c
3979 ++++ b/drivers/media/usb/gspca/gspca.c
3980 +@@ -1585,6 +1585,7 @@ out:
3981 + input_unregister_device(gspca_dev->input_dev);
3982 + #endif
3983 + v4l2_ctrl_handler_free(gspca_dev->vdev.ctrl_handler);
3984 ++ v4l2_device_unregister(&gspca_dev->v4l2_dev);
3985 + kfree(gspca_dev->usb_buf);
3986 + kfree(gspca_dev);
3987 + return ret;
3988 +diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c
3989 +index 65ef755adfdc1..b2adde978c9be 100644
3990 +--- a/drivers/media/usb/msi2500/msi2500.c
3991 ++++ b/drivers/media/usb/msi2500/msi2500.c
3992 +@@ -1250,7 +1250,7 @@ static int msi2500_probe(struct usb_interface *intf,
3993 + }
3994 +
3995 + dev->master = master;
3996 +- master->bus_num = 0;
3997 ++ master->bus_num = -1;
3998 + master->num_chipselect = 1;
3999 + master->transfer_one_message = msi2500_transfer_one_message;
4000 + spi_master_set_devdata(master, dev);
4001 +diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
4002 +index 96055de6e8ce2..62f0128419717 100644
4003 +--- a/drivers/media/usb/tm6000/tm6000-video.c
4004 ++++ b/drivers/media/usb/tm6000/tm6000-video.c
4005 +@@ -463,11 +463,12 @@ static int tm6000_alloc_urb_buffers(struct tm6000_core *dev)
4006 + if (dev->urb_buffer)
4007 + return 0;
4008 +
4009 +- dev->urb_buffer = kmalloc_array(num_bufs, sizeof(void *), GFP_KERNEL);
4010 ++ dev->urb_buffer = kmalloc_array(num_bufs, sizeof(*dev->urb_buffer),
4011 ++ GFP_KERNEL);
4012 + if (!dev->urb_buffer)
4013 + return -ENOMEM;
4014 +
4015 +- dev->urb_dma = kmalloc_array(num_bufs, sizeof(dma_addr_t *),
4016 ++ dev->urb_dma = kmalloc_array(num_bufs, sizeof(*dev->urb_dma),
4017 + GFP_KERNEL);
4018 + if (!dev->urb_dma)
4019 + return -ENOMEM;
4020 +diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
4021 +index b1564cacd19e1..20ae8652adf44 100644
4022 +--- a/drivers/memstick/core/memstick.c
4023 ++++ b/drivers/memstick/core/memstick.c
4024 +@@ -469,7 +469,6 @@ static void memstick_check(struct work_struct *work)
4025 + host->card = card;
4026 + if (device_register(&card->dev)) {
4027 + put_device(&card->dev);
4028 +- kfree(host->card);
4029 + host->card = NULL;
4030 + }
4031 + } else
4032 +diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
4033 +index 627d6e62fe313..4559593ecd5a9 100644
4034 +--- a/drivers/memstick/host/r592.c
4035 ++++ b/drivers/memstick/host/r592.c
4036 +@@ -762,8 +762,10 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4037 + goto error3;
4038 +
4039 + dev->mmio = pci_ioremap_bar(pdev, 0);
4040 +- if (!dev->mmio)
4041 ++ if (!dev->mmio) {
4042 ++ error = -ENOMEM;
4043 + goto error4;
4044 ++ }
4045 +
4046 + dev->irq = pdev->irq;
4047 + spin_lock_init(&dev->irq_lock);
4048 +@@ -789,12 +791,14 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4049 + &dev->dummy_dma_page_physical_address, GFP_KERNEL);
4050 + r592_stop_dma(dev , 0);
4051 +
4052 +- if (request_irq(dev->irq, &r592_irq, IRQF_SHARED,
4053 +- DRV_NAME, dev))
4054 ++ error = request_irq(dev->irq, &r592_irq, IRQF_SHARED,
4055 ++ DRV_NAME, dev);
4056 ++ if (error)
4057 + goto error6;
4058 +
4059 + r592_update_card_detect(dev);
4060 +- if (memstick_add_host(host))
4061 ++ error = memstick_add_host(host);
4062 ++ if (error)
4063 + goto error7;
4064 +
4065 + message("driver successfully loaded");
4066 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
4067 +index c723a1e54b188..90656b625b9a6 100644
4068 +--- a/drivers/mmc/core/block.c
4069 ++++ b/drivers/mmc/core/block.c
4070 +@@ -631,7 +631,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
4071 +
4072 + memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
4073 +
4074 +- if (idata->rpmb || (cmd.flags & MMC_RSP_R1B)) {
4075 ++ if (idata->rpmb || (cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
4076 + /*
4077 + * Ensure RPMB/R1B command has completed by polling CMD13
4078 + * "Send Status".
4079 +diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
4080 +index c29205ee82e20..c24a26fbbffbb 100644
4081 +--- a/drivers/mtd/cmdlinepart.c
4082 ++++ b/drivers/mtd/cmdlinepart.c
4083 +@@ -231,7 +231,7 @@ static int mtdpart_setup_real(char *s)
4084 + struct cmdline_mtd_partition *this_mtd;
4085 + struct mtd_partition *parts;
4086 + int mtd_id_len, num_parts;
4087 +- char *p, *mtd_id, *semicol;
4088 ++ char *p, *mtd_id, *semicol, *open_parenth;
4089 +
4090 + /*
4091 + * Replace the first ';' by a NULL char so strrchr can work
4092 +@@ -241,6 +241,14 @@ static int mtdpart_setup_real(char *s)
4093 + if (semicol)
4094 + *semicol = '\0';
4095 +
4096 ++ /*
4097 ++ * make sure that part-names with ":" will not be handled as
4098 ++ * part of the mtd-id with an ":"
4099 ++ */
4100 ++ open_parenth = strchr(s, '(');
4101 ++ if (open_parenth)
4102 ++ *open_parenth = '\0';
4103 ++
4104 + mtd_id = s;
4105 +
4106 + /*
4107 +@@ -250,6 +258,10 @@ static int mtdpart_setup_real(char *s)
4108 + */
4109 + p = strrchr(s, ':');
4110 +
4111 ++ /* Restore the '(' now. */
4112 ++ if (open_parenth)
4113 ++ *open_parenth = '(';
4114 ++
4115 + /* Restore the ';' now. */
4116 + if (semicol)
4117 + *semicol = ';';
4118 +diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
4119 +index 9fcbcf4b217b6..2726f18242333 100644
4120 +--- a/drivers/mtd/nand/raw/qcom_nandc.c
4121 ++++ b/drivers/mtd/nand/raw/qcom_nandc.c
4122 +@@ -1578,6 +1578,8 @@ static int check_flash_errors(struct qcom_nand_host *host, int cw_cnt)
4123 + struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
4124 + int i;
4125 +
4126 ++ nandc_read_buffer_sync(nandc, true);
4127 ++
4128 + for (i = 0; i < cw_cnt; i++) {
4129 + u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
4130 +
4131 +diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
4132 +index 1d61ae7aaa66c..83954f424d413 100644
4133 +--- a/drivers/mtd/nand/spi/core.c
4134 ++++ b/drivers/mtd/nand/spi/core.c
4135 +@@ -378,6 +378,10 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
4136 + }
4137 + }
4138 +
4139 ++ if (req->ooblen)
4140 ++ memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs,
4141 ++ req->ooblen);
4142 ++
4143 + return 0;
4144 + }
4145 +
4146 +diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
4147 +index e226961905830..bed5ffa75b276 100644
4148 +--- a/drivers/net/can/softing/softing_main.c
4149 ++++ b/drivers/net/can/softing/softing_main.c
4150 +@@ -393,8 +393,13 @@ static int softing_netdev_open(struct net_device *ndev)
4151 +
4152 + /* check or determine and set bittime */
4153 + ret = open_candev(ndev);
4154 +- if (!ret)
4155 +- ret = softing_startstop(ndev, 1);
4156 ++ if (ret)
4157 ++ return ret;
4158 ++
4159 ++ ret = softing_startstop(ndev, 1);
4160 ++ if (ret < 0)
4161 ++ close_candev(ndev);
4162 ++
4163 + return ret;
4164 + }
4165 +
4166 +diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
4167 +index c458b81ba63af..d249a4309da2f 100644
4168 +--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
4169 ++++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
4170 +@@ -847,13 +847,13 @@ static int emac_probe(struct platform_device *pdev)
4171 + db->clk = devm_clk_get(&pdev->dev, NULL);
4172 + if (IS_ERR(db->clk)) {
4173 + ret = PTR_ERR(db->clk);
4174 +- goto out_iounmap;
4175 ++ goto out_dispose_mapping;
4176 + }
4177 +
4178 + ret = clk_prepare_enable(db->clk);
4179 + if (ret) {
4180 + dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
4181 +- goto out_iounmap;
4182 ++ goto out_dispose_mapping;
4183 + }
4184 +
4185 + ret = sunxi_sram_claim(&pdev->dev);
4186 +@@ -910,6 +910,8 @@ out_release_sram:
4187 + sunxi_sram_release(&pdev->dev);
4188 + out_clk_disable_unprepare:
4189 + clk_disable_unprepare(db->clk);
4190 ++out_dispose_mapping:
4191 ++ irq_dispose_mapping(ndev->irq);
4192 + out_iounmap:
4193 + iounmap(db->membase);
4194 + out:
4195 +@@ -928,6 +930,7 @@ static int emac_remove(struct platform_device *pdev)
4196 + unregister_netdev(ndev);
4197 + sunxi_sram_release(&pdev->dev);
4198 + clk_disable_unprepare(db->clk);
4199 ++ irq_dispose_mapping(ndev->irq);
4200 + iounmap(db->membase);
4201 + free_netdev(ndev);
4202 +
4203 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
4204 +index c7667017c1a3f..c3e824f5e50e8 100644
4205 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
4206 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
4207 +@@ -3593,8 +3593,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
4208 + clk_disable_unprepare(priv->clk);
4209 +
4210 + err = register_netdev(dev);
4211 +- if (err)
4212 ++ if (err) {
4213 ++ bcmgenet_mii_exit(dev);
4214 + goto err;
4215 ++ }
4216 +
4217 + return err;
4218 +
4219 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4220 +index 4243ff4ec4b1d..faee77fa08044 100644
4221 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4222 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
4223 +@@ -1943,7 +1943,8 @@ static inline bool ixgbe_page_is_reserved(struct page *page)
4224 + return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
4225 + }
4226 +
4227 +-static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
4228 ++static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer,
4229 ++ int rx_buffer_pgcnt)
4230 + {
4231 + unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
4232 + struct page *page = rx_buffer->page;
4233 +@@ -1954,7 +1955,7 @@ static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer)
4234 +
4235 + #if (PAGE_SIZE < 8192)
4236 + /* if we are only owner of page we can reuse it */
4237 +- if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
4238 ++ if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1))
4239 + return false;
4240 + #else
4241 + /* The last offset is a bit aggressive in that we assume the
4242 +@@ -2019,11 +2020,18 @@ static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
4243 + static struct ixgbe_rx_buffer *ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring,
4244 + union ixgbe_adv_rx_desc *rx_desc,
4245 + struct sk_buff **skb,
4246 +- const unsigned int size)
4247 ++ const unsigned int size,
4248 ++ int *rx_buffer_pgcnt)
4249 + {
4250 + struct ixgbe_rx_buffer *rx_buffer;
4251 +
4252 + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
4253 ++ *rx_buffer_pgcnt =
4254 ++#if (PAGE_SIZE < 8192)
4255 ++ page_count(rx_buffer->page);
4256 ++#else
4257 ++ 0;
4258 ++#endif
4259 + prefetchw(rx_buffer->page);
4260 + *skb = rx_buffer->skb;
4261 +
4262 +@@ -2053,9 +2061,10 @@ skip_sync:
4263 +
4264 + static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
4265 + struct ixgbe_rx_buffer *rx_buffer,
4266 +- struct sk_buff *skb)
4267 ++ struct sk_buff *skb,
4268 ++ int rx_buffer_pgcnt)
4269 + {
4270 +- if (ixgbe_can_reuse_rx_page(rx_buffer)) {
4271 ++ if (ixgbe_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) {
4272 + /* hand second half of page back to the ring */
4273 + ixgbe_reuse_rx_page(rx_ring, rx_buffer);
4274 + } else {
4275 +@@ -2299,6 +2308,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
4276 + union ixgbe_adv_rx_desc *rx_desc;
4277 + struct ixgbe_rx_buffer *rx_buffer;
4278 + struct sk_buff *skb;
4279 ++ int rx_buffer_pgcnt;
4280 + unsigned int size;
4281 +
4282 + /* return some buffers to hardware, one at a time is too slow */
4283 +@@ -2318,7 +2328,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
4284 + */
4285 + dma_rmb();
4286 +
4287 +- rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size);
4288 ++ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size, &rx_buffer_pgcnt);
4289 +
4290 + /* retrieve a buffer from the ring */
4291 + if (!skb) {
4292 +@@ -2360,7 +2370,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
4293 + break;
4294 + }
4295 +
4296 +- ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb);
4297 ++ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb, rx_buffer_pgcnt);
4298 + cleaned_count++;
4299 +
4300 + /* place incomplete frames back on ring for completion */
4301 +diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
4302 +index 993f495e2bf7b..9f804e2aba359 100644
4303 +--- a/drivers/net/ethernet/korina.c
4304 ++++ b/drivers/net/ethernet/korina.c
4305 +@@ -219,7 +219,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
4306 + dev_kfree_skb_any(skb);
4307 + spin_unlock_irqrestore(&lp->lock, flags);
4308 +
4309 +- return NETDEV_TX_BUSY;
4310 ++ return NETDEV_TX_OK;
4311 + }
4312 + }
4313 +
4314 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4315 +index 5868ec11db1af..47eee3e083ece 100644
4316 +--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4317 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4318 +@@ -1384,8 +1384,10 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
4319 + }
4320 +
4321 + priv->port_stats.tx_timeout++;
4322 +- en_dbg(DRV, priv, "Scheduling watchdog\n");
4323 +- queue_work(mdev->workqueue, &priv->watchdog_task);
4324 ++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state)) {
4325 ++ en_dbg(DRV, priv, "Scheduling port restart\n");
4326 ++ queue_work(mdev->workqueue, &priv->restart_task);
4327 ++ }
4328 + }
4329 +
4330 +
4331 +@@ -1739,6 +1741,7 @@ int mlx4_en_start_port(struct net_device *dev)
4332 + mlx4_en_deactivate_cq(priv, cq);
4333 + goto tx_err;
4334 + }
4335 ++ clear_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &tx_ring->state);
4336 + if (t != TX_XDP) {
4337 + tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
4338 + tx_ring->recycle_ring = NULL;
4339 +@@ -1835,6 +1838,7 @@ int mlx4_en_start_port(struct net_device *dev)
4340 + local_bh_enable();
4341 + }
4342 +
4343 ++ clear_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state);
4344 + netif_tx_start_all_queues(dev);
4345 + netif_device_attach(dev);
4346 +
4347 +@@ -2005,7 +2009,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
4348 + static void mlx4_en_restart(struct work_struct *work)
4349 + {
4350 + struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
4351 +- watchdog_task);
4352 ++ restart_task);
4353 + struct mlx4_en_dev *mdev = priv->mdev;
4354 + struct net_device *dev = priv->dev;
4355 +
4356 +@@ -2387,7 +2391,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
4357 + if (netif_running(dev)) {
4358 + mutex_lock(&mdev->state_lock);
4359 + if (!mdev->device_up) {
4360 +- /* NIC is probably restarting - let watchdog task reset
4361 ++ /* NIC is probably restarting - let restart task reset
4362 + * the port */
4363 + en_dbg(DRV, priv, "Change MTU called with card down!?\n");
4364 + } else {
4365 +@@ -2396,7 +2400,9 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
4366 + if (err) {
4367 + en_err(priv, "Failed restarting port:%d\n",
4368 + priv->port);
4369 +- queue_work(mdev->workqueue, &priv->watchdog_task);
4370 ++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING,
4371 ++ &priv->state))
4372 ++ queue_work(mdev->workqueue, &priv->restart_task);
4373 + }
4374 + }
4375 + mutex_unlock(&mdev->state_lock);
4376 +@@ -2882,7 +2888,8 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
4377 + if (err) {
4378 + en_err(priv, "Failed starting port %d for XDP change\n",
4379 + priv->port);
4380 +- queue_work(mdev->workqueue, &priv->watchdog_task);
4381 ++ if (!test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
4382 ++ queue_work(mdev->workqueue, &priv->restart_task);
4383 + }
4384 + }
4385 +
4386 +@@ -3280,7 +3287,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
4387 + priv->counter_index = MLX4_SINK_COUNTER_INDEX(mdev->dev);
4388 + spin_lock_init(&priv->stats_lock);
4389 + INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
4390 +- INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
4391 ++ INIT_WORK(&priv->restart_task, mlx4_en_restart);
4392 + INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
4393 + INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
4394 + INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
4395 +diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4396 +index e58052d07e399..29041d4a3f28e 100644
4397 +--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4398 ++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4399 +@@ -385,6 +385,35 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
4400 + return cnt;
4401 + }
4402 +
4403 ++static void mlx4_en_handle_err_cqe(struct mlx4_en_priv *priv, struct mlx4_err_cqe *err_cqe,
4404 ++ u16 cqe_index, struct mlx4_en_tx_ring *ring)
4405 ++{
4406 ++ struct mlx4_en_dev *mdev = priv->mdev;
4407 ++ struct mlx4_en_tx_info *tx_info;
4408 ++ struct mlx4_en_tx_desc *tx_desc;
4409 ++ u16 wqe_index;
4410 ++ int desc_size;
4411 ++
4412 ++ en_err(priv, "CQE error - cqn 0x%x, ci 0x%x, vendor syndrome: 0x%x syndrome: 0x%x\n",
4413 ++ ring->sp_cqn, cqe_index, err_cqe->vendor_err_syndrome, err_cqe->syndrome);
4414 ++ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe, sizeof(*err_cqe),
4415 ++ false);
4416 ++
4417 ++ wqe_index = be16_to_cpu(err_cqe->wqe_index) & ring->size_mask;
4418 ++ tx_info = &ring->tx_info[wqe_index];
4419 ++ desc_size = tx_info->nr_txbb << LOG_TXBB_SIZE;
4420 ++ en_err(priv, "Related WQE - qpn 0x%x, wqe index 0x%x, wqe size 0x%x\n", ring->qpn,
4421 ++ wqe_index, desc_size);
4422 ++ tx_desc = ring->buf + (wqe_index << LOG_TXBB_SIZE);
4423 ++ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, tx_desc, desc_size, false);
4424 ++
4425 ++ if (test_and_set_bit(MLX4_EN_STATE_FLAG_RESTARTING, &priv->state))
4426 ++ return;
4427 ++
4428 ++ en_err(priv, "Scheduling port restart\n");
4429 ++ queue_work(mdev->workqueue, &priv->restart_task);
4430 ++}
4431 ++
4432 + bool mlx4_en_process_tx_cq(struct net_device *dev,
4433 + struct mlx4_en_cq *cq, int napi_budget)
4434 + {
4435 +@@ -431,13 +460,10 @@ bool mlx4_en_process_tx_cq(struct net_device *dev,
4436 + dma_rmb();
4437 +
4438 + if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
4439 +- MLX4_CQE_OPCODE_ERROR)) {
4440 +- struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
4441 +-
4442 +- en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
4443 +- cqe_err->vendor_err_syndrome,
4444 +- cqe_err->syndrome);
4445 +- }
4446 ++ MLX4_CQE_OPCODE_ERROR))
4447 ++ if (!test_and_set_bit(MLX4_EN_TX_RING_STATE_RECOVERING, &ring->state))
4448 ++ mlx4_en_handle_err_cqe(priv, (struct mlx4_err_cqe *)cqe, index,
4449 ++ ring);
4450 +
4451 + /* Skip over last polled CQE */
4452 + new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
4453 +diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
4454 +index 240f9c9ca943d..1a57ea9a7ea59 100644
4455 +--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
4456 ++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
4457 +@@ -271,6 +271,10 @@ struct mlx4_en_page_cache {
4458 + } buf[MLX4_EN_CACHE_SIZE];
4459 + };
4460 +
4461 ++enum {
4462 ++ MLX4_EN_TX_RING_STATE_RECOVERING,
4463 ++};
4464 ++
4465 + struct mlx4_en_priv;
4466 +
4467 + struct mlx4_en_tx_ring {
4468 +@@ -317,6 +321,7 @@ struct mlx4_en_tx_ring {
4469 + * Only queue_stopped might be used if BQL is not properly working.
4470 + */
4471 + unsigned long queue_stopped;
4472 ++ unsigned long state;
4473 + struct mlx4_hwq_resources sp_wqres;
4474 + struct mlx4_qp sp_qp;
4475 + struct mlx4_qp_context sp_context;
4476 +@@ -530,6 +535,10 @@ struct mlx4_en_stats_bitmap {
4477 + struct mutex mutex; /* for mutual access to stats bitmap */
4478 + };
4479 +
4480 ++enum {
4481 ++ MLX4_EN_STATE_FLAG_RESTARTING,
4482 ++};
4483 ++
4484 + struct mlx4_en_priv {
4485 + struct mlx4_en_dev *mdev;
4486 + struct mlx4_en_port_profile *prof;
4487 +@@ -595,7 +604,7 @@ struct mlx4_en_priv {
4488 + struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
4489 + struct mlx4_qp drop_qp;
4490 + struct work_struct rx_mode_task;
4491 +- struct work_struct watchdog_task;
4492 ++ struct work_struct restart_task;
4493 + struct work_struct linkstate_task;
4494 + struct delayed_work stats_task;
4495 + struct delayed_work service_task;
4496 +@@ -643,6 +652,7 @@ struct mlx4_en_priv {
4497 + u32 pflags;
4498 + u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
4499 + u8 rss_hash_fn;
4500 ++ unsigned long state;
4501 + };
4502 +
4503 + enum mlx4_en_wol {
4504 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
4505 +index 5fac00ea62457..a2b25afa24722 100644
4506 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
4507 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
4508 +@@ -51,6 +51,7 @@
4509 + #ifdef CONFIG_RFS_ACCEL
4510 + #include <linux/cpu_rmap.h>
4511 + #endif
4512 ++#include <linux/version.h>
4513 + #include <net/devlink.h>
4514 + #include "mlx5_core.h"
4515 + #include "fs_core.h"
4516 +@@ -211,7 +212,10 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
4517 + strncat(string, ",", remaining_size);
4518 +
4519 + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string));
4520 +- strncat(string, DRIVER_VERSION, remaining_size);
4521 ++
4522 ++ snprintf(string + strlen(string), remaining_size, "%u.%u.%u",
4523 ++ (u8)((LINUX_VERSION_CODE >> 16) & 0xff), (u8)((LINUX_VERSION_CODE >> 8) & 0xff),
4524 ++ (u16)(LINUX_VERSION_CODE & 0xffff));
4525 +
4526 + /*Send the command*/
4527 + MLX5_SET(set_driver_version_in, in, opcode,
4528 +diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
4529 +index 07c1eb63415a3..190c22cdc4d2b 100644
4530 +--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
4531 ++++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
4532 +@@ -659,7 +659,9 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
4533 +
4534 + wol->supported = 0;
4535 + wol->wolopts = 0;
4536 +- phy_ethtool_get_wol(netdev->phydev, wol);
4537 ++
4538 ++ if (netdev->phydev)
4539 ++ phy_ethtool_get_wol(netdev->phydev, wol);
4540 +
4541 + wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
4542 + WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
4543 +@@ -688,9 +690,8 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
4544 +
4545 + device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
4546 +
4547 +- phy_ethtool_set_wol(netdev->phydev, wol);
4548 +-
4549 +- return 0;
4550 ++ return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
4551 ++ : -ENETDOWN;
4552 + }
4553 + #endif /* CONFIG_PM */
4554 +
4555 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
4556 +index dbd48012224f2..ed34b7d1a9e11 100644
4557 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
4558 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
4559 +@@ -2508,6 +2508,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4560 + qlcnic_sriov_vf_register_map(ahw);
4561 + break;
4562 + default:
4563 ++ err = -EINVAL;
4564 + goto err_out_free_hw_res;
4565 + }
4566 +
4567 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
4568 +index 03bda2e0b7a89..5020d5b28c6ad 100644
4569 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
4570 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
4571 +@@ -35,7 +35,6 @@
4572 + #define PRG_ETH0_EXT_RMII_MODE 4
4573 +
4574 + /* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
4575 +-#define PRG_ETH0_CLK_M250_SEL_SHIFT 4
4576 + #define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
4577 +
4578 + #define PRG_ETH0_TXDLY_SHIFT 5
4579 +@@ -149,8 +148,9 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
4580 + }
4581 +
4582 + clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
4583 +- clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
4584 +- clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
4585 ++ clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
4586 ++ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
4587 ++ clk_configs->m250_mux.shift;
4588 + clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names,
4589 + MUX_CLK_NUM_PARENTS, &clk_mux_ops,
4590 + &clk_configs->m250_mux.hw);
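
The dwmac-meson8b hunk above drops the hard-coded PRG_ETH0_CLK_M250_SEL_SHIFT and instead derives the mux shift from the register mask with __ffs(), passing the field-relative mask (mask >> shift) to the clock framework. A minimal, standalone C sketch of that idiom follows; it uses __builtin_ctzl as a stand-in for the kernel's __ffs and a local GENMASK_UL macro, both assumptions made so the example builds in userspace:

#include <stdio.h>

/* Userspace stand-in for the kernel's GENMASK(h, l). */
#define GENMASK_UL(h, l) \
	(((~0UL) << (l)) & (~0UL >> (sizeof(unsigned long) * 8 - 1 - (h))))

/* Field occupying bit 4 only, like PRG_ETH0_CLK_M250_SEL_MASK = GENMASK(4, 4). */
#define CLK_M250_SEL_MASK GENMASK_UL(4, 4)

int main(void)
{
	unsigned long reg_mask = CLK_M250_SEL_MASK;

	/* __builtin_ctzl plays the role of __ffs(): index of the lowest set bit. */
	unsigned int shift = __builtin_ctzl(reg_mask);
	unsigned long field_mask = reg_mask >> shift;

	/* shift = 4, field_mask = 0x1: both fall out of the single mask macro,
	 * so moving or widening the field only requires editing GENMASK(). */
	printf("shift=%u field_mask=%#lx\n", shift, field_mask);
	return 0;
}
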
4591 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4592 +index 2872684906e14..4ac507b4d1019 100644
4593 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4594 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
4595 +@@ -1428,6 +1428,19 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
4596 + stmmac_free_tx_buffer(priv, queue, i);
4597 + }
4598 +
4599 ++/**
4600 ++ * stmmac_free_tx_skbufs - free TX skb buffers
4601 ++ * @priv: private structure
4602 ++ */
4603 ++static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
4604 ++{
4605 ++ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
4606 ++ u32 queue;
4607 ++
4608 ++ for (queue = 0; queue < tx_queue_cnt; queue++)
4609 ++ dma_free_tx_skbufs(priv, queue);
4610 ++}
4611 ++
4612 + /**
4613 + * free_dma_rx_desc_resources - free RX dma desc resources
4614 + * @priv: private structure
4615 +@@ -2689,9 +2702,6 @@ static int stmmac_release(struct net_device *dev)
4616 + struct stmmac_priv *priv = netdev_priv(dev);
4617 + u32 chan;
4618 +
4619 +- if (priv->eee_enabled)
4620 +- del_timer_sync(&priv->eee_ctrl_timer);
4621 +-
4622 + /* Stop and disconnect the PHY */
4623 + if (dev->phydev) {
4624 + phy_stop(dev->phydev);
4625 +@@ -2710,6 +2720,11 @@ static int stmmac_release(struct net_device *dev)
4626 + if (priv->lpi_irq > 0)
4627 + free_irq(priv->lpi_irq, dev);
4628 +
4629 ++ if (priv->eee_enabled) {
4630 ++ priv->tx_path_in_lpi_mode = false;
4631 ++ del_timer_sync(&priv->eee_ctrl_timer);
4632 ++ }
4633 ++
4634 + /* Stop TX/RX DMA and clear the descriptors */
4635 + stmmac_stop_all_dma(priv);
4636 +
4637 +@@ -4497,6 +4512,11 @@ int stmmac_suspend(struct device *dev)
4638 + for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4639 + del_timer_sync(&priv->tx_queue[chan].txtimer);
4640 +
4641 ++ if (priv->eee_enabled) {
4642 ++ priv->tx_path_in_lpi_mode = false;
4643 ++ del_timer_sync(&priv->eee_ctrl_timer);
4644 ++ }
4645 ++
4646 + /* Stop TX/RX DMA */
4647 + stmmac_stop_all_dma(priv);
4648 +
4649 +@@ -4591,6 +4611,7 @@ int stmmac_resume(struct device *dev)
4650 +
4651 + stmmac_reset_queues_param(priv);
4652 +
4653 ++ stmmac_free_tx_skbufs(priv);
4654 + stmmac_clear_descriptors(priv);
4655 +
4656 + stmmac_hw_setup(ndev, false);
4657 +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
4658 +index abf85f0ab72fc..66fffbd64a33f 100644
4659 +--- a/drivers/net/vxlan.c
4660 ++++ b/drivers/net/vxlan.c
4661 +@@ -3180,6 +3180,9 @@ static void vxlan_config_apply(struct net_device *dev,
4662 + dev->gso_max_segs = lowerdev->gso_max_segs;
4663 +
4664 + needed_headroom = lowerdev->hard_header_len;
4665 ++ needed_headroom += lowerdev->needed_headroom;
4666 ++
4667 ++ dev->needed_tailroom = lowerdev->needed_tailroom;
4668 +
4669 + max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM :
4670 + VXLAN_HEADROOM);
4671 +diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
4672 +index c64a03f164c0f..16d5fe6d1e2e4 100644
4673 +--- a/drivers/net/wireless/ath/ath10k/usb.c
4674 ++++ b/drivers/net/wireless/ath/ath10k/usb.c
4675 +@@ -1019,6 +1019,8 @@ static int ath10k_usb_probe(struct usb_interface *interface,
4676 +
4677 + ar_usb = ath10k_usb_priv(ar);
4678 + ret = ath10k_usb_create(ar, interface);
4679 ++ if (ret)
4680 ++ goto err;
4681 + ar_usb->ar = ar;
4682 +
4683 + ar->dev_id = product_id;
4684 +@@ -1030,7 +1032,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
4685 + ret = ath10k_core_register(ar, chip_id);
4686 + if (ret) {
4687 + ath10k_warn(ar, "failed to register driver core: %d\n", ret);
4688 +- goto err;
4689 ++ goto err_usb_destroy;
4690 + }
4691 +
4692 + /* TODO: remove this once USB support is fully implemented */
4693 +@@ -1038,6 +1040,9 @@ static int ath10k_usb_probe(struct usb_interface *interface,
4694 +
4695 + return 0;
4696 +
4697 ++err_usb_destroy:
4698 ++ ath10k_usb_destroy(ar);
4699 ++
4700 + err:
4701 + ath10k_core_destroy(ar);
4702 +
4703 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
4704 +index 7f435fa29f75e..a6f7bf28a8b2d 100644
4705 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
4706 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
4707 +@@ -1157,13 +1157,15 @@ static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
4708 +
4709 + switch (tag) {
4710 + case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
4711 ++ arg->service_map_ext_valid = true;
4712 + arg->service_map_ext_len = *(__le32 *)ptr;
4713 + arg->service_map_ext = ptr + sizeof(__le32);
4714 + return 0;
4715 + default:
4716 + break;
4717 + }
4718 +- return -EPROTO;
4719 ++
4720 ++ return 0;
4721 + }
4722 +
4723 + static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
4724 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
4725 +index 3f3fbee631c34..41eb57be92220 100644
4726 +--- a/drivers/net/wireless/ath/ath10k/wmi.c
4727 ++++ b/drivers/net/wireless/ath/ath10k/wmi.c
4728 +@@ -5510,8 +5510,13 @@ void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
4729 + ret);
4730 + }
4731 +
4732 +- ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
4733 +- __le32_to_cpu(arg.service_map_ext_len));
4734 ++ /*
4735 ++ * Initialization of "arg.service_map_ext_valid" to ZERO is necessary
4736 ++ * for the below logic to work.
4737 ++ */
4738 ++ if (arg.service_map_ext_valid)
4739 ++ ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
4740 ++ __le32_to_cpu(arg.service_map_ext_len));
4741 + }
4742 +
4743 + static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
4744 +diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
4745 +index e341cfb3fcc26..6bd63d1cd0395 100644
4746 +--- a/drivers/net/wireless/ath/ath10k/wmi.h
4747 ++++ b/drivers/net/wireless/ath/ath10k/wmi.h
4748 +@@ -6710,6 +6710,7 @@ struct wmi_svc_rdy_ev_arg {
4749 + };
4750 +
4751 + struct wmi_svc_avail_ev_arg {
4752 ++ bool service_map_ext_valid;
4753 + __le32 service_map_ext_len;
4754 + const __le32 *service_map_ext;
4755 + };
4756 +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4757 +index 525b26e0f65ee..2fad20c845b47 100644
4758 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4759 ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
4760 +@@ -2880,7 +2880,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
4761 +
4762 + /* this would be a mac80211 bug ... but don't crash */
4763 + if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
4764 +- return -EINVAL;
4765 ++ return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL;
4766 +
4767 + /*
4768 + * If we are in a STA removal flow and in DQA mode:
4769 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4770 +index 24da496151353..f48c7cac122e9 100644
4771 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4772 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
4773 +@@ -2121,18 +2121,36 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
4774 + void *buf, int dwords)
4775 + {
4776 + unsigned long flags;
4777 +- int offs, ret = 0;
4778 ++ int offs = 0;
4779 + u32 *vals = buf;
4780 +
4781 +- if (iwl_trans_grab_nic_access(trans, &flags)) {
4782 +- iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
4783 +- for (offs = 0; offs < dwords; offs++)
4784 +- vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
4785 +- iwl_trans_release_nic_access(trans, &flags);
4786 +- } else {
4787 +- ret = -EBUSY;
4788 ++ while (offs < dwords) {
4789 ++ /* limit the time we spin here under lock to 1/2s */
4790 ++ ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC);
4791 ++
4792 ++ if (iwl_trans_grab_nic_access(trans, &flags)) {
4793 ++ iwl_write32(trans, HBUS_TARG_MEM_RADDR,
4794 ++ addr + 4 * offs);
4795 ++
4796 ++ while (offs < dwords) {
4797 ++ vals[offs] = iwl_read32(trans,
4798 ++ HBUS_TARG_MEM_RDAT);
4799 ++ offs++;
4800 ++
4801 ++ /* calling ktime_get is expensive so
4802 ++ * do it once in 128 reads
4803 ++ */
4804 ++ if (offs % 128 == 0 && ktime_after(ktime_get(),
4805 ++ timeout))
4806 ++ break;
4807 ++ }
4808 ++ iwl_trans_release_nic_access(trans, &flags);
4809 ++ } else {
4810 ++ return -EBUSY;
4811 ++ }
4812 + }
4813 +- return ret;
4814 ++
4815 ++ return 0;
4816 + }
4817 +
4818 + static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
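
The iwl_trans_pcie_read_mem() change above splits one long read loop into chunks: each chunk re-grabs NIC access, is bounded to roughly half a second, and checks the deadline only every 128 reads to keep ktime_get() off the hot path. A rough userspace sketch of that shape follows; a pthread mutex and clock_gettime() stand in for the NIC-access grab and the ktime helpers, and all names are invented for the illustration:

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>
#include <time.h>

static pthread_mutex_t bus_lock = PTHREAD_MUTEX_INITIALIZER;

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000ull + ts.tv_nsec / 1000;
}

/* Simulated slow register read. */
static uint32_t read_word(size_t i) { return (uint32_t)i; }

static int read_mem(uint32_t *vals, size_t dwords)
{
	size_t offs = 0;

	while (offs < dwords) {
		/* limit the time spent holding the lock to ~500 ms per chunk */
		uint64_t deadline = now_us() + 500 * 1000;

		if (pthread_mutex_lock(&bus_lock) != 0)
			return -1;

		while (offs < dwords) {
			vals[offs] = read_word(offs);
			offs++;

			/* reading the clock is comparatively expensive,
			 * so only look at it once every 128 reads */
			if (offs % 128 == 0 && now_us() > deadline)
				break;
		}
		pthread_mutex_unlock(&bus_lock);
	}
	return 0;
}

int main(void)
{
	static uint32_t buf[1024];

	return read_mem(buf, 1024);
}
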
4819 +diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
4820 +index b704e4bce171d..a04d598430228 100644
4821 +--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
4822 ++++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
4823 +@@ -1237,13 +1237,6 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
4824 + if (skb->len < ETH_HLEN)
4825 + goto drop;
4826 +
4827 +- ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
4828 +- if (!ctx)
4829 +- goto busy;
4830 +-
4831 +- memset(ctx->buf, 0, BULK_BUF_SIZE);
4832 +- buf = ctx->buf->data;
4833 +-
4834 + tx_control = 0;
4835 +
4836 + err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
4837 +@@ -1251,6 +1244,13 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
4838 + if (err)
4839 + goto drop;
4840 +
4841 ++ ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
4842 ++ if (!ctx)
4843 ++ goto drop;
4844 ++
4845 ++ memset(ctx->buf, 0, BULK_BUF_SIZE);
4846 ++ buf = ctx->buf->data;
4847 ++
4848 + {
4849 + __le16 *tx_cntl = (__le16 *)buf;
4850 + *tx_cntl = cpu_to_le16(tx_control);
4851 +diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
4852 +index e48b47f425540..ceac611ef0864 100644
4853 +--- a/drivers/net/wireless/marvell/mwifiex/main.c
4854 ++++ b/drivers/net/wireless/marvell/mwifiex/main.c
4855 +@@ -1474,6 +1474,8 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
4856 + priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
4857 + mwifiex_deauthenticate(priv, NULL);
4858 +
4859 ++ mwifiex_init_shutdown_fw(priv, MWIFIEX_FUNC_SHUTDOWN);
4860 ++
4861 + mwifiex_uninit_sw(adapter);
4862 +
4863 + if (adapter->if_ops.down_dev)
4864 +diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
4865 +index c1608f0bf6d01..0c5a15e2b8f97 100644
4866 +--- a/drivers/net/wireless/st/cw1200/main.c
4867 ++++ b/drivers/net/wireless/st/cw1200/main.c
4868 +@@ -384,6 +384,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
4869 + CW1200_LINK_ID_MAX,
4870 + cw1200_skb_dtor,
4871 + priv)) {
4872 ++ destroy_workqueue(priv->workqueue);
4873 + ieee80211_free_hw(hw);
4874 + return NULL;
4875 + }
4876 +@@ -395,6 +396,7 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
4877 + for (; i > 0; i--)
4878 + cw1200_queue_deinit(&priv->tx_queue[i - 1]);
4879 + cw1200_queue_stats_deinit(&priv->tx_queue_stats);
4880 ++ destroy_workqueue(priv->workqueue);
4881 + ieee80211_free_hw(hw);
4882 + return NULL;
4883 + }
4884 +diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
4885 +index cd51492ae6c2d..107bbd4ae825e 100644
4886 +--- a/drivers/net/xen-netback/xenbus.c
4887 ++++ b/drivers/net/xen-netback/xenbus.c
4888 +@@ -777,12 +777,14 @@ static int xen_register_credit_watch(struct xenbus_device *dev,
4889 + return -ENOMEM;
4890 + snprintf(node, maxlen, "%s/rate", dev->nodename);
4891 + vif->credit_watch.node = node;
4892 ++ vif->credit_watch.will_handle = NULL;
4893 + vif->credit_watch.callback = xen_net_rate_changed;
4894 + err = register_xenbus_watch(&vif->credit_watch);
4895 + if (err) {
4896 + pr_err("Failed to set watcher %s\n", vif->credit_watch.node);
4897 + kfree(node);
4898 + vif->credit_watch.node = NULL;
4899 ++ vif->credit_watch.will_handle = NULL;
4900 + vif->credit_watch.callback = NULL;
4901 + }
4902 + return err;
4903 +@@ -829,6 +831,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
4904 + snprintf(node, maxlen, "%s/request-multicast-control",
4905 + dev->otherend);
4906 + vif->mcast_ctrl_watch.node = node;
4907 ++ vif->mcast_ctrl_watch.will_handle = NULL;
4908 + vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
4909 + err = register_xenbus_watch(&vif->mcast_ctrl_watch);
4910 + if (err) {
4911 +@@ -836,6 +839,7 @@ static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
4912 + vif->mcast_ctrl_watch.node);
4913 + kfree(node);
4914 + vif->mcast_ctrl_watch.node = NULL;
4915 ++ vif->mcast_ctrl_watch.will_handle = NULL;
4916 + vif->mcast_ctrl_watch.callback = NULL;
4917 + }
4918 + return err;
4919 +@@ -1039,7 +1043,7 @@ static void connect(struct backend_info *be)
4920 + xenvif_carrier_on(be->vif);
4921 +
4922 + unregister_hotplug_status_watch(be);
4923 +- err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch,
4924 ++ err = xenbus_watch_pathfmt(dev, &be->hotplug_status_watch, NULL,
4925 + hotplug_status_changed,
4926 + "%s/%s", dev->nodename, "hotplug-status");
4927 + if (!err)
4928 +diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
4929 +index b7828fb252f27..b7d5b12035c1a 100644
4930 +--- a/drivers/nfc/s3fwrn5/firmware.c
4931 ++++ b/drivers/nfc/s3fwrn5/firmware.c
4932 +@@ -304,8 +304,10 @@ static int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info)
4933 + if (ret < 0)
4934 + return ret;
4935 +
4936 +- if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE)
4937 ++ if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) {
4938 ++ release_firmware(fw->fw);
4939 + return -EINVAL;
4940 ++ }
4941 +
4942 + memcpy(fw->date, fw->fw->data + 0x00, 12);
4943 + fw->date[12] = '\0';
4944 +diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
4945 +index 9f1b7e3153f99..19e3469d5908e 100644
4946 +--- a/drivers/nvdimm/label.c
4947 ++++ b/drivers/nvdimm/label.c
4948 +@@ -861,6 +861,15 @@ static int __blk_label_update(struct nd_region *nd_region,
4949 + }
4950 + }
4951 +
4952 ++ /* release slots associated with any invalidated UUIDs */
4953 ++ mutex_lock(&nd_mapping->lock);
4954 ++ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
4955 ++ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
4956 ++ reap_victim(nd_mapping, label_ent);
4957 ++ list_move(&label_ent->list, &list);
4958 ++ }
4959 ++ mutex_unlock(&nd_mapping->lock);
4960 ++
4961 + /*
4962 + * Find the resource associated with the first label in the set
4963 + * per the v1.2 namespace specification.
4964 +@@ -880,8 +889,10 @@ static int __blk_label_update(struct nd_region *nd_region,
4965 + if (is_old_resource(res, old_res_list, old_num_resources))
4966 + continue; /* carry-over */
4967 + slot = nd_label_alloc_slot(ndd);
4968 +- if (slot == UINT_MAX)
4969 ++ if (slot == UINT_MAX) {
4970 ++ rc = -ENXIO;
4971 + goto abort;
4972 ++ }
4973 + dev_dbg(ndd->dev, "allocated: %d\n", slot);
4974 +
4975 + nd_label = to_label(ndd, slot);
4976 +diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
4977 +index 1bdac298a943f..791d6b671ee0b 100644
4978 +--- a/drivers/pci/controller/dwc/pcie-qcom.c
4979 ++++ b/drivers/pci/controller/dwc/pcie-qcom.c
4980 +@@ -108,6 +108,7 @@ struct qcom_pcie_resources_2_1_0 {
4981 + struct reset_control *ahb_reset;
4982 + struct reset_control *por_reset;
4983 + struct reset_control *phy_reset;
4984 ++ struct reset_control *ext_reset;
4985 + struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
4986 + };
4987 +
4988 +@@ -269,6 +270,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
4989 + if (IS_ERR(res->por_reset))
4990 + return PTR_ERR(res->por_reset);
4991 +
4992 ++ res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
4993 ++ if (IS_ERR(res->ext_reset))
4994 ++ return PTR_ERR(res->ext_reset);
4995 ++
4996 + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
4997 + return PTR_ERR_OR_ZERO(res->phy_reset);
4998 + }
4999 +@@ -281,6 +286,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
5000 + reset_control_assert(res->axi_reset);
5001 + reset_control_assert(res->ahb_reset);
5002 + reset_control_assert(res->por_reset);
5003 ++ reset_control_assert(res->ext_reset);
5004 + reset_control_assert(res->pci_reset);
5005 + clk_disable_unprepare(res->iface_clk);
5006 + clk_disable_unprepare(res->core_clk);
5007 +@@ -333,6 +339,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
5008 + goto err_deassert_ahb;
5009 + }
5010 +
5011 ++ ret = reset_control_deassert(res->ext_reset);
5012 ++ if (ret) {
5013 ++ dev_err(dev, "cannot deassert ext reset\n");
5014 ++ goto err_deassert_ahb;
5015 ++ }
5016 ++
5017 + /* enable PCIe clocks and resets */
5018 + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
5019 + val &= ~BIT(0);
5020 +diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
5021 +index ec86414216f97..f2d79e0235bc1 100644
5022 +--- a/drivers/pci/controller/pcie-iproc.c
5023 ++++ b/drivers/pci/controller/pcie-iproc.c
5024 +@@ -300,7 +300,7 @@ enum iproc_pcie_reg {
5025 + };
5026 +
5027 + /* iProc PCIe PAXB BCMA registers */
5028 +-static const u16 iproc_pcie_reg_paxb_bcma[] = {
5029 ++static const u16 iproc_pcie_reg_paxb_bcma[IPROC_PCIE_MAX_NUM_REG] = {
5030 + [IPROC_PCIE_CLK_CTRL] = 0x000,
5031 + [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
5032 + [IPROC_PCIE_CFG_IND_DATA] = 0x124,
5033 +@@ -311,7 +311,7 @@ static const u16 iproc_pcie_reg_paxb_bcma[] = {
5034 + };
5035 +
5036 + /* iProc PCIe PAXB registers */
5037 +-static const u16 iproc_pcie_reg_paxb[] = {
5038 ++static const u16 iproc_pcie_reg_paxb[IPROC_PCIE_MAX_NUM_REG] = {
5039 + [IPROC_PCIE_CLK_CTRL] = 0x000,
5040 + [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
5041 + [IPROC_PCIE_CFG_IND_DATA] = 0x124,
5042 +@@ -327,7 +327,7 @@ static const u16 iproc_pcie_reg_paxb[] = {
5043 + };
5044 +
5045 + /* iProc PCIe PAXB v2 registers */
5046 +-static const u16 iproc_pcie_reg_paxb_v2[] = {
5047 ++static const u16 iproc_pcie_reg_paxb_v2[IPROC_PCIE_MAX_NUM_REG] = {
5048 + [IPROC_PCIE_CLK_CTRL] = 0x000,
5049 + [IPROC_PCIE_CFG_IND_ADDR] = 0x120,
5050 + [IPROC_PCIE_CFG_IND_DATA] = 0x124,
5051 +@@ -355,7 +355,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = {
5052 + };
5053 +
5054 + /* iProc PCIe PAXC v1 registers */
5055 +-static const u16 iproc_pcie_reg_paxc[] = {
5056 ++static const u16 iproc_pcie_reg_paxc[IPROC_PCIE_MAX_NUM_REG] = {
5057 + [IPROC_PCIE_CLK_CTRL] = 0x000,
5058 + [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0,
5059 + [IPROC_PCIE_CFG_IND_DATA] = 0x1f4,
5060 +@@ -364,7 +364,7 @@ static const u16 iproc_pcie_reg_paxc[] = {
5061 + };
5062 +
5063 + /* iProc PCIe PAXC v2 registers */
5064 +-static const u16 iproc_pcie_reg_paxc_v2[] = {
5065 ++static const u16 iproc_pcie_reg_paxc_v2[IPROC_PCIE_MAX_NUM_REG] = {
5066 + [IPROC_PCIE_MSI_GIC_MODE] = 0x050,
5067 + [IPROC_PCIE_MSI_BASE_ADDR] = 0x074,
5068 + [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078,
5069 +diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
5070 +index f7218c1673ceb..2c46f7dcd2f5d 100644
5071 +--- a/drivers/pci/pci-acpi.c
5072 ++++ b/drivers/pci/pci-acpi.c
5073 +@@ -587,7 +587,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
5074 + {
5075 + while (bus->parent) {
5076 + if (acpi_pm_device_can_wakeup(&bus->self->dev))
5077 +- return acpi_pm_set_bridge_wakeup(&bus->self->dev, enable);
5078 ++ return acpi_pm_set_device_wakeup(&bus->self->dev, enable);
5079 +
5080 + bus = bus->parent;
5081 + }
5082 +@@ -595,7 +595,7 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
5083 + /* We have reached the root bus. */
5084 + if (bus->bridge) {
5085 + if (acpi_pm_device_can_wakeup(bus->bridge))
5086 +- return acpi_pm_set_bridge_wakeup(bus->bridge, enable);
5087 ++ return acpi_pm_set_device_wakeup(bus->bridge, enable);
5088 + }
5089 + return 0;
5090 + }
5091 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
5092 +index 57a87a001b4f4..cd628dd73719b 100644
5093 +--- a/drivers/pci/pci.c
5094 ++++ b/drivers/pci/pci.c
5095 +@@ -5840,19 +5840,21 @@ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5096 + while (*p) {
5097 + count = 0;
5098 + if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5099 +- p[count] == '@') {
5100 ++ p[count] == '@') {
5101 + p += count + 1;
5102 ++ if (align_order > 63) {
5103 ++ pr_err("PCI: Invalid requested alignment (order %d)\n",
5104 ++ align_order);
5105 ++ align_order = PAGE_SHIFT;
5106 ++ }
5107 + } else {
5108 +- align_order = -1;
5109 ++ align_order = PAGE_SHIFT;
5110 + }
5111 +
5112 + ret = pci_dev_str_match(dev, p, &p);
5113 + if (ret == 1) {
5114 + *resize = true;
5115 +- if (align_order == -1)
5116 +- align = PAGE_SIZE;
5117 +- else
5118 +- align = 1 << align_order;
5119 ++ align = 1ULL << align_order;
5120 + break;
5121 + } else if (ret < 0) {
5122 + pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
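
The pci.c hunk above hardens the resource_alignment= parser: a missing or over-large order now falls back to PAGE_SHIFT, and the shift is computed as 1ULL << order so a 32-bit overflow cannot occur. A small standalone sketch of that parsing and clamping logic follows; PAGE_SHIFT is assumed to be 12 here purely for the example, and the extra negative-order check is the sketch's own addition:

#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assumption: 4 KiB pages */

/* Parse an optional "<order>@" prefix, e.g. "16@pci:8086:1234". */
static uint64_t parse_alignment(const char *p)
{
	int align_order, count = 0;

	if (sscanf(p, "%d%n", &align_order, &count) == 1 && p[count] == '@') {
		/* reject nonsensical orders instead of shifting by them */
		if (align_order < 0 || align_order > 63) {
			fprintf(stderr, "invalid order %d, using page size\n",
				align_order);
			align_order = EXAMPLE_PAGE_SHIFT;
		}
	} else {
		/* no explicit order given: default to one page */
		align_order = EXAMPLE_PAGE_SHIFT;
	}

	/* 64-bit shift so large-but-valid orders cannot overflow */
	return 1ULL << align_order;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)parse_alignment("16@pci:8086:1234"));
	printf("%llu\n", (unsigned long long)parse_alignment("pci:8086:1234"));
	printf("%llu\n", (unsigned long long)parse_alignment("99@pci:8086:1234"));
	return 0;
}
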
5123 +diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
5124 +index fb7478b6c4f9d..dfbe9cbf292c0 100644
5125 +--- a/drivers/pci/slot.c
5126 ++++ b/drivers/pci/slot.c
5127 +@@ -307,6 +307,9 @@ placeholder:
5128 + goto err;
5129 + }
5130 +
5131 ++ INIT_LIST_HEAD(&slot->list);
5132 ++ list_add(&slot->list, &parent->slots);
5133 ++
5134 + err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
5135 + "%s", slot_name);
5136 + if (err) {
5137 +@@ -314,9 +317,6 @@ placeholder:
5138 + goto err;
5139 + }
5140 +
5141 +- INIT_LIST_HEAD(&slot->list);
5142 +- list_add(&slot->list, &parent->slots);
5143 +-
5144 + down_read(&pci_bus_sem);
5145 + list_for_each_entry(dev, &parent->devices, bus_list)
5146 + if (PCI_SLOT(dev->devfn) == slot_nr)
5147 +diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
5148 +index 1b00a3f3b419c..b3d478edbbb1c 100644
5149 +--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
5150 ++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
5151 +@@ -1258,7 +1258,6 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
5152 + break;
5153 + case PIN_CONFIG_INPUT_DEBOUNCE:
5154 + debounce = readl(db_reg);
5155 +- debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
5156 +
5157 + if (arg)
5158 + conf |= BYT_DEBOUNCE_EN;
5159 +@@ -1267,24 +1266,31 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
5160 +
5161 + switch (arg) {
5162 + case 375:
5163 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
5164 + debounce |= BYT_DEBOUNCE_PULSE_375US;
5165 + break;
5166 + case 750:
5167 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
5168 + debounce |= BYT_DEBOUNCE_PULSE_750US;
5169 + break;
5170 + case 1500:
5171 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
5172 + debounce |= BYT_DEBOUNCE_PULSE_1500US;
5173 + break;
5174 + case 3000:
5175 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
5176 + debounce |= BYT_DEBOUNCE_PULSE_3MS;
5177 + break;
5178 + case 6000:
5179 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
5180 + debounce |= BYT_DEBOUNCE_PULSE_6MS;
5181 + break;
5182 + case 12000:
5183 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
5184 + debounce |= BYT_DEBOUNCE_PULSE_12MS;
5185 + break;
5186 + case 24000:
5187 ++ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
5188 + debounce |= BYT_DEBOUNCE_PULSE_24MS;
5189 + break;
5190 + default:
5191 +diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
5192 +index 4fa69f988c7b7..6b2312e73f23f 100644
5193 +--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
5194 ++++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
5195 +@@ -729,6 +729,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
5196 + mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
5197 + bits |= BUFCFG_PU_EN;
5198 +
5199 ++ /* Set default strength value in case none is given */
5200 ++ if (arg == 1)
5201 ++ arg = 20000;
5202 ++
5203 + switch (arg) {
5204 + case 50000:
5205 + bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
5206 +@@ -749,6 +753,10 @@ static int mrfld_config_set_pin(struct mrfld_pinctrl *mp, unsigned int pin,
5207 + mask |= BUFCFG_Px_EN_MASK | BUFCFG_PUPD_VAL_MASK;
5208 + bits |= BUFCFG_PD_EN;
5209 +
5210 ++ /* Set default strength value in case none is given */
5211 ++ if (arg == 1)
5212 ++ arg = 20000;
5213 ++
5214 + switch (arg) {
5215 + case 50000:
5216 + bits |= BUFCFG_PUPD_VAL_50K << BUFCFG_PUPD_VAL_SHIFT;
5217 +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
5218 +index d6255049e5196..d9b9c11c7f8f2 100644
5219 +--- a/drivers/pinctrl/pinctrl-amd.c
5220 ++++ b/drivers/pinctrl/pinctrl-amd.c
5221 +@@ -439,7 +439,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
5222 + pin_reg &= ~BIT(LEVEL_TRIG_OFF);
5223 + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
5224 + pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
5225 +- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
5226 + irq_set_handler_locked(d, handle_edge_irq);
5227 + break;
5228 +
5229 +@@ -447,7 +446,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
5230 + pin_reg &= ~BIT(LEVEL_TRIG_OFF);
5231 + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
5232 + pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
5233 +- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
5234 + irq_set_handler_locked(d, handle_edge_irq);
5235 + break;
5236 +
5237 +@@ -455,7 +453,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
5238 + pin_reg &= ~BIT(LEVEL_TRIG_OFF);
5239 + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
5240 + pin_reg |= BOTH_EADGE << ACTIVE_LEVEL_OFF;
5241 +- pin_reg |= DB_TYPE_REMOVE_GLITCH << DB_CNTRL_OFF;
5242 + irq_set_handler_locked(d, handle_edge_irq);
5243 + break;
5244 +
5245 +@@ -463,8 +460,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
5246 + pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
5247 + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
5248 + pin_reg |= ACTIVE_HIGH << ACTIVE_LEVEL_OFF;
5249 +- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
5250 +- pin_reg |= DB_TYPE_PRESERVE_LOW_GLITCH << DB_CNTRL_OFF;
5251 + irq_set_handler_locked(d, handle_level_irq);
5252 + break;
5253 +
5254 +@@ -472,8 +467,6 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
5255 + pin_reg |= LEVEL_TRIGGER << LEVEL_TRIG_OFF;
5256 + pin_reg &= ~(ACTIVE_LEVEL_MASK << ACTIVE_LEVEL_OFF);
5257 + pin_reg |= ACTIVE_LOW << ACTIVE_LEVEL_OFF;
5258 +- pin_reg &= ~(DB_CNTRl_MASK << DB_CNTRL_OFF);
5259 +- pin_reg |= DB_TYPE_PRESERVE_HIGH_GLITCH << DB_CNTRL_OFF;
5260 + irq_set_handler_locked(d, handle_level_irq);
5261 + break;
5262 +
5263 +diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
5264 +index fb73dcbb5ef37..68dcf53aaac34 100644
5265 +--- a/drivers/pinctrl/pinctrl-falcon.c
5266 ++++ b/drivers/pinctrl/pinctrl-falcon.c
5267 +@@ -438,24 +438,28 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
5268 +
5269 + /* load and remap the pad resources of the different banks */
5270 + for_each_compatible_node(np, NULL, "lantiq,pad-falcon") {
5271 +- struct platform_device *ppdev = of_find_device_by_node(np);
5272 + const __be32 *bank = of_get_property(np, "lantiq,bank", NULL);
5273 + struct resource res;
5274 ++ struct platform_device *ppdev;
5275 + u32 avail;
5276 + int pins;
5277 +
5278 + if (!of_device_is_available(np))
5279 + continue;
5280 +
5281 +- if (!ppdev) {
5282 +- dev_err(&pdev->dev, "failed to find pad pdev\n");
5283 +- continue;
5284 +- }
5285 + if (!bank || *bank >= PORTS)
5286 + continue;
5287 + if (of_address_to_resource(np, 0, &res))
5288 + continue;
5289 ++
5290 ++ ppdev = of_find_device_by_node(np);
5291 ++ if (!ppdev) {
5292 ++ dev_err(&pdev->dev, "failed to find pad pdev\n");
5293 ++ continue;
5294 ++ }
5295 ++
5296 + falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
5297 ++ put_device(&ppdev->dev);
5298 + if (IS_ERR(falcon_info.clk[*bank])) {
5299 + dev_err(&ppdev->dev, "failed to get clock\n");
5300 + return PTR_ERR(falcon_info.clk[*bank]);
5301 +diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
5302 +index 61aaaf58c5993..ff9c2758d25e0 100644
5303 +--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
5304 ++++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
5305 +@@ -1001,20 +1001,22 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
5306 + if (bank == pctl->desc->irq_banks)
5307 + return;
5308 +
5309 ++ chained_irq_enter(chip, desc);
5310 ++
5311 + reg = sunxi_irq_status_reg_from_bank(pctl->desc, bank);
5312 + val = readl(pctl->membase + reg);
5313 +
5314 + if (val) {
5315 + int irqoffset;
5316 +
5317 +- chained_irq_enter(chip, desc);
5318 + for_each_set_bit(irqoffset, &val, IRQ_PER_BANK) {
5319 + int pin_irq = irq_find_mapping(pctl->domain,
5320 + bank * IRQ_PER_BANK + irqoffset);
5321 + generic_handle_irq(pin_irq);
5322 + }
5323 +- chained_irq_exit(chip, desc);
5324 + }
5325 ++
5326 ++ chained_irq_exit(chip, desc);
5327 + }
5328 +
5329 + static int sunxi_pinctrl_add_function(struct sunxi_pinctrl *pctl,
5330 +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
5331 +index fcfeadd1301f4..92400abe35520 100644
5332 +--- a/drivers/platform/x86/acer-wmi.c
5333 ++++ b/drivers/platform/x86/acer-wmi.c
5334 +@@ -124,6 +124,7 @@ static const struct key_entry acer_wmi_keymap[] __initconst = {
5335 + {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */
5336 + {KE_IGNORE, 0x81, {KEY_SLEEP} },
5337 + {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad Toggle */
5338 ++ {KE_IGNORE, 0x84, {KEY_KBDILLUMTOGGLE} }, /* Automatic Keyboard background light toggle */
5339 + {KE_KEY, KEY_TOUCHPAD_ON, {KEY_TOUCHPAD_ON} },
5340 + {KE_KEY, KEY_TOUCHPAD_OFF, {KEY_TOUCHPAD_OFF} },
5341 + {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} },
5342 +diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
5343 +index 0537d44d45a6e..9e9fc51557892 100644
5344 +--- a/drivers/platform/x86/dell-smbios-base.c
5345 ++++ b/drivers/platform/x86/dell-smbios-base.c
5346 +@@ -597,6 +597,7 @@ static int __init dell_smbios_init(void)
5347 + if (wmi && smm) {
5348 + pr_err("No SMBIOS backends available (wmi: %d, smm: %d)\n",
5349 + wmi, smm);
5350 ++ ret = -ENODEV;
5351 + goto fail_create_group;
5352 + }
5353 +
5354 +diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
5355 +index 1e6b4661c7645..36d6e72f50735 100644
5356 +--- a/drivers/platform/x86/intel-vbtn.c
5357 ++++ b/drivers/platform/x86/intel-vbtn.c
5358 +@@ -197,6 +197,18 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
5359 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"),
5360 + },
5361 + },
5362 ++ {
5363 ++ .matches = {
5364 ++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
5365 ++ DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"),
5366 ++ },
5367 ++ },
5368 ++ {
5369 ++ .matches = {
5370 ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
5371 ++ DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
5372 ++ },
5373 ++ },
5374 + {} /* Array terminator */
5375 + };
5376 +
5377 +diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
5378 +index 0c72de95b5ccd..b2a196a2b6c7c 100644
5379 +--- a/drivers/platform/x86/mlx-platform.c
5380 ++++ b/drivers/platform/x86/mlx-platform.c
5381 +@@ -212,15 +212,6 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = {
5382 + };
5383 +
5384 + /* Platform hotplug devices */
5385 +-static struct i2c_board_info mlxplat_mlxcpld_psu[] = {
5386 +- {
5387 +- I2C_BOARD_INFO("24c02", 0x51),
5388 +- },
5389 +- {
5390 +- I2C_BOARD_INFO("24c02", 0x50),
5391 +- },
5392 +-};
5393 +-
5394 + static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
5395 + {
5396 + I2C_BOARD_INFO("dps460", 0x59),
5397 +@@ -251,15 +242,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = {
5398 + .label = "psu1",
5399 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
5400 + .mask = BIT(0),
5401 +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0],
5402 +- .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
5403 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
5404 + },
5405 + {
5406 + .label = "psu2",
5407 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
5408 + .mask = BIT(1),
5409 +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1],
5410 +- .hpdev.nr = MLXPLAT_CPLD_PSU_DEFAULT_NR,
5411 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
5412 + },
5413 + };
5414 +
5415 +@@ -326,7 +315,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
5416 + .aggr_mask = MLXPLAT_CPLD_AGGR_PSU_MASK_DEF,
5417 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
5418 + .mask = MLXPLAT_CPLD_PSU_MASK,
5419 +- .count = ARRAY_SIZE(mlxplat_mlxcpld_psu),
5420 ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_psu_items_data),
5421 + .inversed = 1,
5422 + .health = false,
5423 + },
5424 +@@ -335,7 +324,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
5425 + .aggr_mask = MLXPLAT_CPLD_AGGR_PWR_MASK_DEF,
5426 + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
5427 + .mask = MLXPLAT_CPLD_PWR_MASK,
5428 +- .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr),
5429 ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data),
5430 + .inversed = 0,
5431 + .health = false,
5432 + },
5433 +@@ -344,7 +333,7 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = {
5434 + .aggr_mask = MLXPLAT_CPLD_AGGR_FAN_MASK_DEF,
5435 + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
5436 + .mask = MLXPLAT_CPLD_FAN_MASK,
5437 +- .count = ARRAY_SIZE(mlxplat_mlxcpld_fan),
5438 ++ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_fan_items_data),
5439 + .inversed = 1,
5440 + .health = false,
5441 + },
5442 +@@ -422,15 +411,13 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn274x_psu_items_data[] = {
5443 + .label = "psu1",
5444 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
5445 + .mask = BIT(0),
5446 +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[0],
5447 +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
5448 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
5449 + },
5450 + {
5451 + .label = "psu2",
5452 + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
5453 + .mask = BIT(1),
5454 +- .hpdev.brdinfo = &mlxplat_mlxcpld_psu[1],
5455 +- .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
5456 ++ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
5457 + },
5458 + };
5459 +
5460 +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
5461 +index 8cc01857bc5c0..a6e69f2495d23 100644
5462 +--- a/drivers/platform/x86/thinkpad_acpi.c
5463 ++++ b/drivers/platform/x86/thinkpad_acpi.c
5464 +@@ -3242,7 +3242,14 @@ static int hotkey_init_tablet_mode(void)
5465 +
5466 + in_tablet_mode = hotkey_gmms_get_tablet_mode(res,
5467 + &has_tablet_mode);
5468 +- if (has_tablet_mode)
5469 ++ /*
5470 ++ * The Yoga 11e series has 2 accelerometers described by a
5471 ++ * BOSC0200 ACPI node. This setup relies on a Windows service
5472 ++ * which calls special ACPI methods on this node to report
5473 ++ * the laptop/tent/tablet mode to the EC. The bmc150 iio driver
5474 ++ * does not support this, so skip the hotkey on these models.
5475 ++ */
5476 ++ if (has_tablet_mode && !acpi_dev_present("BOSC0200", "1", -1))
5477 + tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS;
5478 + type = "GMMS";
5479 + } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) {
5480 +@@ -9690,6 +9697,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
5481 + TPACPI_Q_LNV3('R', '0', 'B', true), /* Thinkpad 11e gen 3 */
5482 + TPACPI_Q_LNV3('R', '0', 'C', true), /* Thinkpad 13 */
5483 + TPACPI_Q_LNV3('R', '0', 'J', true), /* Thinkpad 13 gen 2 */
5484 ++ TPACPI_Q_LNV3('R', '0', 'K', true), /* Thinkpad 11e gen 4 celeron BIOS */
5485 + };
5486 +
5487 + static int __init tpacpi_battery_init(struct ibm_init_struct *ibm)
5488 +diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
5489 +index 46eb7716c35c8..84106a9836c8f 100644
5490 +--- a/drivers/power/supply/axp288_charger.c
5491 ++++ b/drivers/power/supply/axp288_charger.c
5492 +@@ -555,14 +555,15 @@ out:
5493 +
5494 + /*
5495 + * The HP Pavilion x2 10 series comes in a number of variants:
5496 +- * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D"
5497 +- * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E"
5498 +- * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4"
5499 ++ * Bay Trail SoC + AXP288 PMIC, Micro-USB, DMI_BOARD_NAME: "8021"
5500 ++ * Bay Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "815D"
5501 ++ * Cherry Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "813E"
5502 ++ * Cherry Trail SoC + TI PMIC, Type-C, DMI_BOARD_NAME: "827C" or "82F4"
5503 + *
5504 +- * The variants with the AXP288 PMIC are all kinds of special:
5505 ++ * The variants with the AXP288 + Type-C connector are all kinds of special:
5506 + *
5507 +- * 1. All variants use a Type-C connector which the AXP288 does not support, so
5508 +- * when using a Type-C charger it is not recognized. Unlike most AXP288 devices,
5509 ++ * 1. They use a Type-C connector which the AXP288 does not support, so when
5510 ++ * using a Type-C charger it is not recognized. Unlike most AXP288 devices,
5511 + * this model actually has mostly working ACPI AC / Battery code, the ACPI code
5512 + * "solves" this by simply setting the input_current_limit to 3A.
5513 + * There are still some issues with the ACPI code, so we use this native driver,
5514 +@@ -585,12 +586,17 @@ out:
5515 + */
5516 + static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = {
5517 + {
5518 +- /*
5519 +- * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry
5520 +- * Trail model has "HP", so we only match on product_name.
5521 +- */
5522 + .matches = {
5523 +- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
5524 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
5525 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
5526 ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "815D"),
5527 ++ },
5528 ++ },
5529 ++ {
5530 ++ .matches = {
5531 ++ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "HP"),
5532 ++ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
5533 ++ DMI_EXACT_MATCH(DMI_BOARD_NAME, "813E"),
5534 + },
5535 + },
5536 + {} /* Terminating entry */
5537 +diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
5538 +index b58df04d03b33..863208928cf0b 100644
5539 +--- a/drivers/power/supply/bq24190_charger.c
5540 ++++ b/drivers/power/supply/bq24190_charger.c
5541 +@@ -446,8 +446,10 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
5542 + return -EINVAL;
5543 +
5544 + ret = pm_runtime_get_sync(bdi->dev);
5545 +- if (ret < 0)
5546 ++ if (ret < 0) {
5547 ++ pm_runtime_put_noidle(bdi->dev);
5548 + return ret;
5549 ++ }
5550 +
5551 + ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
5552 + if (ret)
5553 +@@ -1092,8 +1094,10 @@ static int bq24190_charger_get_property(struct power_supply *psy,
5554 + dev_dbg(bdi->dev, "prop: %d\n", psp);
5555 +
5556 + ret = pm_runtime_get_sync(bdi->dev);
5557 +- if (ret < 0)
5558 ++ if (ret < 0) {
5559 ++ pm_runtime_put_noidle(bdi->dev);
5560 + return ret;
5561 ++ }
5562 +
5563 + switch (psp) {
5564 + case POWER_SUPPLY_PROP_CHARGE_TYPE:
5565 +@@ -1164,8 +1168,10 @@ static int bq24190_charger_set_property(struct power_supply *psy,
5566 + dev_dbg(bdi->dev, "prop: %d\n", psp);
5567 +
5568 + ret = pm_runtime_get_sync(bdi->dev);
5569 +- if (ret < 0)
5570 ++ if (ret < 0) {
5571 ++ pm_runtime_put_noidle(bdi->dev);
5572 + return ret;
5573 ++ }
5574 +
5575 + switch (psp) {
5576 + case POWER_SUPPLY_PROP_ONLINE:
5577 +@@ -1425,8 +1431,10 @@ static int bq24190_battery_get_property(struct power_supply *psy,
5578 + dev_dbg(bdi->dev, "prop: %d\n", psp);
5579 +
5580 + ret = pm_runtime_get_sync(bdi->dev);
5581 +- if (ret < 0)
5582 ++ if (ret < 0) {
5583 ++ pm_runtime_put_noidle(bdi->dev);
5584 + return ret;
5585 ++ }
5586 +
5587 + switch (psp) {
5588 + case POWER_SUPPLY_PROP_STATUS:
5589 +@@ -1471,8 +1479,10 @@ static int bq24190_battery_set_property(struct power_supply *psy,
5590 + dev_dbg(bdi->dev, "prop: %d\n", psp);
5591 +
5592 + ret = pm_runtime_get_sync(bdi->dev);
5593 +- if (ret < 0)
5594 ++ if (ret < 0) {
5595 ++ pm_runtime_put_noidle(bdi->dev);
5596 + return ret;
5597 ++ }
5598 +
5599 + switch (psp) {
5600 + case POWER_SUPPLY_PROP_ONLINE:
5601 +diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
5602 +index 8c3f5adf1bc65..2d76183756626 100644
5603 +--- a/drivers/ps3/ps3stor_lib.c
5604 ++++ b/drivers/ps3/ps3stor_lib.c
5605 +@@ -201,7 +201,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
5606 + dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
5607 + dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
5608 + dev->bounce_size, DMA_BIDIRECTIONAL);
5609 +- if (!dev->bounce_dma) {
5610 ++ if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) {
5611 + dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
5612 + __func__, __LINE__);
5613 + error = -ENODEV;
5614 +diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
5615 +index 15b40a8bc4fbb..5055ba2c6c940 100644
5616 +--- a/drivers/pwm/pwm-lp3943.c
5617 ++++ b/drivers/pwm/pwm-lp3943.c
5618 +@@ -278,6 +278,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
5619 + lp3943_pwm->chip.dev = &pdev->dev;
5620 + lp3943_pwm->chip.ops = &lp3943_pwm_ops;
5621 + lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
5622 ++ lp3943_pwm->chip.base = -1;
5623 +
5624 + platform_set_drvdata(pdev, lp3943_pwm);
5625 +
5626 +diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c
5627 +index 5d27c16edfb13..0d4112410b69d 100644
5628 +--- a/drivers/pwm/pwm-zx.c
5629 ++++ b/drivers/pwm/pwm-zx.c
5630 +@@ -241,6 +241,7 @@ static int zx_pwm_probe(struct platform_device *pdev)
5631 + ret = pwmchip_add(&zpc->chip);
5632 + if (ret < 0) {
5633 + dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
5634 ++ clk_disable_unprepare(zpc->pclk);
5635 + return ret;
5636 + }
5637 +
5638 +diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
5639 +index 99f86612f7751..dc78a523a69f2 100644
5640 +--- a/drivers/s390/block/dasd_alias.c
5641 ++++ b/drivers/s390/block/dasd_alias.c
5642 +@@ -256,7 +256,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
5643 + return;
5644 + device->discipline->get_uid(device, &uid);
5645 + spin_lock_irqsave(&lcu->lock, flags);
5646 +- list_del_init(&device->alias_list);
5647 + /* make sure that the workers don't use this device */
5648 + if (device == lcu->suc_data.device) {
5649 + spin_unlock_irqrestore(&lcu->lock, flags);
5650 +@@ -283,6 +282,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
5651 +
5652 + spin_lock_irqsave(&aliastree.lock, flags);
5653 + spin_lock(&lcu->lock);
5654 ++ list_del_init(&device->alias_list);
5655 + if (list_empty(&lcu->grouplist) &&
5656 + list_empty(&lcu->active_devices) &&
5657 + list_empty(&lcu->inactive_devices)) {
5658 +@@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device,
5659 + spin_unlock_irqrestore(&lcu->lock, flags);
5660 +
5661 + rc = dasd_sleep_on(cqr);
5662 +- if (rc && !suborder_not_supported(cqr)) {
5663 ++ if (!rc)
5664 ++ goto out;
5665 ++
5666 ++ if (suborder_not_supported(cqr)) {
5667 ++ /* suborder not supported or device unusable for IO */
5668 ++ rc = -EOPNOTSUPP;
5669 ++ } else {
5670 ++ /* IO failed but should be retried */
5671 + spin_lock_irqsave(&lcu->lock, flags);
5672 + lcu->flags |= NEED_UAC_UPDATE;
5673 + spin_unlock_irqrestore(&lcu->lock, flags);
5674 + }
5675 ++out:
5676 + dasd_sfree_request(cqr, cqr->memdev);
5677 + return rc;
5678 + }
5679 +@@ -503,6 +511,14 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
5680 + return rc;
5681 +
5682 + spin_lock_irqsave(&lcu->lock, flags);
5683 ++ /*
5684 ++ * there is another update needed skip the remaining handling
5685 ++ * the data might already be outdated
5686 ++ * but especially do not add the device to an LCU with pending
5687 ++ * update
5688 ++ */
5689 ++ if (lcu->flags & NEED_UAC_UPDATE)
5690 ++ goto out;
5691 + lcu->pav = NO_PAV;
5692 + for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
5693 + switch (lcu->uac->unit[i].ua_type) {
5694 +@@ -521,6 +537,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
5695 + alias_list) {
5696 + _add_device_to_lcu(lcu, device, refdev);
5697 + }
5698 ++out:
5699 + spin_unlock_irqrestore(&lcu->lock, flags);
5700 + return 0;
5701 + }
5702 +@@ -625,6 +642,7 @@ int dasd_alias_add_device(struct dasd_device *device)
5703 + }
5704 + if (lcu->flags & UPDATE_PENDING) {
5705 + list_move(&device->alias_list, &lcu->active_devices);
5706 ++ private->pavgroup = NULL;
5707 + _schedule_lcu_update(lcu, device);
5708 + }
5709 + spin_unlock_irqrestore(&lcu->lock, flags);
5710 +diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
5711 +index 6221a8372cee2..3660059784f74 100644
5712 +--- a/drivers/scsi/be2iscsi/be_main.c
5713 ++++ b/drivers/scsi/be2iscsi/be_main.c
5714 +@@ -3039,7 +3039,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
5715 + goto create_eq_error;
5716 + }
5717 +
5718 +- mem->dma = paddr;
5719 + mem->va = eq_vaddress;
5720 + ret = be_fill_queue(eq, phba->params.num_eq_entries,
5721 + sizeof(struct be_eq_entry), eq_vaddress);
5722 +@@ -3049,6 +3048,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
5723 + goto create_eq_error;
5724 + }
5725 +
5726 ++ mem->dma = paddr;
5727 + ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
5728 + BEISCSI_EQ_DELAY_DEF);
5729 + if (ret) {
5730 +@@ -3105,7 +3105,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
5731 + goto create_cq_error;
5732 + }
5733 +
5734 +- mem->dma = paddr;
5735 + ret = be_fill_queue(cq, phba->params.num_cq_entries,
5736 + sizeof(struct sol_cqe), cq_vaddress);
5737 + if (ret) {
5738 +@@ -3115,6 +3114,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
5739 + goto create_cq_error;
5740 + }
5741 +
5742 ++ mem->dma = paddr;
5743 + ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
5744 + false, 0);
5745 + if (ret) {
5746 +diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
5747 +index ba30ff86d5818..b27a3738d940c 100644
5748 +--- a/drivers/scsi/bnx2i/Kconfig
5749 ++++ b/drivers/scsi/bnx2i/Kconfig
5750 +@@ -3,6 +3,7 @@ config SCSI_BNX2_ISCSI
5751 + depends on NET
5752 + depends on PCI
5753 + depends on (IPV6 || IPV6=n)
5754 ++ depends on MMU
5755 + select SCSI_ISCSI_ATTRS
5756 + select NETDEVICES
5757 + select ETHERNET
5758 +diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
5759 +index e52599f441707..bc5dbe3bae5c5 100644
5760 +--- a/drivers/scsi/fnic/fnic_main.c
5761 ++++ b/drivers/scsi/fnic/fnic_main.c
5762 +@@ -746,6 +746,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
5763 + for (i = 0; i < FNIC_IO_LOCKS; i++)
5764 + spin_lock_init(&fnic->io_req_lock[i]);
5765 +
5766 ++ err = -ENOMEM;
5767 + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
5768 + if (!fnic->io_req_pool)
5769 + goto err_out_free_resources;
5770 +diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
5771 +index 9c22a2c93462e..fa758f9b82e09 100644
5772 +--- a/drivers/scsi/lpfc/lpfc_mem.c
5773 ++++ b/drivers/scsi/lpfc/lpfc_mem.c
5774 +@@ -560,8 +560,6 @@ lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
5775 + * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
5776 + * pool along a non-DMA-mapped container for it.
5777 + *
5778 +- * Notes: Not interrupt-safe. Must be called with no locks held.
5779 +- *
5780 + * Returns:
5781 + * pointer to HBQ on success
5782 + * NULL on failure
5783 +@@ -631,7 +629,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
5784 + {
5785 + struct rqb_dmabuf *dma_buf;
5786 +
5787 +- dma_buf = kzalloc(sizeof(struct rqb_dmabuf), GFP_KERNEL);
5788 ++ dma_buf = kzalloc(sizeof(*dma_buf), GFP_KERNEL);
5789 + if (!dma_buf)
5790 + return NULL;
5791 +
5792 +@@ -754,7 +752,6 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
5793 + drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys);
5794 + rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe);
5795 + if (rc < 0) {
5796 +- (rqbp->rqb_free_buffer)(phba, rqb_entry);
5797 + lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5798 + "6409 Cannot post to HRQ %d: %x %x %x "
5799 + "DRQ %x %x\n",
5800 +@@ -764,6 +761,7 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp)
5801 + rqb_entry->hrq->entry_count,
5802 + rqb_entry->drq->host_index,
5803 + rqb_entry->drq->hba_index);
5804 ++ (rqbp->rqb_free_buffer)(phba, rqb_entry);
5805 + } else {
5806 + list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list);
5807 + rqbp->buffer_count++;
5808 +diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
5809 +index 2ab351260e815..a7b14875af5fa 100644
5810 +--- a/drivers/scsi/lpfc/lpfc_sli.c
5811 ++++ b/drivers/scsi/lpfc/lpfc_sli.c
5812 +@@ -6755,12 +6755,16 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
5813 + struct rqb_dmabuf *rqb_buffer;
5814 + LIST_HEAD(rqb_buf_list);
5815 +
5816 +- spin_lock_irqsave(&phba->hbalock, flags);
5817 + rqbp = hrq->rqbp;
5818 + for (i = 0; i < count; i++) {
5819 ++ spin_lock_irqsave(&phba->hbalock, flags);
5820 + /* IF RQ is already full, don't bother */
5821 +- if (rqbp->buffer_count + i >= rqbp->entry_count - 1)
5822 ++ if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
5823 ++ spin_unlock_irqrestore(&phba->hbalock, flags);
5824 + break;
5825 ++ }
5826 ++ spin_unlock_irqrestore(&phba->hbalock, flags);
5827 ++
5828 + rqb_buffer = rqbp->rqb_alloc_buffer(phba);
5829 + if (!rqb_buffer)
5830 + break;
5831 +@@ -6769,6 +6773,8 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
5832 + rqb_buffer->idx = idx;
5833 + list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
5834 + }
5835 ++
5836 ++ spin_lock_irqsave(&phba->hbalock, flags);
5837 + while (!list_empty(&rqb_buf_list)) {
5838 + list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
5839 + hbuf.list);
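
The lpfc_post_rq_buffer() change above narrows the hbalock scope: the full-queue check and the final list splice each take the lock briefly, while rqb_alloc_buffer() runs with the lock dropped. A compact userspace sketch of that "allocate outside the lock, splice under it" pattern follows, using a pthread mutex and a singly linked list; every name here is invented for the illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *next;
	int idx;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct buf *queue_head;
static int queue_count;
static const int queue_limit = 64;

static int post_buffers(int count)
{
	struct buf *pending = NULL;
	int i, posted = 0;

	for (i = 0; i < count; i++) {
		struct buf *b;

		/* check for a full queue under the lock, but never
		 * allocate while holding it */
		pthread_mutex_lock(&queue_lock);
		if (queue_count + posted >= queue_limit) {
			pthread_mutex_unlock(&queue_lock);
			break;
		}
		pthread_mutex_unlock(&queue_lock);

		b = malloc(sizeof(*b));	/* "may sleep", in kernel terms */
		if (!b)
			break;
		b->idx = i;
		b->next = pending;
		pending = b;
		posted++;
	}

	/* splice everything that was built up in one short critical section */
	pthread_mutex_lock(&queue_lock);
	while (pending) {
		struct buf *b = pending;

		pending = b->next;
		b->next = queue_head;
		queue_head = b;
		queue_count++;
	}
	pthread_mutex_unlock(&queue_lock);

	return posted;
}

int main(void)
{
	printf("posted %d buffers\n", post_buffers(16));
	return 0;
}
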
5840 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
5841 +index 21f971447dd8c..83d25ee88f028 100644
5842 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
5843 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
5844 +@@ -7192,7 +7192,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
5845 + int error = 0, i;
5846 + void *sense = NULL;
5847 + dma_addr_t sense_handle;
5848 +- unsigned long *sense_ptr;
5849 ++ void *sense_ptr;
5850 + u32 opcode = 0;
5851 +
5852 + memset(kbuff_arr, 0, sizeof(kbuff_arr));
5853 +@@ -7309,6 +7309,13 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
5854 + }
5855 +
5856 + if (ioc->sense_len) {
5857 ++ /* make sure the pointer is part of the frame */
5858 ++ if (ioc->sense_off >
5859 ++ (sizeof(union megasas_frame) - sizeof(__le64))) {
5860 ++ error = -EINVAL;
5861 ++ goto out;
5862 ++ }
5863 ++
5864 + sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
5865 + &sense_handle, GFP_KERNEL);
5866 + if (!sense) {
5867 +@@ -7316,12 +7323,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
5868 + goto out;
5869 + }
5870 +
5871 +- sense_ptr =
5872 +- (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off);
5873 ++ sense_ptr = (void *)cmd->frame + ioc->sense_off;
5874 + if (instance->consistent_mask_64bit)
5875 +- *sense_ptr = cpu_to_le64(sense_handle);
5876 ++ put_unaligned_le64(sense_handle, sense_ptr);
5877 + else
5878 +- *sense_ptr = cpu_to_le32(sense_handle);
5879 ++ put_unaligned_le32(sense_handle, sense_ptr);
5880 + }
5881 +
5882 + /*
5883 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
5884 +index 9fbe20e38ad07..07959047d4dc4 100644
5885 +--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
5886 ++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
5887 +@@ -5771,7 +5771,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
5888 +
5889 + r = _base_handshake_req_reply_wait(ioc,
5890 + sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
5891 +- sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
5892 ++ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 30);
5893 +
5894 + if (r != 0) {
5895 + pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
5896 +diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
5897 +index 7a697ca68501e..1d59d7447a1c8 100644
5898 +--- a/drivers/scsi/pm8001/pm8001_init.c
5899 ++++ b/drivers/scsi/pm8001/pm8001_init.c
5900 +@@ -1059,7 +1059,8 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
5901 +
5902 + pm8001_init_sas_add(pm8001_ha);
5903 + /* phy setting support for motherboard controller */
5904 +- if (pm8001_configure_phy_settings(pm8001_ha))
5905 ++ rc = pm8001_configure_phy_settings(pm8001_ha);
5906 ++ if (rc)
5907 + goto err_out_shost;
5908 +
5909 + pm8001_post_sas_ha_init(shost, chip);
5910 +diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
5911 +index 763c7628356b1..eaa50328de90c 100644
5912 +--- a/drivers/scsi/qedi/qedi_main.c
5913 ++++ b/drivers/scsi/qedi/qedi_main.c
5914 +@@ -2580,7 +2580,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
5915 + QEDI_ERR(&qedi->dbg_ctx,
5916 + "Unable to start offload thread!\n");
5917 + rc = -ENODEV;
5918 +- goto free_cid_que;
5919 ++ goto free_tmf_thread;
5920 + }
5921 +
5922 + /* F/w needs 1st task context memory entry for performance */
5923 +@@ -2600,6 +2600,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
5924 +
5925 + return 0;
5926 +
5927 ++free_tmf_thread:
5928 ++ destroy_workqueue(qedi->tmf_thread);
5929 + free_cid_que:
5930 + qedi_release_cid_que(qedi);
5931 + free_uio:
5932 +diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
5933 +index c501fb5190a38..fe5ae2b221c19 100644
5934 +--- a/drivers/scsi/scsi_lib.c
5935 ++++ b/drivers/scsi/scsi_lib.c
5936 +@@ -3446,6 +3446,78 @@ void sdev_enable_disk_events(struct scsi_device *sdev)
5937 + }
5938 + EXPORT_SYMBOL(sdev_enable_disk_events);
5939 +
5940 ++static unsigned char designator_prio(const unsigned char *d)
5941 ++{
5942 ++ if (d[1] & 0x30)
5943 ++ /* not associated with LUN */
5944 ++ return 0;
5945 ++
5946 ++ if (d[3] == 0)
5947 ++ /* invalid length */
5948 ++ return 0;
5949 ++
5950 ++ /*
5951 ++ * Order of preference for lun descriptor:
5952 ++ * - SCSI name string
5953 ++ * - NAA IEEE Registered Extended
5954 ++ * - EUI-64 based 16-byte
5955 ++ * - EUI-64 based 12-byte
5956 ++ * - NAA IEEE Registered
5957 ++ * - NAA IEEE Extended
5958 ++ * - EUI-64 based 8-byte
5959 ++ * - SCSI name string (truncated)
5960 ++ * - T10 Vendor ID
5961 ++ * as longer descriptors reduce the likelyhood
5962 ++ * of identification clashes.
5963 ++ */
5964 ++
5965 ++ switch (d[1] & 0xf) {
5966 ++ case 8:
5967 ++ /* SCSI name string, variable-length UTF-8 */
5968 ++ return 9;
5969 ++ case 3:
5970 ++ switch (d[4] >> 4) {
5971 ++ case 6:
5972 ++ /* NAA registered extended */
5973 ++ return 8;
5974 ++ case 5:
5975 ++ /* NAA registered */
5976 ++ return 5;
5977 ++ case 4:
5978 ++ /* NAA extended */
5979 ++ return 4;
5980 ++ case 3:
5981 ++ /* NAA locally assigned */
5982 ++ return 1;
5983 ++ default:
5984 ++ break;
5985 ++ }
5986 ++ break;
5987 ++ case 2:
5988 ++ switch (d[3]) {
5989 ++ case 16:
5990 ++ /* EUI64-based, 16 byte */
5991 ++ return 7;
5992 ++ case 12:
5993 ++ /* EUI64-based, 12 byte */
5994 ++ return 6;
5995 ++ case 8:
5996 ++ /* EUI64-based, 8 byte */
5997 ++ return 3;
5998 ++ default:
5999 ++ break;
6000 ++ }
6001 ++ break;
6002 ++ case 1:
6003 ++ /* T10 vendor ID */
6004 ++ return 1;
6005 ++ default:
6006 ++ break;
6007 ++ }
6008 ++
6009 ++ return 0;
6010 ++}
6011 ++
6012 + /**
6013 + * scsi_vpd_lun_id - return a unique device identification
6014 + * @sdev: SCSI device
6015 +@@ -3462,7 +3534,7 @@ EXPORT_SYMBOL(sdev_enable_disk_events);
6016 + */
6017 + int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
6018 + {
6019 +- u8 cur_id_type = 0xff;
6020 ++ u8 cur_id_prio = 0;
6021 + u8 cur_id_size = 0;
6022 + const unsigned char *d, *cur_id_str;
6023 + const struct scsi_vpd *vpd_pg83;
6024 +@@ -3475,20 +3547,6 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
6025 + return -ENXIO;
6026 + }
6027 +
6028 +- /*
6029 +- * Look for the correct descriptor.
6030 +- * Order of preference for lun descriptor:
6031 +- * - SCSI name string
6032 +- * - NAA IEEE Registered Extended
6033 +- * - EUI-64 based 16-byte
6034 +- * - EUI-64 based 12-byte
6035 +- * - NAA IEEE Registered
6036 +- * - NAA IEEE Extended
6037 +- * - T10 Vendor ID
6038 +- * as longer descriptors reduce the likelyhood
6039 +- * of identification clashes.
6040 +- */
6041 +-
6042 + /* The id string must be at least 20 bytes + terminating NULL byte */
6043 + if (id_len < 21) {
6044 + rcu_read_unlock();
6045 +@@ -3498,8 +3556,9 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
6046 + memset(id, 0, id_len);
6047 + d = vpd_pg83->data + 4;
6048 + while (d < vpd_pg83->data + vpd_pg83->len) {
6049 +- /* Skip designators not referring to the LUN */
6050 +- if ((d[1] & 0x30) != 0x00)
6051 ++ u8 prio = designator_prio(d);
6052 ++
6053 ++ if (prio == 0 || cur_id_prio > prio)
6054 + goto next_desig;
6055 +
6056 + switch (d[1] & 0xf) {
6057 +@@ -3507,28 +3566,19 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
6058 + /* T10 Vendor ID */
6059 + if (cur_id_size > d[3])
6060 + break;
6061 +- /* Prefer anything */
6062 +- if (cur_id_type > 0x01 && cur_id_type != 0xff)
6063 +- break;
6064 ++ cur_id_prio = prio;
6065 + cur_id_size = d[3];
6066 + if (cur_id_size + 4 > id_len)
6067 + cur_id_size = id_len - 4;
6068 + cur_id_str = d + 4;
6069 +- cur_id_type = d[1] & 0xf;
6070 + id_size = snprintf(id, id_len, "t10.%*pE",
6071 + cur_id_size, cur_id_str);
6072 + break;
6073 + case 0x2:
6074 + /* EUI-64 */
6075 +- if (cur_id_size > d[3])
6076 +- break;
6077 +- /* Prefer NAA IEEE Registered Extended */
6078 +- if (cur_id_type == 0x3 &&
6079 +- cur_id_size == d[3])
6080 +- break;
6081 ++ cur_id_prio = prio;
6082 + cur_id_size = d[3];
6083 + cur_id_str = d + 4;
6084 +- cur_id_type = d[1] & 0xf;
6085 + switch (cur_id_size) {
6086 + case 8:
6087 + id_size = snprintf(id, id_len,
6088 +@@ -3546,17 +3596,14 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
6089 + cur_id_str);
6090 + break;
6091 + default:
6092 +- cur_id_size = 0;
6093 + break;
6094 + }
6095 + break;
6096 + case 0x3:
6097 + /* NAA */
6098 +- if (cur_id_size > d[3])
6099 +- break;
6100 ++ cur_id_prio = prio;
6101 + cur_id_size = d[3];
6102 + cur_id_str = d + 4;
6103 +- cur_id_type = d[1] & 0xf;
6104 + switch (cur_id_size) {
6105 + case 8:
6106 + id_size = snprintf(id, id_len,
6107 +@@ -3569,26 +3616,25 @@ int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len)
6108 + cur_id_str);
6109 + break;
6110 + default:
6111 +- cur_id_size = 0;
6112 + break;
6113 + }
6114 + break;
6115 + case 0x8:
6116 + /* SCSI name string */
6117 +- if (cur_id_size + 4 > d[3])
6118 ++ if (cur_id_size > d[3])
6119 + break;
6120 + /* Prefer others for truncated descriptor */
6121 +- if (cur_id_size && d[3] > id_len)
6122 +- break;
6123 ++ if (d[3] > id_len) {
6124 ++ prio = 2;
6125 ++ if (cur_id_prio > prio)
6126 ++ break;
6127 ++ }
6128 ++ cur_id_prio = prio;
6129 + cur_id_size = id_size = d[3];
6130 + cur_id_str = d + 4;
6131 +- cur_id_type = d[1] & 0xf;
6132 + if (cur_id_size >= id_len)
6133 + cur_id_size = id_len - 1;
6134 + memcpy(id, cur_id_str, cur_id_size);
6135 +- /* Decrease priority for truncated descriptor */
6136 +- if (cur_id_size != id_size)
6137 +- cur_id_size = 6;
6138 + break;
6139 + default:
6140 + break;
6141 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
6142 +index 7e4e6e982055e..61b1eae42ea85 100644
6143 +--- a/drivers/scsi/ufs/ufshcd.c
6144 ++++ b/drivers/scsi/ufs/ufshcd.c
6145 +@@ -1281,8 +1281,15 @@ static int ufshcd_devfreq_target(struct device *dev,
6146 + }
6147 + spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
6148 +
6149 ++ pm_runtime_get_noresume(hba->dev);
6150 ++ if (!pm_runtime_active(hba->dev)) {
6151 ++ pm_runtime_put_noidle(hba->dev);
6152 ++ ret = -EAGAIN;
6153 ++ goto out;
6154 ++ }
6155 + start = ktime_get();
6156 + ret = ufshcd_devfreq_scale(hba, scale_up);
6157 ++ pm_runtime_put(hba->dev);
6158 +
6159 + trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
6160 + (scale_up ? "up" : "down"),
6161 +diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
6162 +index 522a87fc573a6..44021620d1013 100644
6163 +--- a/drivers/slimbus/qcom-ngd-ctrl.c
6164 ++++ b/drivers/slimbus/qcom-ngd-ctrl.c
6165 +@@ -1200,6 +1200,9 @@ static int qcom_slim_ngd_runtime_resume(struct device *dev)
6166 + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
6167 + int ret = 0;
6168 +
6169 ++ if (!ctrl->qmi.handle)
6170 ++ return 0;
6171 ++
6172 + if (ctrl->state >= QCOM_SLIM_NGD_CTRL_ASLEEP)
6173 + ret = qcom_slim_ngd_power_up(ctrl);
6174 + if (ret) {
6175 +@@ -1493,6 +1496,9 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
6176 + struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
6177 + int ret = 0;
6178 +
6179 ++ if (!ctrl->qmi.handle)
6180 ++ return 0;
6181 ++
6182 + ret = qcom_slim_qmi_power_request(ctrl, false);
6183 + if (ret && ret != -EBUSY)
6184 + dev_info(ctrl->dev, "slim resource not idle:%d\n", ret);
6185 +diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
6186 +index b60b77bfaffae..ea6f8904c01b5 100644
6187 +--- a/drivers/soc/fsl/dpio/dpio-driver.c
6188 ++++ b/drivers/soc/fsl/dpio/dpio-driver.c
6189 +@@ -53,7 +53,6 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
6190 + struct dpio_priv *priv;
6191 + int error;
6192 + struct fsl_mc_device_irq *irq;
6193 +- cpumask_t mask;
6194 +
6195 + priv = dev_get_drvdata(&dpio_dev->dev);
6196 +
6197 +@@ -72,9 +71,7 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
6198 + }
6199 +
6200 + /* set the affinity hint */
6201 +- cpumask_clear(&mask);
6202 +- cpumask_set_cpu(cpu, &mask);
6203 +- if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
6204 ++ if (irq_set_affinity_hint(irq->msi_desc->irq, cpumask_of(cpu)))
6205 + dev_err(&dpio_dev->dev,
6206 + "irq_set_affinity failed irq %d cpu %d\n",
6207 + irq->msi_desc->irq, cpu);
6208 +diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
6209 +index 5b24bb4bfbf66..ef54f1638d207 100644
6210 +--- a/drivers/soc/mediatek/mtk-scpsys.c
6211 ++++ b/drivers/soc/mediatek/mtk-scpsys.c
6212 +@@ -454,6 +454,7 @@ static void mtk_register_power_domains(struct platform_device *pdev,
6213 + for (i = 0; i < num; i++) {
6214 + struct scp_domain *scpd = &scp->domains[i];
6215 + struct generic_pm_domain *genpd = &scpd->genpd;
6216 ++ bool on;
6217 +
6218 + /*
6219 + * Initially turn on all domains to make the domains usable
6220 +@@ -461,9 +462,9 @@ static void mtk_register_power_domains(struct platform_device *pdev,
6221 + * software. The unused domains will be switched off during
6222 + * late_init time.
6223 + */
6224 +- genpd->power_on(genpd);
6225 ++ on = !WARN_ON(genpd->power_on(genpd) < 0);
6226 +
6227 +- pm_genpd_init(genpd, NULL, false);
6228 ++ pm_genpd_init(genpd, NULL, !on);
6229 + }
6230 +
6231 + /*
6232 +diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
6233 +index ee89ffb6dde84..7369b061929bb 100644
6234 +--- a/drivers/soc/qcom/qcom-geni-se.c
6235 ++++ b/drivers/soc/qcom/qcom-geni-se.c
6236 +@@ -275,6 +275,7 @@ static void geni_se_select_fifo_mode(struct geni_se *se)
6237 +
6238 + static void geni_se_select_dma_mode(struct geni_se *se)
6239 + {
6240 ++ u32 proto = geni_se_read_proto(se);
6241 + u32 val;
6242 +
6243 + writel_relaxed(0, se->base + SE_GSI_EVENT_EN);
6244 +@@ -284,6 +285,18 @@ static void geni_se_select_dma_mode(struct geni_se *se)
6245 + writel_relaxed(0xffffffff, se->base + SE_DMA_RX_IRQ_CLR);
6246 + writel_relaxed(0xffffffff, se->base + SE_IRQ_EN);
6247 +
6248 ++ val = readl_relaxed(se->base + SE_GENI_M_IRQ_EN);
6249 ++ if (proto != GENI_SE_UART) {
6250 ++ val &= ~(M_CMD_DONE_EN | M_TX_FIFO_WATERMARK_EN);
6251 ++ val &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
6252 ++ }
6253 ++ writel_relaxed(val, se->base + SE_GENI_M_IRQ_EN);
6254 ++
6255 ++ val = readl_relaxed(se->base + SE_GENI_S_IRQ_EN);
6256 ++ if (proto != GENI_SE_UART)
6257 ++ val &= ~S_CMD_DONE_EN;
6258 ++ writel_relaxed(val, se->base + SE_GENI_S_IRQ_EN);
6259 ++
6260 + val = readl_relaxed(se->base + SE_GENI_DMA_MODE_EN);
6261 + val |= GENI_DMA_MODE_EN;
6262 + writel_relaxed(val, se->base + SE_GENI_DMA_MODE_EN);
6263 +@@ -633,7 +646,7 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
6264 + writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L);
6265 + writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H);
6266 + writel_relaxed(GENI_SE_DMA_EOT_BUF, se->base + SE_DMA_TX_ATTR);
6267 +- writel_relaxed(len, se->base + SE_DMA_TX_LEN);
6268 ++ writel(len, se->base + SE_DMA_TX_LEN);
6269 + return 0;
6270 + }
6271 + EXPORT_SYMBOL(geni_se_tx_dma_prep);
6272 +@@ -667,7 +680,7 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
6273 + writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H);
6274 + /* RX does not have EOT buffer type bit. So just reset RX_ATTR */
6275 + writel_relaxed(0, se->base + SE_DMA_RX_ATTR);
6276 +- writel_relaxed(len, se->base + SE_DMA_RX_LEN);
6277 ++ writel(len, se->base + SE_DMA_RX_LEN);
6278 + return 0;
6279 + }
6280 + EXPORT_SYMBOL(geni_se_rx_dma_prep);
6281 +diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
6282 +index c22503cd1edf9..7908e7f2850f9 100644
6283 +--- a/drivers/soc/qcom/smp2p.c
6284 ++++ b/drivers/soc/qcom/smp2p.c
6285 +@@ -326,15 +326,16 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
6286 + static int smp2p_update_bits(void *data, u32 mask, u32 value)
6287 + {
6288 + struct smp2p_entry *entry = data;
6289 ++ unsigned long flags;
6290 + u32 orig;
6291 + u32 val;
6292 +
6293 +- spin_lock(&entry->lock);
6294 ++ spin_lock_irqsave(&entry->lock, flags);
6295 + val = orig = readl(entry->value);
6296 + val &= ~mask;
6297 + val |= value;
6298 + writel(val, entry->value);
6299 +- spin_unlock(&entry->lock);
6300 ++ spin_unlock_irqrestore(&entry->lock, flags);
6301 +
6302 + if (val != orig)
6303 + qcom_smp2p_kick(entry->smp2p);
6304 +diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
6305 +index 5373f4c16b54c..4403b89561fd6 100644
6306 +--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
6307 ++++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
6308 +@@ -105,7 +105,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num)
6309 + unsigned int i;
6310 +
6311 + for (i = 0; i < num; i++)
6312 +- if (value < speedos[num])
6313 ++ if (value < speedos[i])
6314 + return i;
6315 +
6316 + return -EINVAL;
6317 +diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
6318 +index 224d7ddeeb767..eb2e87229c1da 100644
6319 +--- a/drivers/soc/ti/knav_dma.c
6320 ++++ b/drivers/soc/ti/knav_dma.c
6321 +@@ -759,8 +759,9 @@ static int knav_dma_probe(struct platform_device *pdev)
6322 + pm_runtime_enable(kdev->dev);
6323 + ret = pm_runtime_get_sync(kdev->dev);
6324 + if (ret < 0) {
6325 ++ pm_runtime_put_noidle(kdev->dev);
6326 + dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
6327 +- return ret;
6328 ++ goto err_pm_disable;
6329 + }
6330 +
6331 + /* Initialise all packet dmas */
6332 +@@ -774,7 +775,8 @@ static int knav_dma_probe(struct platform_device *pdev)
6333 +
6334 + if (list_empty(&kdev->list)) {
6335 + dev_err(dev, "no valid dma instance\n");
6336 +- return -ENODEV;
6337 ++ ret = -ENODEV;
6338 ++ goto err_put_sync;
6339 + }
6340 +
6341 + debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
6342 +@@ -782,6 +784,13 @@ static int knav_dma_probe(struct platform_device *pdev)
6343 +
6344 + device_ready = true;
6345 + return ret;
6346 ++
6347 ++err_put_sync:
6348 ++ pm_runtime_put_sync(kdev->dev);
6349 ++err_pm_disable:
6350 ++ pm_runtime_disable(kdev->dev);
6351 ++
6352 ++ return ret;
6353 + }
6354 +
6355 + static int knav_dma_remove(struct platform_device *pdev)
6356 +diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
6357 +index ef36acc0e7088..9f5ce52e6c161 100644
6358 +--- a/drivers/soc/ti/knav_qmss_queue.c
6359 ++++ b/drivers/soc/ti/knav_qmss_queue.c
6360 +@@ -1799,6 +1799,7 @@ static int knav_queue_probe(struct platform_device *pdev)
6361 + pm_runtime_enable(&pdev->dev);
6362 + ret = pm_runtime_get_sync(&pdev->dev);
6363 + if (ret < 0) {
6364 ++ pm_runtime_put_noidle(&pdev->dev);
6365 + dev_err(dev, "Failed to enable QMSS\n");
6366 + return ret;
6367 + }
6368 +@@ -1866,9 +1867,10 @@ static int knav_queue_probe(struct platform_device *pdev)
6369 + if (ret)
6370 + goto err;
6371 +
6372 +- regions = of_get_child_by_name(node, "descriptor-regions");
6373 ++ regions = of_get_child_by_name(node, "descriptor-regions");
6374 + if (!regions) {
6375 + dev_err(dev, "descriptor-regions not specified\n");
6376 ++ ret = -ENODEV;
6377 + goto err;
6378 + }
6379 + ret = knav_queue_setup_regions(kdev, regions);
6380 +diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
6381 +index 11895c98aae3b..8ea7e31b8c2fb 100644
6382 +--- a/drivers/spi/spi-bcm2835aux.c
6383 ++++ b/drivers/spi/spi-bcm2835aux.c
6384 +@@ -407,7 +407,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
6385 + unsigned long clk_hz;
6386 + int err;
6387 +
6388 +- master = spi_alloc_master(&pdev->dev, sizeof(*bs));
6389 ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*bs));
6390 + if (!master) {
6391 + dev_err(&pdev->dev, "spi_alloc_master() failed\n");
6392 + return -ENOMEM;
6393 +@@ -439,30 +439,27 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
6394 + /* the main area */
6395 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
6396 + bs->regs = devm_ioremap_resource(&pdev->dev, res);
6397 +- if (IS_ERR(bs->regs)) {
6398 +- err = PTR_ERR(bs->regs);
6399 +- goto out_master_put;
6400 +- }
6401 ++ if (IS_ERR(bs->regs))
6402 ++ return PTR_ERR(bs->regs);
6403 +
6404 + bs->clk = devm_clk_get(&pdev->dev, NULL);
6405 + if ((!bs->clk) || (IS_ERR(bs->clk))) {
6406 + err = PTR_ERR(bs->clk);
6407 + dev_err(&pdev->dev, "could not get clk: %d\n", err);
6408 +- goto out_master_put;
6409 ++ return err;
6410 + }
6411 +
6412 + bs->irq = platform_get_irq(pdev, 0);
6413 + if (bs->irq <= 0) {
6414 + dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
6415 +- err = bs->irq ? bs->irq : -ENODEV;
6416 +- goto out_master_put;
6417 ++ return bs->irq ? bs->irq : -ENODEV;
6418 + }
6419 +
6420 + /* this also enables the HW block */
6421 + err = clk_prepare_enable(bs->clk);
6422 + if (err) {
6423 + dev_err(&pdev->dev, "could not prepare clock: %d\n", err);
6424 +- goto out_master_put;
6425 ++ return err;
6426 + }
6427 +
6428 + /* just checking if the clock returns a sane value */
6429 +@@ -495,8 +492,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
6430 +
6431 + out_clk_disable:
6432 + clk_disable_unprepare(bs->clk);
6433 +-out_master_put:
6434 +- spi_master_put(master);
6435 + return err;
6436 + }
6437 +
6438 +diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
6439 +index 1669c554ea340..2ad7b3f3666be 100644
6440 +--- a/drivers/spi/spi-bcm63xx-hsspi.c
6441 ++++ b/drivers/spi/spi-bcm63xx-hsspi.c
6442 +@@ -487,8 +487,10 @@ static int bcm63xx_hsspi_resume(struct device *dev)
6443 +
6444 + if (bs->pll_clk) {
6445 + ret = clk_prepare_enable(bs->pll_clk);
6446 +- if (ret)
6447 ++ if (ret) {
6448 ++ clk_disable_unprepare(bs->clk);
6449 + return ret;
6450 ++ }
6451 + }
6452 +
6453 + spi_master_resume(master);
6454 +diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
6455 +index a02099c90c5c2..b56038945f411 100644
6456 +--- a/drivers/spi/spi-davinci.c
6457 ++++ b/drivers/spi/spi-davinci.c
6458 +@@ -1086,13 +1086,13 @@ static int davinci_spi_remove(struct platform_device *pdev)
6459 + spi_bitbang_stop(&dspi->bitbang);
6460 +
6461 + clk_disable_unprepare(dspi->clk);
6462 +- spi_master_put(master);
6463 +
6464 + if (dspi->dma_rx) {
6465 + dma_release_channel(dspi->dma_rx);
6466 + dma_release_channel(dspi->dma_tx);
6467 + }
6468 +
6469 ++ spi_master_put(master);
6470 + return 0;
6471 + }
6472 +
6473 +diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
6474 +index e4b31d6e6e33e..25a545c985d4a 100644
6475 +--- a/drivers/spi/spi-img-spfi.c
6476 ++++ b/drivers/spi/spi-img-spfi.c
6477 +@@ -774,8 +774,10 @@ static int img_spfi_resume(struct device *dev)
6478 + int ret;
6479 +
6480 + ret = pm_runtime_get_sync(dev);
6481 +- if (ret)
6482 ++ if (ret) {
6483 ++ pm_runtime_put_noidle(dev);
6484 + return ret;
6485 ++ }
6486 + spfi_reset(spfi);
6487 + pm_runtime_put(dev);
6488 +
6489 +diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
6490 +index 6ac95a2a21cef..4a7375ecb65ef 100644
6491 +--- a/drivers/spi/spi-mxs.c
6492 ++++ b/drivers/spi/spi-mxs.c
6493 +@@ -605,6 +605,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
6494 +
6495 + ret = pm_runtime_get_sync(ssp->dev);
6496 + if (ret < 0) {
6497 ++ pm_runtime_put_noidle(ssp->dev);
6498 + dev_err(ssp->dev, "runtime_get_sync failed\n");
6499 + goto out_pm_runtime_disable;
6500 + }
6501 +diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
6502 +index 288002f6c613e..661a40c653e90 100644
6503 +--- a/drivers/spi/spi-pic32.c
6504 ++++ b/drivers/spi/spi-pic32.c
6505 +@@ -839,6 +839,7 @@ static int pic32_spi_probe(struct platform_device *pdev)
6506 + return 0;
6507 +
6508 + err_bailout:
6509 ++ pic32_spi_dma_unprep(pic32s);
6510 + clk_disable_unprepare(pic32s->clk);
6511 + err_master:
6512 + spi_master_put(master);
6513 +diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
6514 +index 3641d0e20135b..1d7fd6dbaf876 100644
6515 +--- a/drivers/spi/spi-rb4xx.c
6516 ++++ b/drivers/spi/spi-rb4xx.c
6517 +@@ -148,7 +148,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
6518 + if (IS_ERR(spi_base))
6519 + return PTR_ERR(spi_base);
6520 +
6521 +- master = spi_alloc_master(&pdev->dev, sizeof(*rbspi));
6522 ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(*rbspi));
6523 + if (!master)
6524 + return -ENOMEM;
6525 +
6526 +diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
6527 +index 52cf0e9189c23..64cf1f572b6dd 100644
6528 +--- a/drivers/spi/spi-sc18is602.c
6529 ++++ b/drivers/spi/spi-sc18is602.c
6530 +@@ -248,13 +248,12 @@ static int sc18is602_probe(struct i2c_client *client,
6531 + struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
6532 + struct sc18is602 *hw;
6533 + struct spi_master *master;
6534 +- int error;
6535 +
6536 + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
6537 + I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
6538 + return -EINVAL;
6539 +
6540 +- master = spi_alloc_master(dev, sizeof(struct sc18is602));
6541 ++ master = devm_spi_alloc_master(dev, sizeof(struct sc18is602));
6542 + if (!master)
6543 + return -ENOMEM;
6544 +
6545 +@@ -308,15 +307,7 @@ static int sc18is602_probe(struct i2c_client *client,
6546 + master->min_speed_hz = hw->freq / 128;
6547 + master->max_speed_hz = hw->freq / 4;
6548 +
6549 +- error = devm_spi_register_master(dev, master);
6550 +- if (error)
6551 +- goto error_reg;
6552 +-
6553 +- return 0;
6554 +-
6555 +-error_reg:
6556 +- spi_master_put(master);
6557 +- return error;
6558 ++ return devm_spi_register_master(dev, master);
6559 + }
6560 +
6561 + static const struct i2c_device_id sc18is602_id[] = {
6562 +diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
6563 +index 50e0ea9acf8b8..cba49a65ed2bd 100644
6564 +--- a/drivers/spi/spi-sh.c
6565 ++++ b/drivers/spi/spi-sh.c
6566 +@@ -450,7 +450,7 @@ static int spi_sh_probe(struct platform_device *pdev)
6567 + return irq;
6568 + }
6569 +
6570 +- master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
6571 ++ master = devm_spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
6572 + if (master == NULL) {
6573 + dev_err(&pdev->dev, "spi_alloc_master error.\n");
6574 + return -ENOMEM;
6575 +@@ -468,16 +468,14 @@ static int spi_sh_probe(struct platform_device *pdev)
6576 + break;
6577 + default:
6578 + dev_err(&pdev->dev, "No support width\n");
6579 +- ret = -ENODEV;
6580 +- goto error1;
6581 ++ return -ENODEV;
6582 + }
6583 + ss->irq = irq;
6584 + ss->master = master;
6585 + ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
6586 + if (ss->addr == NULL) {
6587 + dev_err(&pdev->dev, "ioremap error.\n");
6588 +- ret = -ENOMEM;
6589 +- goto error1;
6590 ++ return -ENOMEM;
6591 + }
6592 + INIT_LIST_HEAD(&ss->queue);
6593 + spin_lock_init(&ss->lock);
6594 +@@ -487,7 +485,7 @@ static int spi_sh_probe(struct platform_device *pdev)
6595 + ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
6596 + if (ret < 0) {
6597 + dev_err(&pdev->dev, "request_irq error\n");
6598 +- goto error1;
6599 ++ return ret;
6600 + }
6601 +
6602 + master->num_chipselect = 2;
6603 +@@ -506,9 +504,6 @@ static int spi_sh_probe(struct platform_device *pdev)
6604 +
6605 + error3:
6606 + free_irq(irq, ss);
6607 +- error1:
6608 +- spi_master_put(master);
6609 +-
6610 + return ret;
6611 + }
6612 +
6613 +diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
6614 +index 5df01ffdef468..b46502db7f122 100644
6615 +--- a/drivers/spi/spi-st-ssc4.c
6616 ++++ b/drivers/spi/spi-st-ssc4.c
6617 +@@ -379,13 +379,14 @@ static int spi_st_probe(struct platform_device *pdev)
6618 + ret = devm_spi_register_master(&pdev->dev, master);
6619 + if (ret) {
6620 + dev_err(&pdev->dev, "Failed to register master\n");
6621 +- goto clk_disable;
6622 ++ goto rpm_disable;
6623 + }
6624 +
6625 + return 0;
6626 +
6627 +-clk_disable:
6628 ++rpm_disable:
6629 + pm_runtime_disable(&pdev->dev);
6630 ++clk_disable:
6631 + clk_disable_unprepare(spi_st->clk);
6632 + put_master:
6633 + spi_master_put(master);
6634 +diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
6635 +index 09cfae3abce2d..c510b53e5e3f5 100644
6636 +--- a/drivers/spi/spi-tegra114.c
6637 ++++ b/drivers/spi/spi-tegra114.c
6638 +@@ -827,6 +827,7 @@ static int tegra_spi_setup(struct spi_device *spi)
6639 +
6640 + ret = pm_runtime_get_sync(tspi->dev);
6641 + if (ret < 0) {
6642 ++ pm_runtime_put_noidle(tspi->dev);
6643 + dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
6644 + return ret;
6645 + }
6646 +@@ -1252,6 +1253,7 @@ static int tegra_spi_resume(struct device *dev)
6647 +
6648 + ret = pm_runtime_get_sync(dev);
6649 + if (ret < 0) {
6650 ++ pm_runtime_put_noidle(dev);
6651 + dev_err(dev, "pm runtime failed, e = %d\n", ret);
6652 + return ret;
6653 + }
6654 +diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
6655 +index 22893a7e0aa0e..749288310c36c 100644
6656 +--- a/drivers/spi/spi-tegra20-sflash.c
6657 ++++ b/drivers/spi/spi-tegra20-sflash.c
6658 +@@ -564,6 +564,7 @@ static int tegra_sflash_resume(struct device *dev)
6659 +
6660 + ret = pm_runtime_get_sync(dev);
6661 + if (ret < 0) {
6662 ++ pm_runtime_put_noidle(dev);
6663 + dev_err(dev, "pm runtime failed, e = %d\n", ret);
6664 + return ret;
6665 + }
6666 +diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
6667 +index d1187317bb5d7..c6b80a60951b1 100644
6668 +--- a/drivers/spi/spi-tegra20-slink.c
6669 ++++ b/drivers/spi/spi-tegra20-slink.c
6670 +@@ -761,6 +761,7 @@ static int tegra_slink_setup(struct spi_device *spi)
6671 +
6672 + ret = pm_runtime_get_sync(tspi->dev);
6673 + if (ret < 0) {
6674 ++ pm_runtime_put_noidle(tspi->dev);
6675 + dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
6676 + return ret;
6677 + }
6678 +@@ -1197,6 +1198,7 @@ static int tegra_slink_resume(struct device *dev)
6679 +
6680 + ret = pm_runtime_get_sync(dev);
6681 + if (ret < 0) {
6682 ++ pm_runtime_put_noidle(dev);
6683 + dev_err(dev, "pm runtime failed, e = %d\n", ret);
6684 + return ret;
6685 + }
6686 +diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
6687 +index 95c28abaa0272..73a08724034ba 100644
6688 +--- a/drivers/spi/spi-ti-qspi.c
6689 ++++ b/drivers/spi/spi-ti-qspi.c
6690 +@@ -183,6 +183,7 @@ static int ti_qspi_setup(struct spi_device *spi)
6691 +
6692 + ret = pm_runtime_get_sync(qspi->dev);
6693 + if (ret < 0) {
6694 ++ pm_runtime_put_noidle(qspi->dev);
6695 + dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
6696 + return ret;
6697 + }
6698 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
6699 +index fbc5444bd9cbd..7dabbc82b6463 100644
6700 +--- a/drivers/spi/spi.c
6701 ++++ b/drivers/spi/spi.c
6702 +@@ -362,9 +362,11 @@ static int spi_drv_probe(struct device *dev)
6703 + if (ret)
6704 + return ret;
6705 +
6706 +- ret = sdrv->probe(spi);
6707 +- if (ret)
6708 +- dev_pm_domain_detach(dev, true);
6709 ++ if (sdrv->probe) {
6710 ++ ret = sdrv->probe(spi);
6711 ++ if (ret)
6712 ++ dev_pm_domain_detach(dev, true);
6713 ++ }
6714 +
6715 + return ret;
6716 + }
6717 +@@ -372,9 +374,10 @@ static int spi_drv_probe(struct device *dev)
6718 + static int spi_drv_remove(struct device *dev)
6719 + {
6720 + const struct spi_driver *sdrv = to_spi_driver(dev->driver);
6721 +- int ret;
6722 ++ int ret = 0;
6723 +
6724 +- ret = sdrv->remove(to_spi_device(dev));
6725 ++ if (sdrv->remove)
6726 ++ ret = sdrv->remove(to_spi_device(dev));
6727 + dev_pm_domain_detach(dev, true);
6728 +
6729 + return ret;
6730 +@@ -399,10 +402,8 @@ int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
6731 + {
6732 + sdrv->driver.owner = owner;
6733 + sdrv->driver.bus = &spi_bus_type;
6734 +- if (sdrv->probe)
6735 +- sdrv->driver.probe = spi_drv_probe;
6736 +- if (sdrv->remove)
6737 +- sdrv->driver.remove = spi_drv_remove;
6738 ++ sdrv->driver.probe = spi_drv_probe;
6739 ++ sdrv->driver.remove = spi_drv_remove;
6740 + if (sdrv->shutdown)
6741 + sdrv->driver.shutdown = spi_drv_shutdown;
6742 + return driver_register(&sdrv->driver);
6743 +diff --git a/drivers/staging/comedi/drivers/mf6x4.c b/drivers/staging/comedi/drivers/mf6x4.c
6744 +index ea430237efa7f..9da8dd748078d 100644
6745 +--- a/drivers/staging/comedi/drivers/mf6x4.c
6746 ++++ b/drivers/staging/comedi/drivers/mf6x4.c
6747 +@@ -112,8 +112,9 @@ static int mf6x4_ai_eoc(struct comedi_device *dev,
6748 + struct mf6x4_private *devpriv = dev->private;
6749 + unsigned int status;
6750 +
6751 ++ /* EOLC goes low at end of conversion. */
6752 + status = ioread32(devpriv->gpioc_reg);
6753 +- if (status & MF6X4_GPIOC_EOLC)
6754 ++ if ((status & MF6X4_GPIOC_EOLC) == 0)
6755 + return 0;
6756 + return -EBUSY;
6757 + }
6758 +diff --git a/drivers/staging/gasket/gasket_interrupt.c b/drivers/staging/gasket/gasket_interrupt.c
6759 +index 1cfbc120f2284..225460c535d61 100644
6760 +--- a/drivers/staging/gasket/gasket_interrupt.c
6761 ++++ b/drivers/staging/gasket/gasket_interrupt.c
6762 +@@ -527,14 +527,16 @@ int gasket_interrupt_system_status(struct gasket_dev *gasket_dev)
6763 + int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data,
6764 + int interrupt, int event_fd)
6765 + {
6766 +- struct eventfd_ctx *ctx = eventfd_ctx_fdget(event_fd);
6767 +-
6768 +- if (IS_ERR(ctx))
6769 +- return PTR_ERR(ctx);
6770 ++ struct eventfd_ctx *ctx;
6771 +
6772 + if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
6773 + return -EINVAL;
6774 +
6775 ++ ctx = eventfd_ctx_fdget(event_fd);
6776 ++
6777 ++ if (IS_ERR(ctx))
6778 ++ return PTR_ERR(ctx);
6779 ++
6780 + interrupt_data->eventfd_ctxs[interrupt] = ctx;
6781 + return 0;
6782 + }
6783 +@@ -545,6 +547,9 @@ int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data,
6784 + if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
6785 + return -EINVAL;
6786 +
6787 +- interrupt_data->eventfd_ctxs[interrupt] = NULL;
6788 ++ if (interrupt_data->eventfd_ctxs[interrupt]) {
6789 ++ eventfd_ctx_put(interrupt_data->eventfd_ctxs[interrupt]);
6790 ++ interrupt_data->eventfd_ctxs[interrupt] = NULL;
6791 ++ }
6792 + return 0;
6793 + }
6794 +diff --git a/drivers/staging/greybus/audio_codec.c b/drivers/staging/greybus/audio_codec.c
6795 +index 35acd55ca5ab7..6cbf69a57dfd9 100644
6796 +--- a/drivers/staging/greybus/audio_codec.c
6797 ++++ b/drivers/staging/greybus/audio_codec.c
6798 +@@ -489,6 +489,7 @@ static int gbcodec_hw_params(struct snd_pcm_substream *substream,
6799 + if (ret) {
6800 + dev_err_ratelimited(dai->dev, "%d: Error during set_config\n",
6801 + ret);
6802 ++ gb_pm_runtime_put_noidle(bundle);
6803 + mutex_unlock(&codec->lock);
6804 + return ret;
6805 + }
6806 +@@ -565,6 +566,7 @@ static int gbcodec_prepare(struct snd_pcm_substream *substream,
6807 + break;
6808 + }
6809 + if (ret) {
6810 ++ gb_pm_runtime_put_noidle(bundle);
6811 + mutex_unlock(&codec->lock);
6812 + dev_err_ratelimited(dai->dev, "set_data_size failed:%d\n",
6813 + ret);
6814 +diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c
6815 +index 578aa6824ad3e..33c747bc83202 100644
6816 +--- a/drivers/staging/mt7621-spi/spi-mt7621.c
6817 ++++ b/drivers/staging/mt7621-spi/spi-mt7621.c
6818 +@@ -455,6 +455,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
6819 + master = spi_alloc_master(&pdev->dev, sizeof(*rs));
6820 + if (master == NULL) {
6821 + dev_info(&pdev->dev, "master allocation failed\n");
6822 ++ clk_disable_unprepare(clk);
6823 + return -ENOMEM;
6824 + }
6825 +
6826 +@@ -480,6 +481,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
6827 + ret = device_reset(&pdev->dev);
6828 + if (ret) {
6829 + dev_err(&pdev->dev, "SPI reset failed!\n");
6830 ++ clk_disable_unprepare(clk);
6831 + return ret;
6832 + }
6833 +
6834 +diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
6835 +index a144f28ee1a8a..04de81559c6e3 100644
6836 +--- a/drivers/staging/speakup/speakup_dectlk.c
6837 ++++ b/drivers/staging/speakup/speakup_dectlk.c
6838 +@@ -37,7 +37,7 @@ static unsigned char get_index(struct spk_synth *synth);
6839 + static int in_escape;
6840 + static int is_flushing;
6841 +
6842 +-static spinlock_t flush_lock;
6843 ++static DEFINE_SPINLOCK(flush_lock);
6844 + static DECLARE_WAIT_QUEUE_HEAD(flush);
6845 +
6846 + static struct var_t vars[] = {
6847 +diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
6848 +index cbd006fb7fbb9..c1166b45c288b 100644
6849 +--- a/drivers/tty/serial/8250/8250_omap.c
6850 ++++ b/drivers/tty/serial/8250/8250_omap.c
6851 +@@ -163,11 +163,6 @@ static void omap_8250_mdr1_errataset(struct uart_8250_port *up,
6852 + struct omap8250_priv *priv)
6853 + {
6854 + u8 timeout = 255;
6855 +- u8 old_mdr1;
6856 +-
6857 +- old_mdr1 = serial_in(up, UART_OMAP_MDR1);
6858 +- if (old_mdr1 == priv->mdr1)
6859 +- return;
6860 +
6861 + serial_out(up, UART_OMAP_MDR1, priv->mdr1);
6862 + udelay(2);
6863 +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
6864 +index 2a5bf4c14fb89..80fa06b16d9d7 100644
6865 +--- a/drivers/tty/serial/serial_core.c
6866 ++++ b/drivers/tty/serial/serial_core.c
6867 +@@ -1421,6 +1421,10 @@ static void uart_set_ldisc(struct tty_struct *tty)
6868 + {
6869 + struct uart_state *state = tty->driver_data;
6870 + struct uart_port *uport;
6871 ++ struct tty_port *port = &state->port;
6872 ++
6873 ++ if (!tty_port_initialized(port))
6874 ++ return;
6875 +
6876 + mutex_lock(&state->port.mutex);
6877 + uport = uart_port_check(state);
6878 +diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
6879 +index 19f5f5f2a48a1..7335dc8552180 100644
6880 +--- a/drivers/usb/chipidea/ci_hdrc_imx.c
6881 ++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
6882 +@@ -57,7 +57,8 @@ static const struct ci_hdrc_imx_platform_flag imx6sx_usb_data = {
6883 +
6884 + static const struct ci_hdrc_imx_platform_flag imx6ul_usb_data = {
6885 + .flags = CI_HDRC_SUPPORTS_RUNTIME_PM |
6886 +- CI_HDRC_TURN_VBUS_EARLY_ON,
6887 ++ CI_HDRC_TURN_VBUS_EARLY_ON |
6888 ++ CI_HDRC_DISABLE_DEVICE_STREAMING,
6889 + };
6890 +
6891 + static const struct ci_hdrc_imx_platform_flag imx7d_usb_data = {
6892 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
6893 +index b55c3a699fc65..c1592403222f5 100644
6894 +--- a/drivers/usb/core/quirks.c
6895 ++++ b/drivers/usb/core/quirks.c
6896 +@@ -342,6 +342,9 @@ static const struct usb_device_id usb_quirk_list[] = {
6897 + { USB_DEVICE(0x06a3, 0x0006), .driver_info =
6898 + USB_QUIRK_CONFIG_INTF_STRINGS },
6899 +
6900 ++ /* Agfa SNAPSCAN 1212U */
6901 ++ { USB_DEVICE(0x06bd, 0x0001), .driver_info = USB_QUIRK_RESET_RESUME },
6902 ++
6903 + /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
6904 + { USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
6905 +
6906 +diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
6907 +index 9fc98de836249..add0f7ead55cc 100644
6908 +--- a/drivers/usb/gadget/function/f_acm.c
6909 ++++ b/drivers/usb/gadget/function/f_acm.c
6910 +@@ -684,7 +684,7 @@ acm_bind(struct usb_configuration *c, struct usb_function *f)
6911 + acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress;
6912 +
6913 + status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
6914 +- acm_ss_function, NULL);
6915 ++ acm_ss_function, acm_ss_function);
6916 + if (status)
6917 + goto fail;
6918 +
6919 +diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
6920 +index bb2edfc77627b..5c1846d1372ec 100644
6921 +--- a/drivers/usb/gadget/function/f_fs.c
6922 ++++ b/drivers/usb/gadget/function/f_fs.c
6923 +@@ -1247,6 +1247,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
6924 +
6925 + switch (epfile->ffs->gadget->speed) {
6926 + case USB_SPEED_SUPER:
6927 ++ case USB_SPEED_SUPER_PLUS:
6928 + desc_idx = 2;
6929 + break;
6930 + case USB_SPEED_HIGH:
6931 +@@ -3077,7 +3078,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
6932 + }
6933 +
6934 + if (likely(super)) {
6935 +- func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
6936 ++ func->function.ss_descriptors = func->function.ssp_descriptors =
6937 ++ vla_ptr(vlabuf, d, ss_descs);
6938 + ss_len = ffs_do_descs(ffs->ss_descs_count,
6939 + vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
6940 + d_raw_descs__sz - fs_len - hs_len,
6941 +@@ -3487,6 +3489,7 @@ static void ffs_func_unbind(struct usb_configuration *c,
6942 + func->function.fs_descriptors = NULL;
6943 + func->function.hs_descriptors = NULL;
6944 + func->function.ss_descriptors = NULL;
6945 ++ func->function.ssp_descriptors = NULL;
6946 + func->interfaces_nums = NULL;
6947 +
6948 + ffs_event_add(ffs, FUNCTIONFS_UNBIND);
6949 +diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
6950 +index b2b5b0689667b..0e083a53da534 100644
6951 +--- a/drivers/usb/gadget/function/f_midi.c
6952 ++++ b/drivers/usb/gadget/function/f_midi.c
6953 +@@ -1048,6 +1048,12 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
6954 + f->ss_descriptors = usb_copy_descriptors(midi_function);
6955 + if (!f->ss_descriptors)
6956 + goto fail_f_midi;
6957 ++
6958 ++ if (gadget_is_superspeed_plus(c->cdev->gadget)) {
6959 ++ f->ssp_descriptors = usb_copy_descriptors(midi_function);
6960 ++ if (!f->ssp_descriptors)
6961 ++ goto fail_f_midi;
6962 ++ }
6963 + }
6964 +
6965 + kfree(midi_function);
6966 +diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
6967 +index 0d8e4a364ca6e..cc1ff5b7b60c4 100644
6968 +--- a/drivers/usb/gadget/function/f_rndis.c
6969 ++++ b/drivers/usb/gadget/function/f_rndis.c
6970 +@@ -87,8 +87,10 @@ static inline struct f_rndis *func_to_rndis(struct usb_function *f)
6971 + /* peak (theoretical) bulk transfer rate in bits-per-second */
6972 + static unsigned int bitrate(struct usb_gadget *g)
6973 + {
6974 ++ if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
6975 ++ return 4250000000U;
6976 + if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
6977 +- return 13 * 1024 * 8 * 1000 * 8;
6978 ++ return 3750000000U;
6979 + else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
6980 + return 13 * 512 * 8 * 1000 * 8;
6981 + else
6982 +diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
6983 +index d0248c58dcb6a..fdbce8529dbb4 100644
6984 +--- a/drivers/usb/gadget/udc/dummy_hcd.c
6985 ++++ b/drivers/usb/gadget/udc/dummy_hcd.c
6986 +@@ -2747,7 +2747,7 @@ static int __init init(void)
6987 + {
6988 + int retval = -ENOMEM;
6989 + int i;
6990 +- struct dummy *dum[MAX_NUM_UDC];
6991 ++ struct dummy *dum[MAX_NUM_UDC] = {};
6992 +
6993 + if (usb_disabled())
6994 + return -ENODEV;
6995 +diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
6996 +index 7d20296cbe9f9..d31c425d61675 100644
6997 +--- a/drivers/usb/host/ehci-omap.c
6998 ++++ b/drivers/usb/host/ehci-omap.c
6999 +@@ -222,6 +222,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
7000 +
7001 + err_pm_runtime:
7002 + pm_runtime_put_sync(dev);
7003 ++ pm_runtime_disable(dev);
7004 +
7005 + err_phy:
7006 + for (i = 0; i < omap->nports; i++) {
7007 +diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
7008 +index afa321ab55fcf..c9acc59f4addd 100644
7009 +--- a/drivers/usb/host/max3421-hcd.c
7010 ++++ b/drivers/usb/host/max3421-hcd.c
7011 +@@ -1864,7 +1864,7 @@ max3421_probe(struct spi_device *spi)
7012 + struct max3421_hcd *max3421_hcd;
7013 + struct usb_hcd *hcd = NULL;
7014 + struct max3421_hcd_platform_data *pdata = NULL;
7015 +- int retval = -ENOMEM;
7016 ++ int retval;
7017 +
7018 + if (spi_setup(spi) < 0) {
7019 + dev_err(&spi->dev, "Unable to setup SPI bus");
7020 +@@ -1906,6 +1906,7 @@ max3421_probe(struct spi_device *spi)
7021 + goto error;
7022 + }
7023 +
7024 ++ retval = -ENOMEM;
7025 + hcd = usb_create_hcd(&max3421_hcd_desc, &spi->dev,
7026 + dev_name(&spi->dev));
7027 + if (!hcd) {
7028 +diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
7029 +index c5e6e8d0b5ef5..10d97261b433f 100644
7030 +--- a/drivers/usb/host/oxu210hp-hcd.c
7031 ++++ b/drivers/usb/host/oxu210hp-hcd.c
7032 +@@ -3719,8 +3719,10 @@ static struct usb_hcd *oxu_create(struct platform_device *pdev,
7033 + oxu->is_otg = otg;
7034 +
7035 + ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
7036 +- if (ret < 0)
7037 ++ if (ret < 0) {
7038 ++ usb_put_hcd(hcd);
7039 + return ERR_PTR(ret);
7040 ++ }
7041 +
7042 + device_wakeup_enable(hcd->self.controller);
7043 + return hcd;
7044 +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
7045 +index 64dc94853b8b5..e6e8bed11aeab 100644
7046 +--- a/drivers/usb/host/xhci-hub.c
7047 ++++ b/drivers/usb/host/xhci-hub.c
7048 +@@ -1617,6 +1617,10 @@ retry:
7049 + hcd->state = HC_STATE_SUSPENDED;
7050 + bus_state->next_statechange = jiffies + msecs_to_jiffies(10);
7051 + spin_unlock_irqrestore(&xhci->lock, flags);
7052 ++
7053 ++ if (bus_state->bus_suspended)
7054 ++ usleep_range(5000, 10000);
7055 ++
7056 + return 0;
7057 + }
7058 +
7059 +diff --git a/drivers/usb/misc/sisusbvga/Kconfig b/drivers/usb/misc/sisusbvga/Kconfig
7060 +index 36bc28c884ad7..47dabccafef43 100644
7061 +--- a/drivers/usb/misc/sisusbvga/Kconfig
7062 ++++ b/drivers/usb/misc/sisusbvga/Kconfig
7063 +@@ -15,7 +15,7 @@ config USB_SISUSBVGA
7064 +
7065 + config USB_SISUSBVGA_CON
7066 + bool "Text console and mode switching support" if USB_SISUSBVGA
7067 +- depends on VT
7068 ++ depends on VT && BROKEN
7069 + select FONT_8x16
7070 + ---help---
7071 + Say Y here if you want a VGA text console via the USB dongle or
7072 +diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
7073 +index e7f244cf2c072..a618276ead98d 100644
7074 +--- a/drivers/usb/serial/digi_acceleport.c
7075 ++++ b/drivers/usb/serial/digi_acceleport.c
7076 +@@ -19,7 +19,6 @@
7077 + #include <linux/tty_flip.h>
7078 + #include <linux/module.h>
7079 + #include <linux/spinlock.h>
7080 +-#include <linux/workqueue.h>
7081 + #include <linux/uaccess.h>
7082 + #include <linux/usb.h>
7083 + #include <linux/wait.h>
7084 +@@ -198,14 +197,12 @@ struct digi_port {
7085 + int dp_throttle_restart;
7086 + wait_queue_head_t dp_flush_wait;
7087 + wait_queue_head_t dp_close_wait; /* wait queue for close */
7088 +- struct work_struct dp_wakeup_work;
7089 + struct usb_serial_port *dp_port;
7090 + };
7091 +
7092 +
7093 + /* Local Function Declarations */
7094 +
7095 +-static void digi_wakeup_write_lock(struct work_struct *work);
7096 + static int digi_write_oob_command(struct usb_serial_port *port,
7097 + unsigned char *buf, int count, int interruptible);
7098 + static int digi_write_inb_command(struct usb_serial_port *port,
7099 +@@ -356,26 +353,6 @@ __releases(lock)
7100 + return timeout;
7101 + }
7102 +
7103 +-
7104 +-/*
7105 +- * Digi Wakeup Write
7106 +- *
7107 +- * Wake up port, line discipline, and tty processes sleeping
7108 +- * on writes.
7109 +- */
7110 +-
7111 +-static void digi_wakeup_write_lock(struct work_struct *work)
7112 +-{
7113 +- struct digi_port *priv =
7114 +- container_of(work, struct digi_port, dp_wakeup_work);
7115 +- struct usb_serial_port *port = priv->dp_port;
7116 +- unsigned long flags;
7117 +-
7118 +- spin_lock_irqsave(&priv->dp_port_lock, flags);
7119 +- tty_port_tty_wakeup(&port->port);
7120 +- spin_unlock_irqrestore(&priv->dp_port_lock, flags);
7121 +-}
7122 +-
7123 + /*
7124 + * Digi Write OOB Command
7125 + *
7126 +@@ -987,6 +964,7 @@ static void digi_write_bulk_callback(struct urb *urb)
7127 + unsigned long flags;
7128 + int ret = 0;
7129 + int status = urb->status;
7130 ++ bool wakeup;
7131 +
7132 + /* port and serial sanity check */
7133 + if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
7134 +@@ -1013,6 +991,7 @@ static void digi_write_bulk_callback(struct urb *urb)
7135 + }
7136 +
7137 + /* try to send any buffered data on this port */
7138 ++ wakeup = true;
7139 + spin_lock_irqsave(&priv->dp_port_lock, flags);
7140 + priv->dp_write_urb_in_use = 0;
7141 + if (priv->dp_out_buf_len > 0) {
7142 +@@ -1028,19 +1007,18 @@ static void digi_write_bulk_callback(struct urb *urb)
7143 + if (ret == 0) {
7144 + priv->dp_write_urb_in_use = 1;
7145 + priv->dp_out_buf_len = 0;
7146 ++ wakeup = false;
7147 + }
7148 + }
7149 +- /* wake up processes sleeping on writes immediately */
7150 +- tty_port_tty_wakeup(&port->port);
7151 +- /* also queue up a wakeup at scheduler time, in case we */
7152 +- /* lost the race in write_chan(). */
7153 +- schedule_work(&priv->dp_wakeup_work);
7154 +-
7155 + spin_unlock_irqrestore(&priv->dp_port_lock, flags);
7156 ++
7157 + if (ret && ret != -EPERM)
7158 + dev_err_console(port,
7159 + "%s: usb_submit_urb failed, ret=%d, port=%d\n",
7160 + __func__, ret, priv->dp_port_num);
7161 ++
7162 ++ if (wakeup)
7163 ++ tty_port_tty_wakeup(&port->port);
7164 + }
7165 +
7166 + static int digi_write_room(struct tty_struct *tty)
7167 +@@ -1240,7 +1218,6 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num)
7168 + init_waitqueue_head(&priv->dp_transmit_idle_wait);
7169 + init_waitqueue_head(&priv->dp_flush_wait);
7170 + init_waitqueue_head(&priv->dp_close_wait);
7171 +- INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
7172 + priv->dp_port = port;
7173 +
7174 + init_waitqueue_head(&port->write_wait);
7175 +@@ -1509,13 +1486,14 @@ static int digi_read_oob_callback(struct urb *urb)
7176 + rts = C_CRTSCTS(tty);
7177 +
7178 + if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
7179 ++ bool wakeup = false;
7180 ++
7181 + spin_lock_irqsave(&priv->dp_port_lock, flags);
7182 + /* convert from digi flags to termiox flags */
7183 + if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
7184 + priv->dp_modem_signals |= TIOCM_CTS;
7185 +- /* port must be open to use tty struct */
7186 + if (rts)
7187 +- tty_port_tty_wakeup(&port->port);
7188 ++ wakeup = true;
7189 + } else {
7190 + priv->dp_modem_signals &= ~TIOCM_CTS;
7191 + /* port must be open to use tty struct */
7192 +@@ -1534,6 +1512,9 @@ static int digi_read_oob_callback(struct urb *urb)
7193 + priv->dp_modem_signals &= ~TIOCM_CD;
7194 +
7195 + spin_unlock_irqrestore(&priv->dp_port_lock, flags);
7196 ++
7197 ++ if (wakeup)
7198 ++ tty_port_tty_wakeup(&port->port);
7199 + } else if (opcode == DIGI_CMD_TRANSMIT_IDLE) {
7200 + spin_lock_irqsave(&priv->dp_port_lock, flags);
7201 + priv->dp_transmit_idle = 1;
7202 +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
7203 +index 38d43c4b7ce54..e7a2aa1747db1 100644
7204 +--- a/drivers/usb/serial/keyspan_pda.c
7205 ++++ b/drivers/usb/serial/keyspan_pda.c
7206 +@@ -40,11 +40,12 @@
7207 + #define DRIVER_AUTHOR "Brian Warner <warner@××××××.com>"
7208 + #define DRIVER_DESC "USB Keyspan PDA Converter driver"
7209 +
7210 ++#define KEYSPAN_TX_THRESHOLD 16
7211 ++
7212 + struct keyspan_pda_private {
7213 + int tx_room;
7214 + int tx_throttled;
7215 +- struct work_struct wakeup_work;
7216 +- struct work_struct unthrottle_work;
7217 ++ struct work_struct unthrottle_work;
7218 + struct usb_serial *serial;
7219 + struct usb_serial_port *port;
7220 + };
7221 +@@ -97,15 +98,6 @@ static const struct usb_device_id id_table_fake_xircom[] = {
7222 + };
7223 + #endif
7224 +
7225 +-static void keyspan_pda_wakeup_write(struct work_struct *work)
7226 +-{
7227 +- struct keyspan_pda_private *priv =
7228 +- container_of(work, struct keyspan_pda_private, wakeup_work);
7229 +- struct usb_serial_port *port = priv->port;
7230 +-
7231 +- tty_port_tty_wakeup(&port->port);
7232 +-}
7233 +-
7234 + static void keyspan_pda_request_unthrottle(struct work_struct *work)
7235 + {
7236 + struct keyspan_pda_private *priv =
7237 +@@ -120,7 +112,7 @@ static void keyspan_pda_request_unthrottle(struct work_struct *work)
7238 + 7, /* request_unthrottle */
7239 + USB_TYPE_VENDOR | USB_RECIP_INTERFACE
7240 + | USB_DIR_OUT,
7241 +- 16, /* value: threshold */
7242 ++ KEYSPAN_TX_THRESHOLD,
7243 + 0, /* index */
7244 + NULL,
7245 + 0,
7246 +@@ -139,6 +131,8 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
7247 + int retval;
7248 + int status = urb->status;
7249 + struct keyspan_pda_private *priv;
7250 ++ unsigned long flags;
7251 ++
7252 + priv = usb_get_serial_port_data(port);
7253 +
7254 + switch (status) {
7255 +@@ -172,18 +166,21 @@ static void keyspan_pda_rx_interrupt(struct urb *urb)
7256 + break;
7257 + case 1:
7258 + /* status interrupt */
7259 +- if (len < 3) {
7260 ++ if (len < 2) {
7261 + dev_warn(&port->dev, "short interrupt message received\n");
7262 + break;
7263 + }
7264 +- dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]);
7265 ++ dev_dbg(&port->dev, "rx int, d1=%d\n", data[1]);
7266 + switch (data[1]) {
7267 + case 1: /* modemline change */
7268 + break;
7269 + case 2: /* tx unthrottle interrupt */
7270 ++ spin_lock_irqsave(&port->lock, flags);
7271 + priv->tx_throttled = 0;
7272 ++ priv->tx_room = max(priv->tx_room, KEYSPAN_TX_THRESHOLD);
7273 ++ spin_unlock_irqrestore(&port->lock, flags);
7274 + /* queue up a wakeup at scheduler time */
7275 +- schedule_work(&priv->wakeup_work);
7276 ++ usb_serial_port_softint(port);
7277 + break;
7278 + default:
7279 + break;
7280 +@@ -443,6 +440,7 @@ static int keyspan_pda_write(struct tty_struct *tty,
7281 + int request_unthrottle = 0;
7282 + int rc = 0;
7283 + struct keyspan_pda_private *priv;
7284 ++ unsigned long flags;
7285 +
7286 + priv = usb_get_serial_port_data(port);
7287 + /* guess how much room is left in the device's ring buffer, and if we
7288 +@@ -462,13 +460,13 @@ static int keyspan_pda_write(struct tty_struct *tty,
7289 + the TX urb is in-flight (wait until it completes)
7290 + the device is full (wait until it says there is room)
7291 + */
7292 +- spin_lock_bh(&port->lock);
7293 ++ spin_lock_irqsave(&port->lock, flags);
7294 + if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) {
7295 +- spin_unlock_bh(&port->lock);
7296 ++ spin_unlock_irqrestore(&port->lock, flags);
7297 + return 0;
7298 + }
7299 + clear_bit(0, &port->write_urbs_free);
7300 +- spin_unlock_bh(&port->lock);
7301 ++ spin_unlock_irqrestore(&port->lock, flags);
7302 +
7303 + /* At this point the URB is in our control, nobody else can submit it
7304 + again (the only sudden transition was the one from EINPROGRESS to
7305 +@@ -514,7 +512,8 @@ static int keyspan_pda_write(struct tty_struct *tty,
7306 + goto exit;
7307 + }
7308 + }
7309 +- if (count > priv->tx_room) {
7310 ++
7311 ++ if (count >= priv->tx_room) {
7312 + /* we're about to completely fill the Tx buffer, so
7313 + we'll be throttled afterwards. */
7314 + count = priv->tx_room;
7315 +@@ -547,7 +546,7 @@ static int keyspan_pda_write(struct tty_struct *tty,
7316 +
7317 + rc = count;
7318 + exit:
7319 +- if (rc < 0)
7320 ++ if (rc <= 0)
7321 + set_bit(0, &port->write_urbs_free);
7322 + return rc;
7323 + }
7324 +@@ -562,21 +561,24 @@ static void keyspan_pda_write_bulk_callback(struct urb *urb)
7325 + priv = usb_get_serial_port_data(port);
7326 +
7327 + /* queue up a wakeup at scheduler time */
7328 +- schedule_work(&priv->wakeup_work);
7329 ++ usb_serial_port_softint(port);
7330 + }
7331 +
7332 +
7333 + static int keyspan_pda_write_room(struct tty_struct *tty)
7334 + {
7335 + struct usb_serial_port *port = tty->driver_data;
7336 +- struct keyspan_pda_private *priv;
7337 +- priv = usb_get_serial_port_data(port);
7338 +- /* used by n_tty.c for processing of tabs and such. Giving it our
7339 +- conservative guess is probably good enough, but needs testing by
7340 +- running a console through the device. */
7341 +- return priv->tx_room;
7342 +-}
7343 ++ struct keyspan_pda_private *priv = usb_get_serial_port_data(port);
7344 ++ unsigned long flags;
7345 ++ int room = 0;
7346 +
7347 ++ spin_lock_irqsave(&port->lock, flags);
7348 ++ if (test_bit(0, &port->write_urbs_free) && !priv->tx_throttled)
7349 ++ room = priv->tx_room;
7350 ++ spin_unlock_irqrestore(&port->lock, flags);
7351 ++
7352 ++ return room;
7353 ++}
7354 +
7355 + static int keyspan_pda_chars_in_buffer(struct tty_struct *tty)
7356 + {
7357 +@@ -656,8 +658,12 @@ error:
7358 + }
7359 + static void keyspan_pda_close(struct usb_serial_port *port)
7360 + {
7361 ++ struct keyspan_pda_private *priv = usb_get_serial_port_data(port);
7362 ++
7363 + usb_kill_urb(port->write_urb);
7364 + usb_kill_urb(port->interrupt_in_urb);
7365 ++
7366 ++ cancel_work_sync(&priv->unthrottle_work);
7367 + }
7368 +
7369 +
7370 +@@ -715,7 +721,6 @@ static int keyspan_pda_port_probe(struct usb_serial_port *port)
7371 + if (!priv)
7372 + return -ENOMEM;
7373 +
7374 +- INIT_WORK(&priv->wakeup_work, keyspan_pda_wakeup_write);
7375 + INIT_WORK(&priv->unthrottle_work, keyspan_pda_request_unthrottle);
7376 + priv->serial = port->serial;
7377 + priv->port = port;
7378 +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
7379 +index c0232b67a40f4..1f65bee521e69 100644
7380 +--- a/drivers/usb/serial/mos7720.c
7381 ++++ b/drivers/usb/serial/mos7720.c
7382 +@@ -638,6 +638,8 @@ static void parport_mos7715_restore_state(struct parport *pp,
7383 + spin_unlock(&release_lock);
7384 + return;
7385 + }
7386 ++ mos_parport->shadowDCR = s->u.pc.ctr;
7387 ++ mos_parport->shadowECR = s->u.pc.ecr;
7388 + write_parport_reg_nonblock(mos_parport, MOS7720_DCR,
7389 + mos_parport->shadowDCR);
7390 + write_parport_reg_nonblock(mos_parport, MOS7720_ECR,
7391 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
7392 +index 73cd2f8f0f65a..6fd6012ad7b3a 100644
7393 +--- a/drivers/usb/serial/option.c
7394 ++++ b/drivers/usb/serial/option.c
7395 +@@ -563,6 +563,9 @@ static void option_instat_callback(struct urb *urb);
7396 +
7397 + /* Device flags */
7398 +
7399 ++/* Highest interface number which can be used with NCTRL() and RSVD() */
7400 ++#define FLAG_IFNUM_MAX 7
7401 ++
7402 + /* Interface does not support modem-control requests */
7403 + #define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)
7404 +
7405 +@@ -2100,6 +2103,14 @@ static struct usb_serial_driver * const serial_drivers[] = {
7406 +
7407 + module_usb_serial_driver(serial_drivers, option_ids);
7408 +
7409 ++static bool iface_is_reserved(unsigned long device_flags, u8 ifnum)
7410 ++{
7411 ++ if (ifnum > FLAG_IFNUM_MAX)
7412 ++ return false;
7413 ++
7414 ++ return device_flags & RSVD(ifnum);
7415 ++}
7416 ++
7417 + static int option_probe(struct usb_serial *serial,
7418 + const struct usb_device_id *id)
7419 + {
7420 +@@ -2116,7 +2127,7 @@ static int option_probe(struct usb_serial *serial,
7421 + * the same class/subclass/protocol as the serial interfaces. Look at
7422 + * the Windows driver .INF files for reserved interface numbers.
7423 + */
7424 +- if (device_flags & RSVD(iface_desc->bInterfaceNumber))
7425 ++ if (iface_is_reserved(device_flags, iface_desc->bInterfaceNumber))
7426 + return -ENODEV;
7427 +
7428 + /*
7429 +@@ -2132,6 +2143,14 @@ static int option_probe(struct usb_serial *serial,
7430 + return 0;
7431 + }
7432 +
7433 ++static bool iface_no_modem_control(unsigned long device_flags, u8 ifnum)
7434 ++{
7435 ++ if (ifnum > FLAG_IFNUM_MAX)
7436 ++ return false;
7437 ++
7438 ++ return device_flags & NCTRL(ifnum);
7439 ++}
7440 ++
7441 + static int option_attach(struct usb_serial *serial)
7442 + {
7443 + struct usb_interface_descriptor *iface_desc;
7444 +@@ -2147,7 +2166,7 @@ static int option_attach(struct usb_serial *serial)
7445 +
7446 + iface_desc = &serial->interface->cur_altsetting->desc;
7447 +
7448 +- if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
7449 ++ if (!iface_no_modem_control(device_flags, iface_desc->bInterfaceNumber))
7450 + data->use_send_setup = 1;
7451 +
7452 + if (device_flags & ZLP)
7453 +diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
7454 +index 658b0cd8e27ee..1fc7143c35a37 100644
7455 +--- a/drivers/usb/storage/uas.c
7456 ++++ b/drivers/usb/storage/uas.c
7457 +@@ -874,6 +874,9 @@ static int uas_slave_configure(struct scsi_device *sdev)
7458 + if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
7459 + sdev->no_read_capacity_16 = 1;
7460 +
7461 ++ /* Some disks cannot handle WRITE_SAME */
7462 ++ if (devinfo->flags & US_FL_NO_SAME)
7463 ++ sdev->no_write_same = 1;
7464 + /*
7465 + * Some disks return the total number of blocks in response
7466 + * to READ CAPACITY rather than the highest block number.
7467 +diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
7468 +index dcdfcdfd2ad13..749c69be091cc 100644
7469 +--- a/drivers/usb/storage/unusual_uas.h
7470 ++++ b/drivers/usb/storage/unusual_uas.h
7471 +@@ -35,12 +35,15 @@ UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
7472 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7473 + US_FL_NO_REPORT_OPCODES),
7474 +
7475 +-/* Reported-by: Julian Groß <julian.g@××××××.de> */
7476 ++/*
7477 ++ * Initially Reported-by: Julian Groß <julian.g@××××××.de>
7478 ++ * Further reports David C. Partridge <david.partridge@××××××××××.uk>
7479 ++ */
7480 + UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
7481 + "LaCie",
7482 + "2Big Quadra USB3",
7483 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
7484 +- US_FL_NO_REPORT_OPCODES),
7485 ++ US_FL_NO_REPORT_OPCODES | US_FL_NO_SAME),
7486 +
7487 + /*
7488 + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
7489 +diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
7490 +index 9a79cd9762f31..2349dfa3b1762 100644
7491 +--- a/drivers/usb/storage/usb.c
7492 ++++ b/drivers/usb/storage/usb.c
7493 +@@ -541,6 +541,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
7494 + case 'j':
7495 + f |= US_FL_NO_REPORT_LUNS;
7496 + break;
7497 ++ case 'k':
7498 ++ f |= US_FL_NO_SAME;
7499 ++ break;
7500 + case 'l':
7501 + f |= US_FL_NOT_LOCKABLE;
7502 + break;
7503 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
7504 +index 58e7336b2748b..5e23e4aa5b0a3 100644
7505 +--- a/drivers/vfio/pci/vfio_pci.c
7506 ++++ b/drivers/vfio/pci/vfio_pci.c
7507 +@@ -1380,8 +1380,8 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
7508 +
7509 + mutex_unlock(&vdev->vma_lock);
7510 +
7511 +- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
7512 +- vma->vm_end - vma->vm_start, vma->vm_page_prot))
7513 ++ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
7514 ++ vma->vm_end - vma->vm_start, vma->vm_page_prot))
7515 + ret = VM_FAULT_SIGBUS;
7516 +
7517 + up_out:
7518 +diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
7519 +index 4ed55e6bbb840..6d01ae3984c73 100644
7520 +--- a/drivers/video/fbdev/atmel_lcdfb.c
7521 ++++ b/drivers/video/fbdev/atmel_lcdfb.c
7522 +@@ -1071,8 +1071,8 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
7523 + }
7524 +
7525 + INIT_LIST_HEAD(&pdata->pwr_gpios);
7526 +- ret = -ENOMEM;
7527 + for (i = 0; i < gpiod_count(dev, "atmel,power-control"); i++) {
7528 ++ ret = -ENOMEM;
7529 + gpiod = devm_gpiod_get_index(dev, "atmel,power-control",
7530 + i, GPIOD_ASIS);
7531 + if (IS_ERR(gpiod))
7532 +diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
7533 +index 709d4de11f40f..fa7f4c61524d9 100644
7534 +--- a/drivers/watchdog/Kconfig
7535 ++++ b/drivers/watchdog/Kconfig
7536 +@@ -515,7 +515,7 @@ config SUNXI_WATCHDOG
7537 +
7538 + config COH901327_WATCHDOG
7539 + bool "ST-Ericsson COH 901 327 watchdog"
7540 +- depends on ARCH_U300 || (ARM && COMPILE_TEST)
7541 ++ depends on ARCH_U300 || (ARM && COMMON_CLK && COMPILE_TEST)
7542 + default y if MACH_U300
7543 + select WATCHDOG_CORE
7544 + help
7545 +@@ -651,6 +651,7 @@ config MOXART_WDT
7546 +
7547 + config SIRFSOC_WATCHDOG
7548 + tristate "SiRFSOC watchdog"
7549 ++ depends on HAS_IOMEM
7550 + depends on ARCH_SIRF || COMPILE_TEST
7551 + select WATCHDOG_CORE
7552 + default y
7553 +diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
7554 +index 780971318810d..1a0005a8fadb2 100644
7555 +--- a/drivers/watchdog/qcom-wdt.c
7556 ++++ b/drivers/watchdog/qcom-wdt.c
7557 +@@ -121,7 +121,7 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
7558 + */
7559 + wmb();
7560 +
7561 +- msleep(150);
7562 ++ mdelay(150);
7563 + return 0;
7564 + }
7565 +
7566 +diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
7567 +index b6c65afd36778..86cf93af951b5 100644
7568 +--- a/drivers/watchdog/sprd_wdt.c
7569 ++++ b/drivers/watchdog/sprd_wdt.c
7570 +@@ -116,18 +116,6 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
7571 + u32 tmr_step = timeout * SPRD_WDT_CNT_STEP;
7572 + u32 prtmr_step = pretimeout * SPRD_WDT_CNT_STEP;
7573 +
7574 +- sprd_wdt_unlock(wdt->base);
7575 +- writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
7576 +- SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH);
7577 +- writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK),
7578 +- wdt->base + SPRD_WDT_LOAD_LOW);
7579 +- writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
7580 +- SPRD_WDT_LOW_VALUE_MASK,
7581 +- wdt->base + SPRD_WDT_IRQ_LOAD_HIGH);
7582 +- writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK,
7583 +- wdt->base + SPRD_WDT_IRQ_LOAD_LOW);
7584 +- sprd_wdt_lock(wdt->base);
7585 +-
7586 + /*
7587 + * Waiting the load value operation done,
7588 + * it needs two or three RTC clock cycles.
7589 +@@ -142,6 +130,19 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
7590 +
7591 + if (delay_cnt >= SPRD_WDT_LOAD_TIMEOUT)
7592 + return -EBUSY;
7593 ++
7594 ++ sprd_wdt_unlock(wdt->base);
7595 ++ writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
7596 ++ SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH);
7597 ++ writel_relaxed((tmr_step & SPRD_WDT_LOW_VALUE_MASK),
7598 ++ wdt->base + SPRD_WDT_LOAD_LOW);
7599 ++ writel_relaxed((prtmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
7600 ++ SPRD_WDT_LOW_VALUE_MASK,
7601 ++ wdt->base + SPRD_WDT_IRQ_LOAD_HIGH);
7602 ++ writel_relaxed(prtmr_step & SPRD_WDT_LOW_VALUE_MASK,
7603 ++ wdt->base + SPRD_WDT_IRQ_LOAD_LOW);
7604 ++ sprd_wdt_lock(wdt->base);
7605 ++
7606 + return 0;
7607 + }
7608 +
7609 +@@ -360,15 +361,10 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
7610 + if (ret)
7611 + return ret;
7612 +
7613 +- if (watchdog_active(&wdt->wdd)) {
7614 ++ if (watchdog_active(&wdt->wdd))
7615 + ret = sprd_wdt_start(&wdt->wdd);
7616 +- if (ret) {
7617 +- sprd_wdt_disable(wdt);
7618 +- return ret;
7619 +- }
7620 +- }
7621 +
7622 +- return 0;
7623 ++ return ret;
7624 + }
7625 +
7626 + static const struct dev_pm_ops sprd_wdt_pm_ops = {
7627 +diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
7628 +index 8b1f37ffb65ac..5c600a3706505 100644
7629 +--- a/drivers/watchdog/watchdog_core.c
7630 ++++ b/drivers/watchdog/watchdog_core.c
7631 +@@ -246,15 +246,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
7632 + }
7633 +
7634 + if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
7635 +- wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
7636 +-
7637 +- ret = register_reboot_notifier(&wdd->reboot_nb);
7638 +- if (ret) {
7639 +- pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
7640 +- wdd->id, ret);
7641 +- watchdog_dev_unregister(wdd);
7642 +- ida_simple_remove(&watchdog_ida, id);
7643 +- return ret;
7644 ++ if (!wdd->ops->stop)
7645 ++ pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id);
7646 ++ else {
7647 ++ wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
7648 ++
7649 ++ ret = register_reboot_notifier(&wdd->reboot_nb);
7650 ++ if (ret) {
7651 ++ pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
7652 ++ wdd->id, ret);
7653 ++ watchdog_dev_unregister(wdd);
7654 ++ ida_simple_remove(&watchdog_ida, id);
7655 ++ return ret;
7656 ++ }
7657 + }
7658 + }
7659 +
7660 +diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
7661 +index 3bbed47da3fa5..1e2a996c75158 100644
7662 +--- a/drivers/xen/xen-pciback/xenbus.c
7663 ++++ b/drivers/xen/xen-pciback/xenbus.c
7664 +@@ -688,7 +688,7 @@ static int xen_pcibk_xenbus_probe(struct xenbus_device *dev,
7665 +
7666 + /* watch the backend node for backend configuration information */
7667 + err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
7668 +- xen_pcibk_be_watch);
7669 ++ NULL, xen_pcibk_be_watch);
7670 + if (err)
7671 + goto out;
7672 +
7673 +diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
7674 +index d75a2385b37c7..88516a8a9f932 100644
7675 +--- a/drivers/xen/xenbus/xenbus.h
7676 ++++ b/drivers/xen/xenbus/xenbus.h
7677 +@@ -44,6 +44,8 @@ struct xen_bus_type {
7678 + int (*get_bus_id)(char bus_id[XEN_BUS_ID_SIZE], const char *nodename);
7679 + int (*probe)(struct xen_bus_type *bus, const char *type,
7680 + const char *dir);
7681 ++ bool (*otherend_will_handle)(struct xenbus_watch *watch,
7682 ++ const char *path, const char *token);
7683 + void (*otherend_changed)(struct xenbus_watch *watch, const char *path,
7684 + const char *token);
7685 + struct bus_type bus;
7686 +diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
7687 +index f7b553faadb10..e35bb6b874491 100644
7688 +--- a/drivers/xen/xenbus/xenbus_client.c
7689 ++++ b/drivers/xen/xenbus/xenbus_client.c
7690 +@@ -114,18 +114,22 @@ EXPORT_SYMBOL_GPL(xenbus_strstate);
7691 + */
7692 + int xenbus_watch_path(struct xenbus_device *dev, const char *path,
7693 + struct xenbus_watch *watch,
7694 ++ bool (*will_handle)(struct xenbus_watch *,
7695 ++ const char *, const char *),
7696 + void (*callback)(struct xenbus_watch *,
7697 + const char *, const char *))
7698 + {
7699 + int err;
7700 +
7701 + watch->node = path;
7702 ++ watch->will_handle = will_handle;
7703 + watch->callback = callback;
7704 +
7705 + err = register_xenbus_watch(watch);
7706 +
7707 + if (err) {
7708 + watch->node = NULL;
7709 ++ watch->will_handle = NULL;
7710 + watch->callback = NULL;
7711 + xenbus_dev_fatal(dev, err, "adding watch on %s", path);
7712 + }
7713 +@@ -152,6 +156,8 @@ EXPORT_SYMBOL_GPL(xenbus_watch_path);
7714 + */
7715 + int xenbus_watch_pathfmt(struct xenbus_device *dev,
7716 + struct xenbus_watch *watch,
7717 ++ bool (*will_handle)(struct xenbus_watch *,
7718 ++ const char *, const char *),
7719 + void (*callback)(struct xenbus_watch *,
7720 + const char *, const char *),
7721 + const char *pathfmt, ...)
7722 +@@ -168,7 +174,7 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev,
7723 + xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
7724 + return -ENOMEM;
7725 + }
7726 +- err = xenbus_watch_path(dev, path, watch, callback);
7727 ++ err = xenbus_watch_path(dev, path, watch, will_handle, callback);
7728 +
7729 + if (err)
7730 + kfree(path);
7731 +diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
7732 +index 5b471889d7237..e6d0903459e11 100644
7733 +--- a/drivers/xen/xenbus/xenbus_probe.c
7734 ++++ b/drivers/xen/xenbus/xenbus_probe.c
7735 +@@ -136,6 +136,7 @@ static int watch_otherend(struct xenbus_device *dev)
7736 + container_of(dev->dev.bus, struct xen_bus_type, bus);
7737 +
7738 + return xenbus_watch_pathfmt(dev, &dev->otherend_watch,
7739 ++ bus->otherend_will_handle,
7740 + bus->otherend_changed,
7741 + "%s/%s", dev->otherend, "state");
7742 + }
7743 +diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
7744 +index b0bed4faf44cc..4bb603051d5b6 100644
7745 +--- a/drivers/xen/xenbus/xenbus_probe_backend.c
7746 ++++ b/drivers/xen/xenbus/xenbus_probe_backend.c
7747 +@@ -180,6 +180,12 @@ static int xenbus_probe_backend(struct xen_bus_type *bus, const char *type,
7748 + return err;
7749 + }
7750 +
7751 ++static bool frontend_will_handle(struct xenbus_watch *watch,
7752 ++ const char *path, const char *token)
7753 ++{
7754 ++ return watch->nr_pending == 0;
7755 ++}
7756 ++
7757 + static void frontend_changed(struct xenbus_watch *watch,
7758 + const char *path, const char *token)
7759 + {
7760 +@@ -191,6 +197,7 @@ static struct xen_bus_type xenbus_backend = {
7761 + .levels = 3, /* backend/type/<frontend>/<id> */
7762 + .get_bus_id = backend_bus_id,
7763 + .probe = xenbus_probe_backend,
7764 ++ .otherend_will_handle = frontend_will_handle,
7765 + .otherend_changed = frontend_changed,
7766 + .bus = {
7767 + .name = "xen-backend",
7768 +diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
7769 +index 3a06eb699f333..12e02eb01f599 100644
7770 +--- a/drivers/xen/xenbus/xenbus_xs.c
7771 ++++ b/drivers/xen/xenbus/xenbus_xs.c
7772 +@@ -705,9 +705,13 @@ int xs_watch_msg(struct xs_watch_event *event)
7773 +
7774 + spin_lock(&watches_lock);
7775 + event->handle = find_watch(event->token);
7776 +- if (event->handle != NULL) {
7777 ++ if (event->handle != NULL &&
7778 ++ (!event->handle->will_handle ||
7779 ++ event->handle->will_handle(event->handle,
7780 ++ event->path, event->token))) {
7781 + spin_lock(&watch_events_lock);
7782 + list_add_tail(&event->list, &watch_events);
7783 ++ event->handle->nr_pending++;
7784 + wake_up(&watch_events_waitq);
7785 + spin_unlock(&watch_events_lock);
7786 + } else
7787 +@@ -765,6 +769,8 @@ int register_xenbus_watch(struct xenbus_watch *watch)
7788 +
7789 + sprintf(token, "%lX", (long)watch);
7790 +
7791 ++ watch->nr_pending = 0;
7792 ++
7793 + down_read(&xs_watch_rwsem);
7794 +
7795 + spin_lock(&watches_lock);
7796 +@@ -814,11 +820,14 @@ void unregister_xenbus_watch(struct xenbus_watch *watch)
7797 +
7798 + /* Cancel pending watch events. */
7799 + spin_lock(&watch_events_lock);
7800 +- list_for_each_entry_safe(event, tmp, &watch_events, list) {
7801 +- if (event->handle != watch)
7802 +- continue;
7803 +- list_del(&event->list);
7804 +- kfree(event);
7805 ++ if (watch->nr_pending) {
7806 ++ list_for_each_entry_safe(event, tmp, &watch_events, list) {
7807 ++ if (event->handle != watch)
7808 ++ continue;
7809 ++ list_del(&event->list);
7810 ++ kfree(event);
7811 ++ }
7812 ++ watch->nr_pending = 0;
7813 + }
7814 + spin_unlock(&watch_events_lock);
7815 +
7816 +@@ -865,7 +874,6 @@ void xs_suspend_cancel(void)
7817 +
7818 + static int xenwatch_thread(void *unused)
7819 + {
7820 +- struct list_head *ent;
7821 + struct xs_watch_event *event;
7822 +
7823 + xenwatch_pid = current->pid;
7824 +@@ -880,13 +888,15 @@ static int xenwatch_thread(void *unused)
7825 + mutex_lock(&xenwatch_mutex);
7826 +
7827 + spin_lock(&watch_events_lock);
7828 +- ent = watch_events.next;
7829 +- if (ent != &watch_events)
7830 +- list_del(ent);
7831 ++ event = list_first_entry_or_null(&watch_events,
7832 ++ struct xs_watch_event, list);
7833 ++ if (event) {
7834 ++ list_del(&event->list);
7835 ++ event->handle->nr_pending--;
7836 ++ }
7837 + spin_unlock(&watch_events_lock);
7838 +
7839 +- if (ent != &watch_events) {
7840 +- event = list_entry(ent, struct xs_watch_event, list);
7841 ++ if (event) {
7842 + event->handle->callback(event->handle, event->path,
7843 + event->token);
7844 + kfree(event);
7845 +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
7846 +index c0dbf8b7762b4..6e871a382209b 100644
7847 +--- a/fs/ceph/caps.c
7848 ++++ b/fs/ceph/caps.c
7849 +@@ -1047,12 +1047,19 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
7850 + {
7851 + struct ceph_mds_session *session = cap->session;
7852 + struct ceph_inode_info *ci = cap->ci;
7853 +- struct ceph_mds_client *mdsc =
7854 +- ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
7855 ++ struct ceph_mds_client *mdsc;
7856 + int removed = 0;
7857 +
7858 ++ /* 'ci' being NULL means the removal has already occurred */
7859 ++ if (!ci) {
7860 ++ dout("%s: cap inode is NULL\n", __func__);
7861 ++ return;
7862 ++ }
7863 ++
7864 + dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
7865 +
7866 ++ mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc;
7867 ++
7868 + /* remove from inode's cap rbtree, and clear auth cap */
7869 + rb_erase(&cap->ci_node, &ci->i_caps);
7870 + if (ci->i_auth_cap == cap)
7871 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
7872 +index e20d170d13f6b..faafa9a557c23 100644
7873 +--- a/fs/cifs/smb2ops.c
7874 ++++ b/fs/cifs/smb2ops.c
7875 +@@ -366,7 +366,8 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
7876 + goto out;
7877 + }
7878 +
7879 +- if (bytes_left || p->Next)
7880 ++ /* Azure rounds the buffer size up by 8, to a 16-byte boundary */
7881 ++ if ((bytes_left > 8) || p->Next)
7882 + cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
7883 +
7884 +
7885 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
7886 +index 379ac8caa29a6..ee824131c02e4 100644
7887 +--- a/fs/cifs/smb2pdu.c
7888 ++++ b/fs/cifs/smb2pdu.c
7889 +@@ -406,8 +406,8 @@ build_preauth_ctxt(struct smb2_preauth_neg_context *pneg_ctxt)
7890 + pneg_ctxt->ContextType = SMB2_PREAUTH_INTEGRITY_CAPABILITIES;
7891 + pneg_ctxt->DataLength = cpu_to_le16(38);
7892 + pneg_ctxt->HashAlgorithmCount = cpu_to_le16(1);
7893 +- pneg_ctxt->SaltLength = cpu_to_le16(SMB311_SALT_SIZE);
7894 +- get_random_bytes(pneg_ctxt->Salt, SMB311_SALT_SIZE);
7895 ++ pneg_ctxt->SaltLength = cpu_to_le16(SMB311_LINUX_CLIENT_SALT_SIZE);
7896 ++ get_random_bytes(pneg_ctxt->Salt, SMB311_LINUX_CLIENT_SALT_SIZE);
7897 + pneg_ctxt->HashAlgorithms = SMB2_PREAUTH_INTEGRITY_SHA512;
7898 + }
7899 +
7900 +@@ -461,6 +461,9 @@ static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
7901 + if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
7902 + printk_once(KERN_WARNING "server sent bad preauth context\n");
7903 + return;
7904 ++ } else if (len < MIN_PREAUTH_CTXT_DATA_LEN + le16_to_cpu(ctxt->SaltLength)) {
7905 ++ pr_warn_once("server sent invalid SaltLength\n");
7906 ++ return;
7907 + }
7908 + if (le16_to_cpu(ctxt->HashAlgorithmCount) != 1)
7909 + printk_once(KERN_WARNING "illegal SMB3 hash algorithm count\n");
7910 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
7911 +index 44501f8cbd75e..48ed43e6aee84 100644
7912 +--- a/fs/cifs/smb2pdu.h
7913 ++++ b/fs/cifs/smb2pdu.h
7914 +@@ -257,12 +257,20 @@ struct smb2_neg_context {
7915 + /* Followed by array of data */
7916 + } __packed;
7917 +
7918 +-#define SMB311_SALT_SIZE 32
7919 ++#define SMB311_LINUX_CLIENT_SALT_SIZE 32
7920 + /* Hash Algorithm Types */
7921 + #define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001)
7922 + #define SMB2_PREAUTH_HASH_SIZE 64
7923 +
7924 +-#define MIN_PREAUTH_CTXT_DATA_LEN (SMB311_SALT_SIZE + 6)
7925 ++/*
7926 ++ * SaltLength that the server sends can be zero, so the only three required
7927 ++ * fields (all __le16) end up six bytes total, so the minimum context data len
7928 ++ * in the response is six bytes which accounts for
7929 ++ *
7930 ++ * HashAlgorithmCount, SaltLength, and 1 HashAlgorithm.
7931 ++ */
7932 ++#define MIN_PREAUTH_CTXT_DATA_LEN 6
7933 ++
7934 + struct smb2_preauth_neg_context {
7935 + __le16 ContextType; /* 1 */
7936 + __le16 DataLength;
7937 +@@ -270,7 +278,7 @@ struct smb2_preauth_neg_context {
7938 + __le16 HashAlgorithmCount; /* 1 */
7939 + __le16 SaltLength;
7940 + __le16 HashAlgorithms; /* HashAlgorithms[0] since only one defined */
7941 +- __u8 Salt[SMB311_SALT_SIZE];
7942 ++ __u8 Salt[SMB311_LINUX_CLIENT_SALT_SIZE];
7943 + } __packed;
7944 +
7945 + /* Encryption Algorithms Ciphers */
7946 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
7947 +index cc092386ac6d9..b2a9c746f8ce4 100644
7948 +--- a/fs/ext4/inode.c
7949 ++++ b/fs/ext4/inode.c
7950 +@@ -203,6 +203,7 @@ void ext4_evict_inode(struct inode *inode)
7951 + */
7952 + int extra_credits = 6;
7953 + struct ext4_xattr_inode_array *ea_inode_array = NULL;
7954 ++ bool freeze_protected = false;
7955 +
7956 + trace_ext4_evict_inode(inode);
7957 +
7958 +@@ -250,9 +251,14 @@ void ext4_evict_inode(struct inode *inode)
7959 +
7960 + /*
7961 + * Protect us against freezing - iput() caller didn't have to have any
7962 +- * protection against it
7963 ++ * protection against it. When we are in a running transaction though,
7964 ++ * we are already protected against freezing and we cannot grab further
7965 ++ * protection due to lock ordering constraints.
7966 + */
7967 +- sb_start_intwrite(inode->i_sb);
7968 ++ if (!ext4_journal_current_handle()) {
7969 ++ sb_start_intwrite(inode->i_sb);
7970 ++ freeze_protected = true;
7971 ++ }
7972 +
7973 + if (!IS_NOQUOTA(inode))
7974 + extra_credits += EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb);
7975 +@@ -271,7 +277,8 @@ void ext4_evict_inode(struct inode *inode)
7976 + * cleaned up.
7977 + */
7978 + ext4_orphan_del(NULL, inode);
7979 +- sb_end_intwrite(inode->i_sb);
7980 ++ if (freeze_protected)
7981 ++ sb_end_intwrite(inode->i_sb);
7982 + goto no_delete;
7983 + }
7984 +
7985 +@@ -312,7 +319,8 @@ void ext4_evict_inode(struct inode *inode)
7986 + stop_handle:
7987 + ext4_journal_stop(handle);
7988 + ext4_orphan_del(NULL, inode);
7989 +- sb_end_intwrite(inode->i_sb);
7990 ++ if (freeze_protected)
7991 ++ sb_end_intwrite(inode->i_sb);
7992 + ext4_xattr_inode_array_free(ea_inode_array);
7993 + goto no_delete;
7994 + }
7995 +@@ -341,7 +349,8 @@ stop_handle:
7996 + else
7997 + ext4_free_inode(handle, inode);
7998 + ext4_journal_stop(handle);
7999 +- sb_end_intwrite(inode->i_sb);
8000 ++ if (freeze_protected)
8001 ++ sb_end_intwrite(inode->i_sb);
8002 + ext4_xattr_inode_array_free(ea_inode_array);
8003 + return;
8004 + no_delete:
8005 +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
8006 +index 054cfdd007d69..ec6f65c91d93c 100644
8007 +--- a/fs/ext4/mballoc.c
8008 ++++ b/fs/ext4/mballoc.c
8009 +@@ -4690,6 +4690,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
8010 + ext4_group_first_block_no(sb, group) +
8011 + EXT4_C2B(sbi, cluster),
8012 + "Block already on to-be-freed list");
8013 ++ kmem_cache_free(ext4_free_data_cachep, new_entry);
8014 + return 0;
8015 + }
8016 + }
8017 +diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
8018 +index bccfc40b3a74a..d19483fa1fe89 100644
8019 +--- a/fs/jffs2/readinode.c
8020 ++++ b/fs/jffs2/readinode.c
8021 +@@ -672,6 +672,22 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r
8022 + jffs2_free_full_dirent(fd);
8023 + return -EIO;
8024 + }
8025 ++
8026 ++#ifdef CONFIG_JFFS2_SUMMARY
8027 ++ /*
8028 ++ * This check is only needed under CONFIG_JFFS2_SUMMARY, because without
8029 ++ * it the name CRC has already been checked while mounting
8030 ++ */
8031 ++ crc = crc32(0, fd->name, rd->nsize);
8032 ++ if (unlikely(crc != je32_to_cpu(rd->name_crc))) {
8033 ++ JFFS2_NOTICE("name CRC failed on dirent node at"
8034 ++ "%#08x: read %#08x,calculated %#08x\n",
8035 ++ ref_offset(ref), je32_to_cpu(rd->node_crc), crc);
8036 ++ jffs2_mark_node_obsolete(c, ref);
8037 ++ jffs2_free_full_dirent(fd);
8038 ++ return 0;
8039 ++ }
8040 ++#endif
8041 + }
8042 +
8043 + fd->nhash = full_name_hash(NULL, fd->name, rd->nsize);
8044 +diff --git a/fs/jfs/jfs_dmap.h b/fs/jfs/jfs_dmap.h
8045 +index 562b9a7e4311f..f502a15c6c987 100644
8046 +--- a/fs/jfs/jfs_dmap.h
8047 ++++ b/fs/jfs/jfs_dmap.h
8048 +@@ -196,7 +196,7 @@ typedef union dmtree {
8049 + #define dmt_leafidx t1.leafidx
8050 + #define dmt_height t1.height
8051 + #define dmt_budmin t1.budmin
8052 +-#define dmt_stree t1.stree
8053 ++#define dmt_stree t2.stree
8054 +
8055 + /*
8056 + * on-disk aggregate disk allocation map descriptor.
8057 +diff --git a/fs/lockd/host.c b/fs/lockd/host.c
8058 +index f0b5c987d6ae1..3f6ba0cd2bd9c 100644
8059 +--- a/fs/lockd/host.c
8060 ++++ b/fs/lockd/host.c
8061 +@@ -432,12 +432,7 @@ nlm_bind_host(struct nlm_host *host)
8062 + * RPC rebind is required
8063 + */
8064 + if ((clnt = host->h_rpcclnt) != NULL) {
8065 +- if (time_after_eq(jiffies, host->h_nextrebind)) {
8066 +- rpc_force_rebind(clnt);
8067 +- host->h_nextrebind = jiffies + NLM_HOST_REBIND;
8068 +- dprintk("lockd: next rebind in %lu jiffies\n",
8069 +- host->h_nextrebind - jiffies);
8070 +- }
8071 ++ nlm_rebind_host(host);
8072 + } else {
8073 + unsigned long increment = nlmsvc_timeout;
8074 + struct rpc_timeout timeparms = {
8075 +@@ -485,13 +480,20 @@ nlm_bind_host(struct nlm_host *host)
8076 + return clnt;
8077 + }
8078 +
8079 +-/*
8080 +- * Force a portmap lookup of the remote lockd port
8081 ++/**
8082 ++ * nlm_rebind_host - If needed, force a portmap lookup of the peer's lockd port
8083 ++ * @host: NLM host handle for peer
8084 ++ *
8085 ++ * This is not needed when using a connection-oriented protocol, such as TCP.
8086 ++ * The existing autobind mechanism is sufficient to force a rebind when
8087 ++ * required, e.g. on connection state transitions.
8088 + */
8089 + void
8090 + nlm_rebind_host(struct nlm_host *host)
8091 + {
8092 +- dprintk("lockd: rebind host %s\n", host->h_name);
8093 ++ if (host->h_proto != IPPROTO_UDP)
8094 ++ return;
8095 ++
8096 + if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
8097 + rpc_force_rebind(host->h_rpcclnt);
8098 + host->h_nextrebind = jiffies + NLM_HOST_REBIND;
8099 +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
8100 +index e4cd3a2fe6989..aee66d8f13305 100644
8101 +--- a/fs/nfs/inode.c
8102 ++++ b/fs/nfs/inode.c
8103 +@@ -2142,7 +2142,7 @@ static int nfsiod_start(void)
8104 + {
8105 + struct workqueue_struct *wq;
8106 + dprintk("RPC: creating workqueue nfsiod\n");
8107 +- wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0);
8108 ++ wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
8109 + if (wq == NULL)
8110 + return -ENOMEM;
8111 + nfsiod_workqueue = wq;
8112 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
8113 +index fe7b42c277ac5..1a395647ae265 100644
8114 +--- a/fs/nfs/nfs4proc.c
8115 ++++ b/fs/nfs/nfs4proc.c
8116 +@@ -4687,12 +4687,12 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
8117 + u64 cookie, struct page **pages, unsigned int count, bool plus)
8118 + {
8119 + struct inode *dir = d_inode(dentry);
8120 ++ struct nfs_server *server = NFS_SERVER(dir);
8121 + struct nfs4_readdir_arg args = {
8122 + .fh = NFS_FH(dir),
8123 + .pages = pages,
8124 + .pgbase = 0,
8125 + .count = count,
8126 +- .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
8127 + .plus = plus,
8128 + };
8129 + struct nfs4_readdir_res res;
8130 +@@ -4707,9 +4707,15 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
8131 + dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
8132 + dentry,
8133 + (unsigned long long)cookie);
8134 ++ if (!(server->caps & NFS_CAP_SECURITY_LABEL))
8135 ++ args.bitmask = server->attr_bitmask_nl;
8136 ++ else
8137 ++ args.bitmask = server->attr_bitmask;
8138 ++
8139 + nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
8140 + res.pgbase = args.pgbase;
8141 +- status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
8142 ++ status = nfs4_call_sync(server->client, server, &msg, &args.seq_args,
8143 ++ &res.seq_res, 0);
8144 + if (status >= 0) {
8145 + memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
8146 + status += args.pgbase;
8147 +diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
8148 +index 5be08f02a76bc..4f90c444907f5 100644
8149 +--- a/fs/nfs_common/grace.c
8150 ++++ b/fs/nfs_common/grace.c
8151 +@@ -68,10 +68,14 @@ __state_in_grace(struct net *net, bool open)
8152 + if (!open)
8153 + return !list_empty(grace_list);
8154 +
8155 ++ spin_lock(&grace_lock);
8156 + list_for_each_entry(lm, grace_list, list) {
8157 +- if (lm->block_opens)
8158 ++ if (lm->block_opens) {
8159 ++ spin_unlock(&grace_lock);
8160 + return true;
8161 ++ }
8162 + }
8163 ++ spin_unlock(&grace_lock);
8164 + return false;
8165 + }
8166 +
8167 +diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
8168 +index 89cb484f1cfbe..ad38633392a0d 100644
8169 +--- a/fs/nfsd/nfssvc.c
8170 ++++ b/fs/nfsd/nfssvc.c
8171 +@@ -417,8 +417,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
8172 + return;
8173 +
8174 + nfsd_shutdown_net(net);
8175 +- printk(KERN_WARNING "nfsd: last server has exited, flushing export "
8176 +- "cache\n");
8177 ++ pr_info("nfsd: last server has exited, flushing export cache\n");
8178 + nfsd_export_flush(net);
8179 + }
8180 +
8181 +diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
8182 +index 5d4dc0f84f202..d99710270a373 100644
8183 +--- a/fs/quota/quota_v2.c
8184 ++++ b/fs/quota/quota_v2.c
8185 +@@ -158,6 +158,25 @@ static int v2_read_file_info(struct super_block *sb, int type)
8186 + qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk);
8187 + qinfo->dqi_ops = &v2r1_qtree_ops;
8188 + }
8189 ++ ret = -EUCLEAN;
8190 ++ /* Some sanity checks of the read headers... */
8191 ++ if ((loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits >
8192 ++ i_size_read(sb_dqopt(sb)->files[type])) {
8193 ++ quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
8194 ++ (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
8195 ++ i_size_read(sb_dqopt(sb)->files[type]));
8196 ++ goto out;
8197 ++ }
8198 ++ if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
8199 ++ quota_error(sb, "Free block number too big (%u >= %u).",
8200 ++ qinfo->dqi_free_blk, qinfo->dqi_blocks);
8201 ++ goto out;
8202 ++ }
8203 ++ if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
8204 ++ quota_error(sb, "Block with free entry too big (%u >= %u).",
8205 ++ qinfo->dqi_free_entry, qinfo->dqi_blocks);
8206 ++ goto out;
8207 ++ }
8208 + ret = 0;
8209 + out:
8210 + up_read(&dqopt->dqio_sem);
8211 +diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
8212 +index fab29f899f913..9542ebf643a5e 100644
8213 +--- a/fs/ubifs/io.c
8214 ++++ b/fs/ubifs/io.c
8215 +@@ -331,7 +331,7 @@ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad)
8216 + {
8217 + uint32_t crc;
8218 +
8219 +- ubifs_assert(c, pad >= 0 && !(pad & 7));
8220 ++ ubifs_assert(c, pad >= 0);
8221 +
8222 + if (pad >= UBIFS_PAD_NODE_SZ) {
8223 + struct ubifs_ch *ch = buf;
8224 +@@ -728,6 +728,10 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
8225 + * write-buffer.
8226 + */
8227 + memcpy(wbuf->buf + wbuf->used, buf, len);
8228 ++ if (aligned_len > len) {
8229 ++ ubifs_assert(c, aligned_len - len < 8);
8230 ++ ubifs_pad(c, wbuf->buf + wbuf->used + len, aligned_len - len);
8231 ++ }
8232 +
8233 + if (aligned_len == wbuf->avail) {
8234 + dbg_io("flush jhead %s wbuf to LEB %d:%d",
8235 +@@ -820,13 +824,18 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
8236 + }
8237 +
8238 + spin_lock(&wbuf->lock);
8239 +- if (aligned_len)
8240 ++ if (aligned_len) {
8241 + /*
8242 + * And now we have what's left and what does not take whole
8243 + * max. write unit, so write it to the write-buffer and we are
8244 + * done.
8245 + */
8246 + memcpy(wbuf->buf, buf + written, len);
8247 ++ if (aligned_len > len) {
8248 ++ ubifs_assert(c, aligned_len - len < 8);
8249 ++ ubifs_pad(c, wbuf->buf + len, aligned_len - len);
8250 ++ }
8251 ++ }
8252 +
8253 + if (c->leb_size - wbuf->offs >= c->max_write_size)
8254 + wbuf->size = c->max_write_size;
8255 +diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
8256 +index ba4dd54f2c821..d9773df60a360 100644
8257 +--- a/include/acpi/acpi_bus.h
8258 ++++ b/include/acpi/acpi_bus.h
8259 +@@ -622,7 +622,6 @@ acpi_status acpi_remove_pm_notifier(struct acpi_device *adev);
8260 + bool acpi_pm_device_can_wakeup(struct device *dev);
8261 + int acpi_pm_device_sleep_state(struct device *, int *, int);
8262 + int acpi_pm_set_device_wakeup(struct device *dev, bool enable);
8263 +-int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable);
8264 + #else
8265 + static inline void acpi_pm_wakeup_event(struct device *dev)
8266 + {
8267 +@@ -653,10 +652,6 @@ static inline int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
8268 + {
8269 + return -ENODEV;
8270 + }
8271 +-static inline int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
8272 +-{
8273 +- return -ENODEV;
8274 +-}
8275 + #endif
8276 +
8277 + #ifdef CONFIG_ACPI_SLEEP
8278 +diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
8279 +index 43d1fd50d433d..6099f754aad7c 100644
8280 +--- a/include/linux/build_bug.h
8281 ++++ b/include/linux/build_bug.h
8282 +@@ -80,4 +80,9 @@
8283 +
8284 + #endif /* __CHECKER__ */
8285 +
8286 ++#ifdef __GENKSYMS__
8287 ++/* genksyms gets confused by _Static_assert */
8288 ++#define _Static_assert(expr, ...)
8289 ++#endif
8290 ++
8291 + #endif /* _LINUX_BUILD_BUG_H */
8292 +diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
8293 +index d756f2318efe0..2d6e5e4bb5d93 100644
8294 +--- a/include/linux/compiler-clang.h
8295 ++++ b/include/linux/compiler-clang.h
8296 +@@ -39,7 +39,6 @@
8297 + * and may be redefined here because they should not be shared with other
8298 + * compilers, like ICC.
8299 + */
8300 +-#define barrier() __asm__ __volatile__("" : : : "memory")
8301 + #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
8302 + #define __assume_aligned(a, ...) \
8303 + __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
8304 +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
8305 +index 3ebee1ce6f982..14be095371093 100644
8306 +--- a/include/linux/compiler-gcc.h
8307 ++++ b/include/linux/compiler-gcc.h
8308 +@@ -14,25 +14,6 @@
8309 + # error Sorry, your compiler is too old - please upgrade it.
8310 + #endif
8311 +
8312 +-/* Optimization barrier */
8313 +-
8314 +-/* The "volatile" is due to gcc bugs */
8315 +-#define barrier() __asm__ __volatile__("": : :"memory")
8316 +-/*
8317 +- * This version is i.e. to prevent dead stores elimination on @ptr
8318 +- * where gcc and llvm may behave differently when otherwise using
8319 +- * normal barrier(): while gcc behavior gets along with a normal
8320 +- * barrier(), llvm needs an explicit input variable to be assumed
8321 +- * clobbered. The issue is as follows: while the inline asm might
8322 +- * access any memory it wants, the compiler could have fit all of
8323 +- * @ptr into memory registers instead, and since @ptr never escaped
8324 +- * from that, it proved that the inline asm wasn't touching any of
8325 +- * it. This version works well with both compilers, i.e. we're telling
8326 +- * the compiler that the inline asm absolutely may see the contents
8327 +- * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
8328 +- */
8329 +-#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
8330 +-
8331 + /*
8332 + * This macro obfuscates arithmetic on a variable address so that gcc
8333 + * shouldn't recognize the original var, and make assumptions about it.
8334 +diff --git a/include/linux/compiler.h b/include/linux/compiler.h
8335 +index fbb6490c1e094..6b6505e3b2c70 100644
8336 +--- a/include/linux/compiler.h
8337 ++++ b/include/linux/compiler.h
8338 +@@ -79,11 +79,25 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
8339 +
8340 + /* Optimization barrier */
8341 + #ifndef barrier
8342 +-# define barrier() __memory_barrier()
8343 ++/* The "volatile" is due to gcc bugs */
8344 ++# define barrier() __asm__ __volatile__("": : :"memory")
8345 + #endif
8346 +
8347 + #ifndef barrier_data
8348 +-# define barrier_data(ptr) barrier()
8349 ++/*
8350 ++ * This version is i.e. to prevent dead stores elimination on @ptr
8351 ++ * where gcc and llvm may behave differently when otherwise using
8352 ++ * normal barrier(): while gcc behavior gets along with a normal
8353 ++ * barrier(), llvm needs an explicit input variable to be assumed
8354 ++ * clobbered. The issue is as follows: while the inline asm might
8355 ++ * access any memory it wants, the compiler could have fit all of
8356 ++ * @ptr into memory registers instead, and since @ptr never escaped
8357 ++ * from that, it proved that the inline asm wasn't touching any of
8358 ++ * it. This version works well with both compilers, i.e. we're telling
8359 ++ * the compiler that the inline asm absolutely may see the contents
8360 ++ * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
8361 ++ */
8362 ++# define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
8363 + #endif
8364 +
8365 + /* workaround for GCC PR82365 if needed */
8366 +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
8367 +index 9077b3ebea08c..728d7716bf4f4 100644
8368 +--- a/include/linux/netfilter/x_tables.h
8369 ++++ b/include/linux/netfilter/x_tables.h
8370 +@@ -227,7 +227,7 @@ struct xt_table {
8371 + unsigned int valid_hooks;
8372 +
8373 + /* Man behind the curtain... */
8374 +- struct xt_table_info *private;
8375 ++ struct xt_table_info __rcu *private;
8376 +
8377 + /* Set this to THIS_MODULE if you are a module, otherwise NULL */
8378 + struct module *me;
8379 +@@ -449,6 +449,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
8380 +
8381 + struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
8382 +
8383 ++struct xt_table_info
8384 ++*xt_table_get_private_protected(const struct xt_table *table);
8385 ++
8386 + #ifdef CONFIG_COMPAT
8387 + #include <net/compat.h>
8388 +
8389 +diff --git a/include/linux/security.h b/include/linux/security.h
8390 +index d2240605edc46..454cc963d1457 100644
8391 +--- a/include/linux/security.h
8392 ++++ b/include/linux/security.h
8393 +@@ -787,7 +787,7 @@ static inline int security_inode_killpriv(struct dentry *dentry)
8394 +
8395 + static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
8396 + {
8397 +- return -EOPNOTSUPP;
8398 ++ return cap_inode_getsecurity(inode, name, buffer, alloc);
8399 + }
8400 +
8401 + static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
8402 +diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
8403 +index aa5deb041c25d..7cc952282e8be 100644
8404 +--- a/include/linux/seq_buf.h
8405 ++++ b/include/linux/seq_buf.h
8406 +@@ -30,7 +30,7 @@ static inline void seq_buf_clear(struct seq_buf *s)
8407 + }
8408 +
8409 + static inline void
8410 +-seq_buf_init(struct seq_buf *s, unsigned char *buf, unsigned int size)
8411 ++seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
8412 + {
8413 + s->buffer = buf;
8414 + s->size = size;
8415 +diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
8416 +index e7bbd82908b10..69fed13e633b7 100644
8417 +--- a/include/linux/sunrpc/xprt.h
8418 ++++ b/include/linux/sunrpc/xprt.h
8419 +@@ -317,6 +317,7 @@ struct xprt_class {
8420 + struct rpc_xprt * (*setup)(struct xprt_create *);
8421 + struct module *owner;
8422 + char name[32];
8423 ++ const char * netid[];
8424 + };
8425 +
8426 + /*
8427 +diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
8428 +index 6609b39a72326..6db257466af68 100644
8429 +--- a/include/linux/trace_seq.h
8430 ++++ b/include/linux/trace_seq.h
8431 +@@ -12,7 +12,7 @@
8432 + */
8433 +
8434 + struct trace_seq {
8435 +- unsigned char buffer[PAGE_SIZE];
8436 ++ char buffer[PAGE_SIZE];
8437 + struct seq_buf seq;
8438 + int full;
8439 + };
8440 +@@ -51,7 +51,7 @@ static inline int trace_seq_used(struct trace_seq *s)
8441 + * that is about to be written to and then return the result
8442 + * of that write.
8443 + */
8444 +-static inline unsigned char *
8445 ++static inline char *
8446 + trace_seq_buffer_ptr(struct trace_seq *s)
8447 + {
8448 + return s->buffer + seq_buf_used(&s->seq);
8449 +diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
8450 +index 000a5954b2e89..a7f7ebdd3069e 100644
8451 +--- a/include/linux/usb_usual.h
8452 ++++ b/include/linux/usb_usual.h
8453 +@@ -84,6 +84,8 @@
8454 + /* Cannot handle REPORT_LUNS */ \
8455 + US_FLAG(ALWAYS_SYNC, 0x20000000) \
8456 + /* lies about caching, so always sync */ \
8457 ++ US_FLAG(NO_SAME, 0x40000000) \
8458 ++ /* Cannot handle WRITE_SAME */ \
8459 +
8460 + #define US_FLAG(name, value) US_FL_##name = value ,
8461 + enum { US_DO_ALL_FLAGS };
8462 +diff --git a/include/uapi/linux/if_alg.h b/include/uapi/linux/if_alg.h
8463 +index bc2bcdec377b4..7690507714231 100644
8464 +--- a/include/uapi/linux/if_alg.h
8465 ++++ b/include/uapi/linux/if_alg.h
8466 +@@ -24,6 +24,22 @@ struct sockaddr_alg {
8467 + __u8 salg_name[64];
8468 + };
8469 +
8470 ++/*
8471 ++ * Linux v4.12 and later removed the 64-byte limit on salg_name[]; it's now an
8472 ++ * arbitrary-length field. We had to keep the original struct above for source
8473 ++ * compatibility with existing userspace programs, though. Use the new struct
8474 ++ * below if support for very long algorithm names is needed. To do this,
8475 ++ * allocate 'sizeof(struct sockaddr_alg_new) + strlen(algname) + 1' bytes, and
8476 ++ * copy algname (including the null terminator) into salg_name.
8477 ++ */
8478 ++struct sockaddr_alg_new {
8479 ++ __u16 salg_family;
8480 ++ __u8 salg_type[14];
8481 ++ __u32 salg_feat;
8482 ++ __u32 salg_mask;
8483 ++ __u8 salg_name[];
8484 ++};
8485 ++
8486 + struct af_alg_iv {
8487 + __u32 ivlen;
8488 + __u8 iv[0];
8489 +diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
8490 +index 869c816d5f8c3..eba01ab5a55e0 100644
8491 +--- a/include/xen/xenbus.h
8492 ++++ b/include/xen/xenbus.h
8493 +@@ -59,6 +59,15 @@ struct xenbus_watch
8494 + /* Path being watched. */
8495 + const char *node;
8496 +
8497 ++ unsigned int nr_pending;
8498 ++
8499 ++ /*
8500 ++ * Called just before enqueuing a new event while a spinlock is held.
8501 ++ * The event will be discarded if this callback returns false.
8502 ++ */
8503 ++ bool (*will_handle)(struct xenbus_watch *,
8504 ++ const char *path, const char *token);
8505 ++
8506 + /* Callback (executed in a process context with no locks held). */
8507 + void (*callback)(struct xenbus_watch *,
8508 + const char *path, const char *token);
8509 +@@ -192,10 +201,14 @@ void xenbus_probe(struct work_struct *);
8510 +
8511 + int xenbus_watch_path(struct xenbus_device *dev, const char *path,
8512 + struct xenbus_watch *watch,
8513 ++ bool (*will_handle)(struct xenbus_watch *,
8514 ++ const char *, const char *),
8515 + void (*callback)(struct xenbus_watch *,
8516 + const char *, const char *));
8517 +-__printf(4, 5)
8518 ++__printf(5, 6)
8519 + int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
8520 ++ bool (*will_handle)(struct xenbus_watch *,
8521 ++ const char *, const char *),
8522 + void (*callback)(struct xenbus_watch *,
8523 + const char *, const char *),
8524 + const char *pathfmt, ...);
8525 +diff --git a/kernel/cpu.c b/kernel/cpu.c
8526 +index 08b9d6ba0807f..9a39a24f60253 100644
8527 +--- a/kernel/cpu.c
8528 ++++ b/kernel/cpu.c
8529 +@@ -776,6 +776,10 @@ void __init cpuhp_threads_init(void)
8530 + }
8531 +
8532 + #ifdef CONFIG_HOTPLUG_CPU
8533 ++#ifndef arch_clear_mm_cpumask_cpu
8534 ++#define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
8535 ++#endif
8536 ++
8537 + /**
8538 + * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
8539 + * @cpu: a CPU id
8540 +@@ -811,7 +815,7 @@ void clear_tasks_mm_cpumask(int cpu)
8541 + t = find_lock_task_mm(p);
8542 + if (!t)
8543 + continue;
8544 +- cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
8545 ++ arch_clear_mm_cpumask_cpu(cpu, t->mm);
8546 + task_unlock(t);
8547 + }
8548 + rcu_read_unlock();
8549 +diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
8550 +index 0a76c44eb6b29..1e42fc2ad4d57 100644
8551 +--- a/kernel/irq/irqdomain.c
8552 ++++ b/kernel/irq/irqdomain.c
8553 +@@ -1247,8 +1247,15 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
8554 + unsigned int irq_base,
8555 + unsigned int nr_irqs)
8556 + {
8557 +- if (domain->ops->free)
8558 +- domain->ops->free(domain, irq_base, nr_irqs);
8559 ++ unsigned int i;
8560 ++
8561 ++ if (!domain->ops->free)
8562 ++ return;
8563 ++
8564 ++ for (i = 0; i < nr_irqs; i++) {
8565 ++ if (irq_domain_get_irq_data(domain, irq_base + i))
8566 ++ domain->ops->free(domain, irq_base + i, 1);
8567 ++ }
8568 + }
8569 +
8570 + int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
8571 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
8572 +index b166320f7633e..013b1c6cb4ed9 100644
8573 +--- a/kernel/sched/core.c
8574 ++++ b/kernel/sched/core.c
8575 +@@ -4984,12 +4984,8 @@ static void do_sched_yield(void)
8576 + schedstat_inc(rq->yld_count);
8577 + current->sched_class->yield_task(rq);
8578 +
8579 +- /*
8580 +- * Since we are going to call schedule() anyway, there's
8581 +- * no need to preempt or enable interrupts:
8582 +- */
8583 + preempt_disable();
8584 +- rq_unlock(rq, &rf);
8585 ++ rq_unlock_irq(rq, &rf);
8586 + sched_preempt_enable_no_resched();
8587 +
8588 + schedule();
8589 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
8590 +index 8aecfb143859d..aa592dc3cb401 100644
8591 +--- a/kernel/sched/deadline.c
8592 ++++ b/kernel/sched/deadline.c
8593 +@@ -2464,7 +2464,7 @@ int sched_dl_global_validate(void)
8594 + u64 period = global_rt_period();
8595 + u64 new_bw = to_ratio(period, runtime);
8596 + struct dl_bw *dl_b;
8597 +- int cpu, ret = 0;
8598 ++ int cpu, cpus, ret = 0;
8599 + unsigned long flags;
8600 +
8601 + /*
8602 +@@ -2479,9 +2479,10 @@ int sched_dl_global_validate(void)
8603 + for_each_possible_cpu(cpu) {
8604 + rcu_read_lock_sched();
8605 + dl_b = dl_bw_of(cpu);
8606 ++ cpus = dl_bw_cpus(cpu);
8607 +
8608 + raw_spin_lock_irqsave(&dl_b->lock, flags);
8609 +- if (new_bw < dl_b->total_bw)
8610 ++ if (new_bw * cpus < dl_b->total_bw)
8611 + ret = -EBUSY;
8612 + raw_spin_unlock_irqrestore(&dl_b->lock, flags);
8613 +
8614 +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
8615 +index 41b7954be68b7..7b7ba91e319bb 100644
8616 +--- a/kernel/sched/sched.h
8617 ++++ b/kernel/sched/sched.h
8618 +@@ -247,30 +247,6 @@ struct rt_bandwidth {
8619 +
8620 + void __dl_clear_params(struct task_struct *p);
8621 +
8622 +-/*
8623 +- * To keep the bandwidth of -deadline tasks and groups under control
8624 +- * we need some place where:
8625 +- * - store the maximum -deadline bandwidth of the system (the group);
8626 +- * - cache the fraction of that bandwidth that is currently allocated.
8627 +- *
8628 +- * This is all done in the data structure below. It is similar to the
8629 +- * one used for RT-throttling (rt_bandwidth), with the main difference
8630 +- * that, since here we are only interested in admission control, we
8631 +- * do not decrease any runtime while the group "executes", neither we
8632 +- * need a timer to replenish it.
8633 +- *
8634 +- * With respect to SMP, the bandwidth is given on a per-CPU basis,
8635 +- * meaning that:
8636 +- * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
8637 +- * - dl_total_bw array contains, in the i-eth element, the currently
8638 +- * allocated bandwidth on the i-eth CPU.
8639 +- * Moreover, groups consume bandwidth on each CPU, while tasks only
8640 +- * consume bandwidth on the CPU they're running on.
8641 +- * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
8642 +- * that will be shown the next time the proc or cgroup controls will
8643 +- * be red. It on its turn can be changed by writing on its own
8644 +- * control.
8645 +- */
8646 + struct dl_bandwidth {
8647 + raw_spinlock_t dl_runtime_lock;
8648 + u64 dl_runtime;
8649 +@@ -282,6 +258,24 @@ static inline int dl_bandwidth_enabled(void)
8650 + return sysctl_sched_rt_runtime >= 0;
8651 + }
8652 +
8653 ++/*
8654 ++ * To keep the bandwidth of -deadline tasks under control
8655 ++ * we need some place where:
8656 ++ * - store the maximum -deadline bandwidth of each cpu;
8657 ++ * - cache the fraction of bandwidth that is currently allocated in
8658 ++ * each root domain;
8659 ++ *
8660 ++ * This is all done in the data structure below. It is similar to the
8661 ++ * one used for RT-throttling (rt_bandwidth), with the main difference
8662 ++ * that, since here we are only interested in admission control, we
8663 ++ * do not decrease any runtime while the group "executes", neither we
8664 ++ * need a timer to replenish it.
8665 ++ *
8666 ++ * With respect to SMP, bandwidth is given on a per root domain basis,
8667 ++ * meaning that:
8668 ++ * - bw (< 100%) is the deadline bandwidth of each CPU;
8669 ++ * - total_bw is the currently allocated bandwidth in each root domain;
8670 ++ */
8671 + struct dl_bw {
8672 + raw_spinlock_t lock;
8673 + u64 bw;
8674 +diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
8675 +index d98d8e78b7363..b58afd2d5ebf4 100644
8676 +--- a/net/bluetooth/hci_event.c
8677 ++++ b/net/bluetooth/hci_event.c
8678 +@@ -4672,6 +4672,11 @@ static void hci_phy_link_complete_evt(struct hci_dev *hdev,
8679 + return;
8680 + }
8681 +
8682 ++ if (!hcon->amp_mgr) {
8683 ++ hci_dev_unlock(hdev);
8684 ++ return;
8685 ++ }
8686 ++
8687 + if (ev->status) {
8688 + hci_conn_del(hcon);
8689 + hci_dev_unlock(hdev);
8690 +@@ -5596,21 +5601,19 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
8691 + struct sk_buff *skb)
8692 + {
8693 + u8 num_reports = skb->data[0];
8694 +- void *ptr = &skb->data[1];
8695 ++ struct hci_ev_le_direct_adv_info *ev = (void *)&skb->data[1];
8696 +
8697 +- hci_dev_lock(hdev);
8698 ++ if (!num_reports || skb->len < num_reports * sizeof(*ev) + 1)
8699 ++ return;
8700 +
8701 +- while (num_reports--) {
8702 +- struct hci_ev_le_direct_adv_info *ev = ptr;
8703 ++ hci_dev_lock(hdev);
8704 +
8705 ++ for (; num_reports; num_reports--, ev++)
8706 + process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
8707 + ev->bdaddr_type, &ev->direct_addr,
8708 + ev->direct_addr_type, ev->rssi, NULL, 0,
8709 + false);
8710 +
8711 +- ptr += sizeof(*ev);
8712 +- }
8713 +-
8714 + hci_dev_unlock(hdev);
8715 + }
8716 +
8717 +diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
8718 +index 5f3950f00f73b..a82d0021d4617 100644
8719 +--- a/net/bridge/br_vlan.c
8720 ++++ b/net/bridge/br_vlan.c
8721 +@@ -242,8 +242,10 @@ static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
8722 + }
8723 +
8724 + masterv = br_vlan_get_master(br, v->vid);
8725 +- if (!masterv)
8726 ++ if (!masterv) {
8727 ++ err = -ENOMEM;
8728 + goto out_filt;
8729 ++ }
8730 + v->brvlan = masterv;
8731 + v->stats = masterv->stats;
8732 + } else {
8733 +diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
8734 +index a648568c5e8fe..4a5f4fbffd836 100644
8735 +--- a/net/core/lwt_bpf.c
8736 ++++ b/net/core/lwt_bpf.c
8737 +@@ -44,12 +44,11 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
8738 + {
8739 + int ret;
8740 +
8741 +- /* Preempt disable is needed to protect per-cpu redirect_info between
8742 +- * BPF prog and skb_do_redirect(). The call_rcu in bpf_prog_put() and
8743 +- * access to maps strictly require a rcu_read_lock() for protection,
8744 +- * mixing with BH RCU lock doesn't work.
8745 ++ /* Preempt disable and BH disable are needed to protect per-cpu
8746 ++ * redirect_info between BPF prog and skb_do_redirect().
8747 + */
8748 + preempt_disable();
8749 ++ local_bh_disable();
8750 + bpf_compute_data_pointers(skb);
8751 + ret = bpf_prog_run_save_cb(lwt->prog, skb);
8752 +
8753 +@@ -82,6 +81,7 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
8754 + break;
8755 + }
8756 +
8757 ++ local_bh_enable();
8758 + preempt_enable();
8759 +
8760 + return ret;
8761 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
8762 +index 10d8f95eb7712..ca20efe775ee4 100644
8763 +--- a/net/ipv4/netfilter/arp_tables.c
8764 ++++ b/net/ipv4/netfilter/arp_tables.c
8765 +@@ -202,7 +202,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
8766 +
8767 + local_bh_disable();
8768 + addend = xt_write_recseq_begin();
8769 +- private = READ_ONCE(table->private); /* Address dependency. */
8770 ++ private = rcu_access_pointer(table->private);
8771 + cpu = smp_processor_id();
8772 + table_base = private->entries;
8773 + jumpstack = (struct arpt_entry **)private->jumpstack[cpu];
8774 +@@ -648,7 +648,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
8775 + {
8776 + unsigned int countersize;
8777 + struct xt_counters *counters;
8778 +- const struct xt_table_info *private = table->private;
8779 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
8780 +
8781 + /* We need atomic snapshot of counters: rest doesn't change
8782 + * (other than comefrom, which userspace doesn't care
8783 +@@ -672,7 +672,7 @@ static int copy_entries_to_user(unsigned int total_size,
8784 + unsigned int off, num;
8785 + const struct arpt_entry *e;
8786 + struct xt_counters *counters;
8787 +- struct xt_table_info *private = table->private;
8788 ++ struct xt_table_info *private = xt_table_get_private_protected(table);
8789 + int ret = 0;
8790 + void *loc_cpu_entry;
8791 +
8792 +@@ -807,7 +807,7 @@ static int get_info(struct net *net, void __user *user,
8793 + t = xt_request_find_table_lock(net, NFPROTO_ARP, name);
8794 + if (!IS_ERR(t)) {
8795 + struct arpt_getinfo info;
8796 +- const struct xt_table_info *private = t->private;
8797 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
8798 + #ifdef CONFIG_COMPAT
8799 + struct xt_table_info tmp;
8800 +
8801 +@@ -860,7 +860,7 @@ static int get_entries(struct net *net, struct arpt_get_entries __user *uptr,
8802 +
8803 + t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
8804 + if (!IS_ERR(t)) {
8805 +- const struct xt_table_info *private = t->private;
8806 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
8807 +
8808 + if (get.size == private->size)
8809 + ret = copy_entries_to_user(private->size,
8810 +@@ -1019,7 +1019,7 @@ static int do_add_counters(struct net *net, const void __user *user,
8811 + }
8812 +
8813 + local_bh_disable();
8814 +- private = t->private;
8815 ++ private = xt_table_get_private_protected(t);
8816 + if (private->number != tmp.num_counters) {
8817 + ret = -EINVAL;
8818 + goto unlock_up_free;
8819 +@@ -1356,7 +1356,7 @@ static int compat_copy_entries_to_user(unsigned int total_size,
8820 + void __user *userptr)
8821 + {
8822 + struct xt_counters *counters;
8823 +- const struct xt_table_info *private = table->private;
8824 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
8825 + void __user *pos;
8826 + unsigned int size;
8827 + int ret = 0;
8828 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
8829 +index e77872c93c206..115d48049686f 100644
8830 +--- a/net/ipv4/netfilter/ip_tables.c
8831 ++++ b/net/ipv4/netfilter/ip_tables.c
8832 +@@ -261,7 +261,7 @@ ipt_do_table(struct sk_buff *skb,
8833 + WARN_ON(!(table->valid_hooks & (1 << hook)));
8834 + local_bh_disable();
8835 + addend = xt_write_recseq_begin();
8836 +- private = READ_ONCE(table->private); /* Address dependency. */
8837 ++ private = rcu_access_pointer(table->private);
8838 + cpu = smp_processor_id();
8839 + table_base = private->entries;
8840 + jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
8841 +@@ -794,7 +794,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
8842 + {
8843 + unsigned int countersize;
8844 + struct xt_counters *counters;
8845 +- const struct xt_table_info *private = table->private;
8846 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
8847 +
8848 + /* We need atomic snapshot of counters: rest doesn't change
8849 + (other than comefrom, which userspace doesn't care
8850 +@@ -818,7 +818,7 @@ copy_entries_to_user(unsigned int total_size,
8851 + unsigned int off, num;
8852 + const struct ipt_entry *e;
8853 + struct xt_counters *counters;
8854 +- const struct xt_table_info *private = table->private;
8855 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
8856 + int ret = 0;
8857 + const void *loc_cpu_entry;
8858 +
8859 +@@ -968,7 +968,7 @@ static int get_info(struct net *net, void __user *user,
8860 + t = xt_request_find_table_lock(net, AF_INET, name);
8861 + if (!IS_ERR(t)) {
8862 + struct ipt_getinfo info;
8863 +- const struct xt_table_info *private = t->private;
8864 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
8865 + #ifdef CONFIG_COMPAT
8866 + struct xt_table_info tmp;
8867 +
8868 +@@ -1022,7 +1022,7 @@ get_entries(struct net *net, struct ipt_get_entries __user *uptr,
8869 +
8870 + t = xt_find_table_lock(net, AF_INET, get.name);
8871 + if (!IS_ERR(t)) {
8872 +- const struct xt_table_info *private = t->private;
8873 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
8874 + if (get.size == private->size)
8875 + ret = copy_entries_to_user(private->size,
8876 + t, uptr->entrytable);
8877 +@@ -1178,7 +1178,7 @@ do_add_counters(struct net *net, const void __user *user,
8878 + }
8879 +
8880 + local_bh_disable();
8881 +- private = t->private;
8882 ++ private = xt_table_get_private_protected(t);
8883 + if (private->number != tmp.num_counters) {
8884 + ret = -EINVAL;
8885 + goto unlock_up_free;
8886 +@@ -1573,7 +1573,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
8887 + void __user *userptr)
8888 + {
8889 + struct xt_counters *counters;
8890 +- const struct xt_table_info *private = table->private;
8891 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
8892 + void __user *pos;
8893 + unsigned int size;
8894 + int ret = 0;
8895 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
8896 +index 686833dfaa7fd..aa025cfda77bf 100644
8897 +--- a/net/ipv4/tcp_input.c
8898 ++++ b/net/ipv4/tcp_input.c
8899 +@@ -439,7 +439,6 @@ void tcp_init_buffer_space(struct sock *sk)
8900 + if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
8901 + tcp_sndbuf_expand(sk);
8902 +
8903 +- tp->rcvq_space.space = min_t(u32, tp->rcv_wnd, TCP_INIT_CWND * tp->advmss);
8904 + tcp_mstamp_refresh(tp);
8905 + tp->rcvq_space.time = tp->tcp_mstamp;
8906 + tp->rcvq_space.seq = tp->copied_seq;
8907 +@@ -463,6 +462,8 @@ void tcp_init_buffer_space(struct sock *sk)
8908 +
8909 + tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
8910 + tp->snd_cwnd_stamp = tcp_jiffies32;
8911 ++ tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,
8912 ++ (u32)TCP_INIT_CWND * tp->advmss);
8913 + }
8914 +
8915 + /* 4. Recalculate window clamp after socket hit its memory bounds. */
8916 +diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
8917 +index 74fb211e0ea61..3cfefec819758 100644
8918 +--- a/net/ipv4/tcp_output.c
8919 ++++ b/net/ipv4/tcp_output.c
8920 +@@ -1622,7 +1622,8 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
8921 + * window, and remember whether we were cwnd-limited then.
8922 + */
8923 + if (!before(tp->snd_una, tp->max_packets_seq) ||
8924 +- tp->packets_out > tp->max_packets_out) {
8925 ++ tp->packets_out > tp->max_packets_out ||
8926 ++ is_cwnd_limited) {
8927 + tp->max_packets_out = tp->packets_out;
8928 + tp->max_packets_seq = tp->snd_nxt;
8929 + tp->is_cwnd_limited = is_cwnd_limited;
8930 +@@ -2407,6 +2408,10 @@ repair:
8931 + else
8932 + tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);
8933 +
8934 ++ is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
8935 ++ if (likely(sent_pkts || is_cwnd_limited))
8936 ++ tcp_cwnd_validate(sk, is_cwnd_limited);
8937 ++
8938 + if (likely(sent_pkts)) {
8939 + if (tcp_in_cwnd_reduction(sk))
8940 + tp->prr_out += sent_pkts;
8941 +@@ -2414,8 +2419,6 @@ repair:
8942 + /* Send one loss probe per tail loss episode. */
8943 + if (push_one != 2)
8944 + tcp_schedule_loss_probe(sk, false);
8945 +- is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
8946 +- tcp_cwnd_validate(sk, is_cwnd_limited);
8947 + return false;
8948 + }
8949 + return !tp->packets_out && !tcp_write_queue_empty(sk);
8950 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
8951 +index daf2e9e9193d1..b1441349e1517 100644
8952 +--- a/net/ipv6/netfilter/ip6_tables.c
8953 ++++ b/net/ipv6/netfilter/ip6_tables.c
8954 +@@ -283,7 +283,7 @@ ip6t_do_table(struct sk_buff *skb,
8955 +
8956 + local_bh_disable();
8957 + addend = xt_write_recseq_begin();
8958 +- private = READ_ONCE(table->private); /* Address dependency. */
8959 ++ private = rcu_access_pointer(table->private);
8960 + cpu = smp_processor_id();
8961 + table_base = private->entries;
8962 + jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
8963 +@@ -810,7 +810,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
8964 + {
8965 + unsigned int countersize;
8966 + struct xt_counters *counters;
8967 +- const struct xt_table_info *private = table->private;
8968 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
8969 +
8970 + /* We need atomic snapshot of counters: rest doesn't change
8971 + (other than comefrom, which userspace doesn't care
8972 +@@ -834,7 +834,7 @@ copy_entries_to_user(unsigned int total_size,
8973 + unsigned int off, num;
8974 + const struct ip6t_entry *e;
8975 + struct xt_counters *counters;
8976 +- const struct xt_table_info *private = table->private;
8977 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
8978 + int ret = 0;
8979 + const void *loc_cpu_entry;
8980 +
8981 +@@ -984,7 +984,7 @@ static int get_info(struct net *net, void __user *user,
8982 + t = xt_request_find_table_lock(net, AF_INET6, name);
8983 + if (!IS_ERR(t)) {
8984 + struct ip6t_getinfo info;
8985 +- const struct xt_table_info *private = t->private;
8986 ++ const struct xt_table_info *private = xt_table_get_private_protected(t);
8987 + #ifdef CONFIG_COMPAT
8988 + struct xt_table_info tmp;
8989 +
8990 +@@ -1039,7 +1039,7 @@ get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
8991 +
8992 + t = xt_find_table_lock(net, AF_INET6, get.name);
8993 + if (!IS_ERR(t)) {
8994 +- struct xt_table_info *private = t->private;
8995 ++ struct xt_table_info *private = xt_table_get_private_protected(t);
8996 + if (get.size == private->size)
8997 + ret = copy_entries_to_user(private->size,
8998 + t, uptr->entrytable);
8999 +@@ -1194,7 +1194,7 @@ do_add_counters(struct net *net, const void __user *user, unsigned int len,
9000 + }
9001 +
9002 + local_bh_disable();
9003 +- private = t->private;
9004 ++ private = xt_table_get_private_protected(t);
9005 + if (private->number != tmp.num_counters) {
9006 + ret = -EINVAL;
9007 + goto unlock_up_free;
9008 +@@ -1582,7 +1582,7 @@ compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
9009 + void __user *userptr)
9010 + {
9011 + struct xt_counters *counters;
9012 +- const struct xt_table_info *private = table->private;
9013 ++ const struct xt_table_info *private = xt_table_get_private_protected(table);
9014 + void __user *pos;
9015 + unsigned int size;
9016 + int ret = 0;
9017 +diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
9018 +index 4fc720c77e37e..6dc5f93b1e4d1 100644
9019 +--- a/net/mac80211/mesh_pathtbl.c
9020 ++++ b/net/mac80211/mesh_pathtbl.c
9021 +@@ -63,6 +63,7 @@ static struct mesh_table *mesh_table_alloc(void)
9022 + atomic_set(&newtbl->entries, 0);
9023 + spin_lock_init(&newtbl->gates_lock);
9024 + spin_lock_init(&newtbl->walk_lock);
9025 ++ rhashtable_init(&newtbl->rhead, &mesh_rht_params);
9026 +
9027 + return newtbl;
9028 + }
9029 +@@ -786,9 +787,6 @@ int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
9030 + goto free_path;
9031 + }
9032 +
9033 +- rhashtable_init(&tbl_path->rhead, &mesh_rht_params);
9034 +- rhashtable_init(&tbl_mpp->rhead, &mesh_rht_params);
9035 +-
9036 + sdata->u.mesh.mesh_paths = tbl_path;
9037 + sdata->u.mesh.mpp_paths = tbl_mpp;
9038 +
9039 +diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
9040 +index 4d154efb80c88..d691c2f2e92e7 100644
9041 +--- a/net/mac80211/vht.c
9042 ++++ b/net/mac80211/vht.c
9043 +@@ -421,12 +421,18 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta)
9044 + * IEEE80211-2016 specification makes higher bandwidth operation
9045 + * possible on the TDLS link if the peers have wider bandwidth
9046 + * capability.
9047 ++ *
9048 ++ * However, in this case, and only if the TDLS peer is authorized,
9049 ++ * limit to the tdls_chandef so that the configuration here isn't
9050 ++ * wider than what's actually requested on the channel context.
9051 + */
9052 + if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
9053 +- test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW))
9054 +- return bw;
9055 +-
9056 +- bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
9057 ++ test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) &&
9058 ++ test_sta_flag(sta, WLAN_STA_AUTHORIZED) &&
9059 ++ sta->tdls_chandef.chan)
9060 ++ bw = min(bw, ieee80211_chan_width_to_rx_bw(sta->tdls_chandef.width));
9061 ++ else
9062 ++ bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width));
9063 +
9064 + return bw;
9065 + }
9066 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
9067 +index 3bab89dbc3717..6a7d0303d058f 100644
9068 +--- a/net/netfilter/x_tables.c
9069 ++++ b/net/netfilter/x_tables.c
9070 +@@ -1354,6 +1354,14 @@ struct xt_counters *xt_counters_alloc(unsigned int counters)
9071 + }
9072 + EXPORT_SYMBOL(xt_counters_alloc);
9073 +
9074 ++struct xt_table_info
9075 ++*xt_table_get_private_protected(const struct xt_table *table)
9076 ++{
9077 ++ return rcu_dereference_protected(table->private,
9078 ++ mutex_is_locked(&xt[table->af].mutex));
9079 ++}
9080 ++EXPORT_SYMBOL(xt_table_get_private_protected);
9081 ++
9082 + struct xt_table_info *
9083 + xt_replace_table(struct xt_table *table,
9084 + unsigned int num_counters,
9085 +@@ -1361,7 +1369,6 @@ xt_replace_table(struct xt_table *table,
9086 + int *error)
9087 + {
9088 + struct xt_table_info *private;
9089 +- unsigned int cpu;
9090 + int ret;
9091 +
9092 + ret = xt_jumpstack_alloc(newinfo);
9093 +@@ -1371,47 +1378,20 @@ xt_replace_table(struct xt_table *table,
9094 + }
9095 +
9096 + /* Do the substitution. */
9097 +- local_bh_disable();
9098 +- private = table->private;
9099 ++ private = xt_table_get_private_protected(table);
9100 +
9101 + /* Check inside lock: is the old number correct? */
9102 + if (num_counters != private->number) {
9103 + pr_debug("num_counters != table->private->number (%u/%u)\n",
9104 + num_counters, private->number);
9105 +- local_bh_enable();
9106 + *error = -EAGAIN;
9107 + return NULL;
9108 + }
9109 +
9110 + newinfo->initial_entries = private->initial_entries;
9111 +- /*
9112 +- * Ensure contents of newinfo are visible before assigning to
9113 +- * private.
9114 +- */
9115 +- smp_wmb();
9116 +- table->private = newinfo;
9117 +-
9118 +- /* make sure all cpus see new ->private value */
9119 +- smp_wmb();
9120 +
9121 +- /*
9122 +- * Even though table entries have now been swapped, other CPU's
9123 +- * may still be using the old entries...
9124 +- */
9125 +- local_bh_enable();
9126 +-
9127 +- /* ... so wait for even xt_recseq on all cpus */
9128 +- for_each_possible_cpu(cpu) {
9129 +- seqcount_t *s = &per_cpu(xt_recseq, cpu);
9130 +- u32 seq = raw_read_seqcount(s);
9131 +-
9132 +- if (seq & 1) {
9133 +- do {
9134 +- cond_resched();
9135 +- cpu_relax();
9136 +- } while (seq == raw_read_seqcount(s));
9137 +- }
9138 +- }
9139 ++ rcu_assign_pointer(table->private, newinfo);
9140 ++ synchronize_rcu();
9141 +
9142 + #ifdef CONFIG_AUDIT
9143 + if (audit_enabled) {
9144 +@@ -1452,12 +1432,12 @@ struct xt_table *xt_register_table(struct net *net,
9145 + }
9146 +
9147 + /* Simplifies replace_table code. */
9148 +- table->private = bootstrap;
9149 ++ rcu_assign_pointer(table->private, bootstrap);
9150 +
9151 + if (!xt_replace_table(table, 0, newinfo, &ret))
9152 + goto unlock;
9153 +
9154 +- private = table->private;
9155 ++ private = xt_table_get_private_protected(table);
9156 + pr_debug("table->private->number = %u\n", private->number);
9157 +
9158 + /* save number of initial entries */
9159 +@@ -1480,7 +1460,8 @@ void *xt_unregister_table(struct xt_table *table)
9160 + struct xt_table_info *private;
9161 +
9162 + mutex_lock(&xt[table->af].mutex);
9163 +- private = table->private;
9164 ++ private = xt_table_get_private_protected(table);
9165 ++ RCU_INIT_POINTER(table->private, NULL);
9166 + list_del(&table->list);
9167 + mutex_unlock(&xt[table->af].mutex);
9168 + kfree(table);
9169 +diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
9170 +index 5e7c13aa66d0d..9c4235ce57894 100644
9171 +--- a/net/sunrpc/xprt.c
9172 ++++ b/net/sunrpc/xprt.c
9173 +@@ -143,31 +143,64 @@ out:
9174 + }
9175 + EXPORT_SYMBOL_GPL(xprt_unregister_transport);
9176 +
9177 ++static void
9178 ++xprt_class_release(const struct xprt_class *t)
9179 ++{
9180 ++ module_put(t->owner);
9181 ++}
9182 ++
9183 ++static const struct xprt_class *
9184 ++xprt_class_find_by_netid_locked(const char *netid)
9185 ++{
9186 ++ const struct xprt_class *t;
9187 ++ unsigned int i;
9188 ++
9189 ++ list_for_each_entry(t, &xprt_list, list) {
9190 ++ for (i = 0; t->netid[i][0] != '\0'; i++) {
9191 ++ if (strcmp(t->netid[i], netid) != 0)
9192 ++ continue;
9193 ++ if (!try_module_get(t->owner))
9194 ++ continue;
9195 ++ return t;
9196 ++ }
9197 ++ }
9198 ++ return NULL;
9199 ++}
9200 ++
9201 ++static const struct xprt_class *
9202 ++xprt_class_find_by_netid(const char *netid)
9203 ++{
9204 ++ const struct xprt_class *t;
9205 ++
9206 ++ spin_lock(&xprt_list_lock);
9207 ++ t = xprt_class_find_by_netid_locked(netid);
9208 ++ if (!t) {
9209 ++ spin_unlock(&xprt_list_lock);
9210 ++ request_module("rpc%s", netid);
9211 ++ spin_lock(&xprt_list_lock);
9212 ++ t = xprt_class_find_by_netid_locked(netid);
9213 ++ }
9214 ++ spin_unlock(&xprt_list_lock);
9215 ++ return t;
9216 ++}
9217 ++
9218 + /**
9219 + * xprt_load_transport - load a transport implementation
9220 +- * @transport_name: transport to load
9221 ++ * @netid: transport to load
9222 + *
9223 + * Returns:
9224 + * 0: transport successfully loaded
9225 + * -ENOENT: transport module not available
9226 + */
9227 +-int xprt_load_transport(const char *transport_name)
9228 ++int xprt_load_transport(const char *netid)
9229 + {
9230 +- struct xprt_class *t;
9231 +- int result;
9232 ++ const struct xprt_class *t;
9233 +
9234 +- result = 0;
9235 +- spin_lock(&xprt_list_lock);
9236 +- list_for_each_entry(t, &xprt_list, list) {
9237 +- if (strcmp(t->name, transport_name) == 0) {
9238 +- spin_unlock(&xprt_list_lock);
9239 +- goto out;
9240 +- }
9241 +- }
9242 +- spin_unlock(&xprt_list_lock);
9243 +- result = request_module("xprt%s", transport_name);
9244 +-out:
9245 +- return result;
9246 ++ t = xprt_class_find_by_netid(netid);
9247 ++ if (!t)
9248 ++ return -ENOENT;
9249 ++ xprt_class_release(t);
9250 ++ return 0;
9251 + }
9252 + EXPORT_SYMBOL_GPL(xprt_load_transport);
9253 +
9254 +diff --git a/net/sunrpc/xprtrdma/module.c b/net/sunrpc/xprtrdma/module.c
9255 +index 620327c01302c..45c5b41ac8dc9 100644
9256 +--- a/net/sunrpc/xprtrdma/module.c
9257 ++++ b/net/sunrpc/xprtrdma/module.c
9258 +@@ -24,6 +24,7 @@ MODULE_DESCRIPTION("RPC/RDMA Transport");
9259 + MODULE_LICENSE("Dual BSD/GPL");
9260 + MODULE_ALIAS("svcrdma");
9261 + MODULE_ALIAS("xprtrdma");
9262 ++MODULE_ALIAS("rpcrdma6");
9263 +
9264 + static void __exit rpc_rdma_cleanup(void)
9265 + {
9266 +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
9267 +index f56f36b4d742d..fdd14908eacbd 100644
9268 +--- a/net/sunrpc/xprtrdma/transport.c
9269 ++++ b/net/sunrpc/xprtrdma/transport.c
9270 +@@ -854,6 +854,7 @@ static struct xprt_class xprt_rdma = {
9271 + .owner = THIS_MODULE,
9272 + .ident = XPRT_TRANSPORT_RDMA,
9273 + .setup = xprt_setup_rdma,
9274 ++ .netid = { "rdma", "rdma6", "" },
9275 + };
9276 +
9277 + void xprt_rdma_cleanup(void)
9278 +diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
9279 +index 9dc059dea689d..798fbd89ed42f 100644
9280 +--- a/net/sunrpc/xprtsock.c
9281 ++++ b/net/sunrpc/xprtsock.c
9282 +@@ -3241,6 +3241,7 @@ static struct xprt_class xs_local_transport = {
9283 + .owner = THIS_MODULE,
9284 + .ident = XPRT_TRANSPORT_LOCAL,
9285 + .setup = xs_setup_local,
9286 ++ .netid = { "" },
9287 + };
9288 +
9289 + static struct xprt_class xs_udp_transport = {
9290 +@@ -3249,6 +3250,7 @@ static struct xprt_class xs_udp_transport = {
9291 + .owner = THIS_MODULE,
9292 + .ident = XPRT_TRANSPORT_UDP,
9293 + .setup = xs_setup_udp,
9294 ++ .netid = { "udp", "udp6", "" },
9295 + };
9296 +
9297 + static struct xprt_class xs_tcp_transport = {
9298 +@@ -3257,6 +3259,7 @@ static struct xprt_class xs_tcp_transport = {
9299 + .owner = THIS_MODULE,
9300 + .ident = XPRT_TRANSPORT_TCP,
9301 + .setup = xs_setup_tcp,
9302 ++ .netid = { "tcp", "tcp6", "" },
9303 + };
9304 +
9305 + static struct xprt_class xs_bc_tcp_transport = {
9306 +@@ -3265,6 +3268,7 @@ static struct xprt_class xs_bc_tcp_transport = {
9307 + .owner = THIS_MODULE,
9308 + .ident = XPRT_TRANSPORT_BC_TCP,
9309 + .setup = xs_setup_bc_tcp,
9310 ++ .netid = { "" },
9311 + };
9312 +
9313 + /**
9314 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
9315 +index fbc8875502c3e..5f0605275fa39 100644
9316 +--- a/net/wireless/nl80211.c
9317 ++++ b/net/wireless/nl80211.c
9318 +@@ -11502,7 +11502,7 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
9319 + struct net_device *dev = info->user_ptr[1];
9320 + struct wireless_dev *wdev = dev->ieee80211_ptr;
9321 + struct nlattr *tb[NUM_NL80211_REKEY_DATA];
9322 +- struct cfg80211_gtk_rekey_data rekey_data;
9323 ++ struct cfg80211_gtk_rekey_data rekey_data = {};
9324 + int err;
9325 +
9326 + if (!info->attrs[NL80211_ATTR_REKEY_DATA])
9327 +diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
9328 +index 9ff2ab63e6392..6bb0649c028c4 100644
9329 +--- a/net/xdp/xsk.c
9330 ++++ b/net/xdp/xsk.c
9331 +@@ -289,17 +289,17 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
9332 + return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
9333 + }
9334 +
9335 +-static unsigned int xsk_poll(struct file *file, struct socket *sock,
9336 ++static __poll_t xsk_poll(struct file *file, struct socket *sock,
9337 + struct poll_table_struct *wait)
9338 + {
9339 +- unsigned int mask = datagram_poll(file, sock, wait);
9340 ++ __poll_t mask = datagram_poll(file, sock, wait);
9341 + struct sock *sk = sock->sk;
9342 + struct xdp_sock *xs = xdp_sk(sk);
9343 +
9344 + if (xs->rx && !xskq_empty_desc(xs->rx))
9345 +- mask |= POLLIN | POLLRDNORM;
9346 ++ mask |= EPOLLIN | EPOLLRDNORM;
9347 + if (xs->tx && !xskq_full_desc(xs->tx))
9348 +- mask |= POLLOUT | POLLWRNORM;
9349 ++ mask |= EPOLLOUT | EPOLLWRNORM;
9350 +
9351 + return mask;
9352 + }
9353 +diff --git a/samples/bpf/lwt_len_hist.sh b/samples/bpf/lwt_len_hist.sh
9354 +old mode 100644
9355 +new mode 100755
9356 +index 090b96eaf7f76..0eda9754f50b8
9357 +--- a/samples/bpf/lwt_len_hist.sh
9358 ++++ b/samples/bpf/lwt_len_hist.sh
9359 +@@ -8,6 +8,8 @@ VETH1=tst_lwt1b
9360 + TRACE_ROOT=/sys/kernel/debug/tracing
9361 +
9362 + function cleanup {
9363 ++ # To reset saved histogram, remove pinned map
9364 ++ rm /sys/fs/bpf/tc/globals/lwt_len_hist_map
9365 + ip route del 192.168.253.2/32 dev $VETH0 2> /dev/null
9366 + ip link del $VETH0 2> /dev/null
9367 + ip link del $VETH1 2> /dev/null
9368 +diff --git a/samples/bpf/test_lwt_bpf.sh b/samples/bpf/test_lwt_bpf.sh
9369 +old mode 100644
9370 +new mode 100755
9371 +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
9372 +index 7eb944cbbaeab..2e31ec1378219 100755
9373 +--- a/scripts/checkpatch.pl
9374 ++++ b/scripts/checkpatch.pl
9375 +@@ -4059,7 +4059,7 @@ sub process {
9376 + $fix) {
9377 + fix_delete_line($fixlinenr, $rawline);
9378 + my $fixed_line = $rawline;
9379 +- $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*){(.*)$/;
9380 ++ $fixed_line =~ /(^..*$Type\s*$Ident\(.*\)\s*)\{(.*)$/;
9381 + my $line1 = $1;
9382 + my $line2 = $2;
9383 + fix_insert_line($fixlinenr, ltrim($line1));
9384 +diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
9385 +index 5ca2df790d3cf..389814b02d06b 100644
9386 +--- a/scripts/kconfig/preprocess.c
9387 ++++ b/scripts/kconfig/preprocess.c
9388 +@@ -111,7 +111,7 @@ static char *do_error_if(int argc, char *argv[])
9389 + if (!strcmp(argv[0], "y"))
9390 + pperror("%s", argv[1]);
9391 +
9392 +- return NULL;
9393 ++ return xstrdup("");
9394 + }
9395 +
9396 + static char *do_filename(int argc, char *argv[])
9397 +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
9398 +index f4f3de5f06ca5..5596ea8f339a3 100644
9399 +--- a/security/integrity/ima/ima_crypto.c
9400 ++++ b/security/integrity/ima/ima_crypto.c
9401 +@@ -415,7 +415,7 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
9402 + loff_t i_size;
9403 + int rc;
9404 + struct file *f = file;
9405 +- bool new_file_instance = false, modified_mode = false;
9406 ++ bool new_file_instance = false;
9407 +
9408 + /*
9409 + * For consistency, fail file's opened with the O_DIRECT flag on
9410 +@@ -433,18 +433,10 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
9411 + O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
9412 + flags |= O_RDONLY;
9413 + f = dentry_open(&file->f_path, flags, file->f_cred);
9414 +- if (IS_ERR(f)) {
9415 +- /*
9416 +- * Cannot open the file again, lets modify f_mode
9417 +- * of original and continue
9418 +- */
9419 +- pr_info_ratelimited("Unable to reopen file for reading.\n");
9420 +- f = file;
9421 +- f->f_mode |= FMODE_READ;
9422 +- modified_mode = true;
9423 +- } else {
9424 +- new_file_instance = true;
9425 +- }
9426 ++ if (IS_ERR(f))
9427 ++ return PTR_ERR(f);
9428 ++
9429 ++ new_file_instance = true;
9430 + }
9431 +
9432 + i_size = i_size_read(file_inode(f));
9433 +@@ -459,8 +451,6 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
9434 + out:
9435 + if (new_file_instance)
9436 + fput(f);
9437 +- else if (modified_mode)
9438 +- f->f_mode &= ~FMODE_READ;
9439 + return rc;
9440 + }
9441 +
9442 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
9443 +index 250b725f5754c..08833bbb97aab 100644
9444 +--- a/security/selinux/hooks.c
9445 ++++ b/security/selinux/hooks.c
9446 +@@ -1618,7 +1618,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
9447 + * inode_doinit with a dentry, before these inodes could
9448 + * be used again by userspace.
9449 + */
9450 +- goto out;
9451 ++ goto out_invalid;
9452 + }
9453 +
9454 + len = INITCONTEXTLEN;
9455 +@@ -1734,7 +1734,7 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
9456 + * could be used again by userspace.
9457 + */
9458 + if (!dentry)
9459 +- goto out;
9460 ++ goto out_invalid;
9461 + rc = selinux_genfs_get_sid(dentry, sclass,
9462 + sbsec->flags, &sid);
9463 + dput(dentry);
9464 +@@ -1747,11 +1747,10 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
9465 + out:
9466 + spin_lock(&isec->lock);
9467 + if (isec->initialized == LABEL_PENDING) {
9468 +- if (!sid || rc) {
9469 ++ if (rc) {
9470 + isec->initialized = LABEL_INVALID;
9471 + goto out_unlock;
9472 + }
9473 +-
9474 + isec->initialized = LABEL_INITIALIZED;
9475 + isec->sid = sid;
9476 + }
9477 +@@ -1759,6 +1758,15 @@ out:
9478 + out_unlock:
9479 + spin_unlock(&isec->lock);
9480 + return rc;
9481 ++
9482 ++out_invalid:
9483 ++ spin_lock(&isec->lock);
9484 ++ if (isec->initialized == LABEL_PENDING) {
9485 ++ isec->initialized = LABEL_INVALID;
9486 ++ isec->sid = sid;
9487 ++ }
9488 ++ spin_unlock(&isec->lock);
9489 ++ return 0;
9490 + }
9491 +
9492 + /* Convert a Linux signal to an access vector. */
9493 +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
9494 +index 41abb8bd466af..2a286167460f6 100644
9495 +--- a/sound/core/oss/pcm_oss.c
9496 ++++ b/sound/core/oss/pcm_oss.c
9497 +@@ -708,6 +708,8 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
9498 +
9499 + oss_buffer_size = snd_pcm_plug_client_size(substream,
9500 + snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, NULL)) * oss_frame_size;
9501 ++ if (!oss_buffer_size)
9502 ++ return -EINVAL;
9503 + oss_buffer_size = rounddown_pow_of_two(oss_buffer_size);
9504 + if (atomic_read(&substream->mmap_count)) {
9505 + if (oss_buffer_size > runtime->oss.mmap_bytes)
9506 +@@ -743,17 +745,21 @@ static int snd_pcm_oss_period_size(struct snd_pcm_substream *substream,
9507 +
9508 + min_period_size = snd_pcm_plug_client_size(substream,
9509 + snd_pcm_hw_param_value_min(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
9510 +- min_period_size *= oss_frame_size;
9511 +- min_period_size = roundup_pow_of_two(min_period_size);
9512 +- if (oss_period_size < min_period_size)
9513 +- oss_period_size = min_period_size;
9514 ++ if (min_period_size) {
9515 ++ min_period_size *= oss_frame_size;
9516 ++ min_period_size = roundup_pow_of_two(min_period_size);
9517 ++ if (oss_period_size < min_period_size)
9518 ++ oss_period_size = min_period_size;
9519 ++ }
9520 +
9521 + max_period_size = snd_pcm_plug_client_size(substream,
9522 + snd_pcm_hw_param_value_max(slave_params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, NULL));
9523 +- max_period_size *= oss_frame_size;
9524 +- max_period_size = rounddown_pow_of_two(max_period_size);
9525 +- if (oss_period_size > max_period_size)
9526 +- oss_period_size = max_period_size;
9527 ++ if (max_period_size) {
9528 ++ max_period_size *= oss_frame_size;
9529 ++ max_period_size = rounddown_pow_of_two(max_period_size);
9530 ++ if (oss_period_size > max_period_size)
9531 ++ oss_period_size = max_period_size;
9532 ++ }
9533 +
9534 + oss_periods = oss_buffer_size / oss_period_size;
9535 +
9536 +@@ -1949,11 +1955,15 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
9537 + static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsigned int val)
9538 + {
9539 + struct snd_pcm_runtime *runtime;
9540 ++ int fragshift;
9541 +
9542 + runtime = substream->runtime;
9543 + if (runtime->oss.subdivision || runtime->oss.fragshift)
9544 + return -EINVAL;
9545 +- runtime->oss.fragshift = val & 0xffff;
9546 ++ fragshift = val & 0xffff;
9547 ++ if (fragshift >= 31)
9548 ++ return -EINVAL;
9549 ++ runtime->oss.fragshift = fragshift;
9550 + runtime->oss.maxfrags = (val >> 16) & 0xffff;
9551 + if (runtime->oss.fragshift < 4) /* < 16 */
9552 + runtime->oss.fragshift = 4;
9553 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
9554 +index dbeb62362f1c3..7f1e763ccca88 100644
9555 +--- a/sound/pci/hda/hda_codec.c
9556 ++++ b/sound/pci/hda/hda_codec.c
9557 +@@ -1782,7 +1782,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
9558 + return -EBUSY;
9559 +
9560 + /* OK, let it free */
9561 +- snd_hdac_device_unregister(&codec->core);
9562 ++ device_release_driver(hda_codec_dev(codec));
9563 +
9564 + /* allow device access again */
9565 + snd_hda_unlock_devices(bus);
9566 +diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
9567 +index 6535155e992df..25a4c2d580dae 100644
9568 +--- a/sound/pci/hda/hda_sysfs.c
9569 ++++ b/sound/pci/hda/hda_sysfs.c
9570 +@@ -138,7 +138,7 @@ static int reconfig_codec(struct hda_codec *codec)
9571 + "The codec is being used, can't reconfigure.\n");
9572 + goto error;
9573 + }
9574 +- err = snd_hda_codec_configure(codec);
9575 ++ err = device_reprobe(hda_codec_dev(codec));
9576 + if (err < 0)
9577 + goto error;
9578 + err = snd_card_register(codec->card);
9579 +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
9580 +index c9f3c002bd553..004a7772bb5d0 100644
9581 +--- a/sound/pci/hda/patch_ca0132.c
9582 ++++ b/sound/pci/hda/patch_ca0132.c
9583 +@@ -106,7 +106,7 @@ enum {
9584 + };
9585 +
9586 + /* Strings for Input Source Enum Control */
9587 +-static const char *const in_src_str[3] = {"Rear Mic", "Line", "Front Mic" };
9588 ++static const char *const in_src_str[3] = { "Microphone", "Line In", "Front Microphone" };
9589 + #define IN_SRC_NUM_OF_INPUTS 3
9590 + enum {
9591 + REAR_MIC,
9592 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
9593 +index 790a2e79aba54..37b2bcdb3d65a 100644
9594 +--- a/sound/pci/hda/patch_realtek.c
9595 ++++ b/sound/pci/hda/patch_realtek.c
9596 +@@ -2491,6 +2491,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
9597 + SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950),
9598 + SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950),
9599 + SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
9600 ++ SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950),
9601 + SND_PCI_QUIRK(0x1462, 0x1275, "MSI-GL63", ALC1220_FIXUP_CLEVO_P950),
9602 + SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
9603 + SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
9604 +@@ -7084,6 +7085,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9605 + SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
9606 + SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
9607 + SND_PCI_QUIRK(0x1043, 0x11c0, "ASUS X556UR", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
9608 ++ SND_PCI_QUIRK(0x1043, 0x1271, "ASUS X430UN", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
9609 + SND_PCI_QUIRK(0x1043, 0x1290, "ASUS X441SA", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
9610 + SND_PCI_QUIRK(0x1043, 0x12a0, "ASUS X441UV", ALC233_FIXUP_EAPD_COEF_AND_MIC_NO_PRESENCE),
9611 + SND_PCI_QUIRK(0x1043, 0x12f0, "ASUS X541UV", ALC256_FIXUP_ASUS_MIC),
9612 +@@ -7100,6 +7102,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9613 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
9614 + SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
9615 + SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
9616 ++ SND_PCI_QUIRK(0x1043, 0x125e, "ASUS Q524UQK", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
9617 + SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
9618 + SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
9619 + SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
9620 +@@ -7126,6 +7129,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
9621 + SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
9622 + SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
9623 + SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
9624 ++ SND_PCI_QUIRK(0x152d, 0x1082, "Quanta NL3", ALC269_FIXUP_LIFEBOOK),
9625 + SND_PCI_QUIRK(0x1558, 0x1323, "Clevo N130ZU", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
9626 + SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
9627 + SND_PCI_QUIRK(0x1558, 0x1401, "Clevo L140[CZ]U", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
9628 +diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
9629 +index df5b36b8fc5a6..bb6a95be87265 100644
9630 +--- a/sound/soc/codecs/wm8997.c
9631 ++++ b/sound/soc/codecs/wm8997.c
9632 +@@ -1180,6 +1180,8 @@ static int wm8997_probe(struct platform_device *pdev)
9633 + goto err_spk_irqs;
9634 + }
9635 +
9636 ++ return ret;
9637 ++
9638 + err_spk_irqs:
9639 + arizona_free_spk_irqs(arizona);
9640 +
9641 +diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
9642 +index 61294c787f274..17dc5780ab686 100644
9643 +--- a/sound/soc/codecs/wm8998.c
9644 ++++ b/sound/soc/codecs/wm8998.c
9645 +@@ -1378,7 +1378,7 @@ static int wm8998_probe(struct platform_device *pdev)
9646 +
9647 + ret = arizona_init_spk_irqs(arizona);
9648 + if (ret < 0)
9649 +- return ret;
9650 ++ goto err_pm_disable;
9651 +
9652 + ret = devm_snd_soc_register_component(&pdev->dev,
9653 + &soc_component_dev_wm8998,
9654 +@@ -1393,6 +1393,8 @@ static int wm8998_probe(struct platform_device *pdev)
9655 +
9656 + err_spk_irqs:
9657 + arizona_free_spk_irqs(arizona);
9658 ++err_pm_disable:
9659 ++ pm_runtime_disable(&pdev->dev);
9660 +
9661 + return ret;
9662 + }
9663 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
9664 +index b114fc7b2a95e..02c557e1f779c 100644
9665 +--- a/sound/soc/codecs/wm_adsp.c
9666 ++++ b/sound/soc/codecs/wm_adsp.c
9667 +@@ -1379,7 +1379,7 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
9668 + ctl_work = kzalloc(sizeof(*ctl_work), GFP_KERNEL);
9669 + if (!ctl_work) {
9670 + ret = -ENOMEM;
9671 +- goto err_ctl_cache;
9672 ++ goto err_list_del;
9673 + }
9674 +
9675 + ctl_work->dsp = dsp;
9676 +@@ -1389,7 +1389,8 @@ static int wm_adsp_create_control(struct wm_adsp *dsp,
9677 +
9678 + return 0;
9679 +
9680 +-err_ctl_cache:
9681 ++err_list_del:
9682 ++ list_del(&ctl->list);
9683 + kfree(ctl->cache);
9684 + err_ctl_name:
9685 + kfree(ctl->name);
9686 +diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
9687 +index e099c0505b765..2c6b0ac97c684 100644
9688 +--- a/sound/soc/jz4740/jz4740-i2s.c
9689 ++++ b/sound/soc/jz4740/jz4740-i2s.c
9690 +@@ -318,10 +318,14 @@ static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
9691 + switch (clk_id) {
9692 + case JZ4740_I2S_CLKSRC_EXT:
9693 + parent = clk_get(NULL, "ext");
9694 ++ if (IS_ERR(parent))
9695 ++ return PTR_ERR(parent);
9696 + clk_set_parent(i2s->clk_i2s, parent);
9697 + break;
9698 + case JZ4740_I2S_CLKSRC_PLL:
9699 + parent = clk_get(NULL, "pll half");
9700 ++ if (IS_ERR(parent))
9701 ++ return PTR_ERR(parent);
9702 + clk_set_parent(i2s->clk_i2s, parent);
9703 + ret = clk_set_rate(i2s->clk_i2s, freq);
9704 + break;
9705 +diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
9706 +index 8af8bc358a90a..19fd4d583b869 100644
9707 +--- a/sound/soc/meson/Kconfig
9708 ++++ b/sound/soc/meson/Kconfig
9709 +@@ -1,5 +1,5 @@
9710 + menu "ASoC support for Amlogic platforms"
9711 +- depends on ARCH_MESON || COMPILE_TEST
9712 ++ depends on ARCH_MESON || (COMPILE_TEST && COMMON_CLK)
9713 +
9714 + config SND_MESON_AXG_FIFO
9715 + tristate
9716 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
9717 +index a0d1ce0edaf9a..af14304645ce8 100644
9718 +--- a/sound/soc/soc-pcm.c
9719 ++++ b/sound/soc/soc-pcm.c
9720 +@@ -2390,6 +2390,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
9721 + case SNDRV_PCM_TRIGGER_START:
9722 + case SNDRV_PCM_TRIGGER_RESUME:
9723 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
9724 ++ case SNDRV_PCM_TRIGGER_DRAIN:
9725 + ret = dpcm_dai_trigger_fe_be(substream, cmd, true);
9726 + break;
9727 + case SNDRV_PCM_TRIGGER_STOP:
9728 +@@ -2407,6 +2408,7 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
9729 + case SNDRV_PCM_TRIGGER_START:
9730 + case SNDRV_PCM_TRIGGER_RESUME:
9731 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
9732 ++ case SNDRV_PCM_TRIGGER_DRAIN:
9733 + ret = dpcm_dai_trigger_fe_be(substream, cmd, false);
9734 + break;
9735 + case SNDRV_PCM_TRIGGER_STOP:
9736 +diff --git a/sound/usb/clock.c b/sound/usb/clock.c
9737 +index bfe5540030b80..54818658d0217 100644
9738 +--- a/sound/usb/clock.c
9739 ++++ b/sound/usb/clock.c
9740 +@@ -508,6 +508,12 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
9741 + }
9742 +
9743 + crate = data[0] | (data[1] << 8) | (data[2] << 16);
9744 ++ if (!crate) {
9745 ++ dev_info(&dev->dev, "failed to read current rate; disabling the check\n");
9746 ++ chip->sample_rate_read_error = 3; /* three strikes, see above */
9747 ++ return 0;
9748 ++ }
9749 ++
9750 + if (crate != rate) {
9751 + dev_warn(&dev->dev, "current rate %d is different from the runtime rate %d\n", crate, rate);
9752 + // runtime->rate = crate;
9753 +diff --git a/sound/usb/format.c b/sound/usb/format.c
9754 +index c8207b52c651c..a3daf93c565aa 100644
9755 +--- a/sound/usb/format.c
9756 ++++ b/sound/usb/format.c
9757 +@@ -53,6 +53,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
9758 + case UAC_VERSION_1:
9759 + default: {
9760 + struct uac_format_type_i_discrete_descriptor *fmt = _fmt;
9761 ++ if (format >= 64)
9762 ++ return 0; /* invalid format */
9763 + sample_width = fmt->bBitResolution;
9764 + sample_bytes = fmt->bSubframeSize;
9765 + format = 1ULL << format;
9766 +diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
9767 +index d4aae3fcd3cd7..d52ab6d49d18c 100644
9768 +--- a/sound/usb/quirks.c
9769 ++++ b/sound/usb/quirks.c
9770 +@@ -1465,6 +1465,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
9771 + case 0x25ce: /* Mytek devices */
9772 + case 0x278b: /* Rotel? */
9773 + case 0x292b: /* Gustard/Ess based devices */
9774 ++ case 0x2972: /* FiiO devices */
9775 + case 0x2ab6: /* T+A devices */
9776 + case 0x3353: /* Khadas devices */
9777 + case 0x3842: /* EVGA */
9778 +diff --git a/sound/usb/stream.c b/sound/usb/stream.c
9779 +index ff5d803cfaf09..94bef3d043786 100644
9780 +--- a/sound/usb/stream.c
9781 ++++ b/sound/usb/stream.c
9782 +@@ -198,16 +198,16 @@ static int usb_chmap_ctl_get(struct snd_kcontrol *kcontrol,
9783 + struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
9784 + struct snd_usb_substream *subs = info->private_data;
9785 + struct snd_pcm_chmap_elem *chmap = NULL;
9786 +- int i;
9787 ++ int i = 0;
9788 +
9789 +- memset(ucontrol->value.integer.value, 0,
9790 +- sizeof(ucontrol->value.integer.value));
9791 + if (subs->cur_audiofmt)
9792 + chmap = subs->cur_audiofmt->chmap;
9793 + if (chmap) {
9794 + for (i = 0; i < chmap->channels; i++)
9795 + ucontrol->value.integer.value[i] = chmap->map[i];
9796 + }
9797 ++ for (; i < subs->channels_max; i++)
9798 ++ ucontrol->value.integer.value[i] = 0;
9799 + return 0;
9800 + }
9801 +
9802 +diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
9803 +index 938def6d0bb98..f540037eb7050 100644
9804 +--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
9805 ++++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
9806 +@@ -278,14 +278,12 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
9807 + enum cs_etm_sample_type sample_type)
9808 + {
9809 + u32 et = 0;
9810 +- struct int_node *inode = NULL;
9811 ++ int cpu;
9812 +
9813 + if (decoder->packet_count >= MAX_BUFFER - 1)
9814 + return OCSD_RESP_FATAL_SYS_ERR;
9815 +
9816 +- /* Search the RB tree for the cpu associated with this traceID */
9817 +- inode = intlist__find(traceid_list, trace_chan_id);
9818 +- if (!inode)
9819 ++ if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
9820 + return OCSD_RESP_FATAL_SYS_ERR;
9821 +
9822 + et = decoder->tail;
9823 +@@ -296,7 +294,7 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder,
9824 + decoder->packet_buffer[et].sample_type = sample_type;
9825 + decoder->packet_buffer[et].exc = false;
9826 + decoder->packet_buffer[et].exc_ret = false;
9827 +- decoder->packet_buffer[et].cpu = *((int *)inode->priv);
9828 ++ decoder->packet_buffer[et].cpu = cpu;
9829 + decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
9830 + decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
9831 +
9832 +diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
9833 +index 7b5e15cc6b717..3275b8dc93442 100644
9834 +--- a/tools/perf/util/cs-etm.c
9835 ++++ b/tools/perf/util/cs-etm.c
9836 +@@ -87,10 +87,27 @@ struct cs_etm_queue {
9837 + struct cs_etm_packet *packet;
9838 + };
9839 +
9840 ++/* RB tree for quick conversion between traceID and metadata pointers */
9841 ++static struct intlist *traceid_list;
9842 ++
9843 + static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
9844 + static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
9845 + pid_t tid, u64 time_);
9846 +
9847 ++int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
9848 ++{
9849 ++ struct int_node *inode;
9850 ++ u64 *metadata;
9851 ++
9852 ++ inode = intlist__find(traceid_list, trace_chan_id);
9853 ++ if (!inode)
9854 ++ return -EINVAL;
9855 ++
9856 ++ metadata = inode->priv;
9857 ++ *cpu = (int)metadata[CS_ETM_CPU];
9858 ++ return 0;
9859 ++}
9860 ++
9861 + static void cs_etm__packet_dump(const char *pkt_string)
9862 + {
9863 + const char *color = PERF_COLOR_BLUE;
9864 +@@ -230,7 +247,7 @@ static void cs_etm__free(struct perf_session *session)
9865 + cs_etm__free_events(session);
9866 + session->auxtrace = NULL;
9867 +
9868 +- /* First remove all traceID/CPU# nodes for the RB tree */
9869 ++ /* First remove all traceID/metadata nodes for the RB tree */
9870 + intlist__for_each_entry_safe(inode, tmp, traceid_list)
9871 + intlist__remove(traceid_list, inode);
9872 + /* Then the RB tree itself */
9873 +@@ -1316,9 +1333,9 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
9874 + 0xffffffff);
9875 +
9876 + /*
9877 +- * Create an RB tree for traceID-CPU# tuple. Since the conversion has
9878 +- * to be made for each packet that gets decoded, optimizing access in
9879 +- * anything other than a sequential array is worth doing.
9880 ++ * Create an RB tree for traceID-metadata tuple. Since the conversion
9881 ++ * has to be made for each packet that gets decoded, optimizing access
9882 ++ * in anything other than a sequential array is worth doing.
9883 + */
9884 + traceid_list = intlist__new(NULL);
9885 + if (!traceid_list) {
9886 +@@ -1384,8 +1401,8 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
9887 + err = -EINVAL;
9888 + goto err_free_metadata;
9889 + }
9890 +- /* All good, associate the traceID with the CPU# */
9891 +- inode->priv = &metadata[j][CS_ETM_CPU];
9892 ++ /* All good, associate the traceID with the metadata pointer */
9893 ++ inode->priv = metadata[j];
9894 + }
9895 +
9896 + /*
9897 +diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
9898 +index 37f8d48179cae..97c3152f5bfd5 100644
9899 +--- a/tools/perf/util/cs-etm.h
9900 ++++ b/tools/perf/util/cs-etm.h
9901 +@@ -53,9 +53,6 @@ enum {
9902 + CS_ETMV4_PRIV_MAX,
9903 + };
9904 +
9905 +-/* RB tree for quick conversion between traceID and CPUs */
9906 +-struct intlist *traceid_list;
9907 +-
9908 + #define KiB(x) ((x) * 1024)
9909 + #define MiB(x) ((x) * 1024 * 1024)
9910 +
9911 +@@ -69,6 +66,7 @@ static const u64 __perf_cs_etmv4_magic = 0x4040404040404040ULL;
9912 + #ifdef HAVE_CSTRACE_SUPPORT
9913 + int cs_etm__process_auxtrace_info(union perf_event *event,
9914 + struct perf_session *session);
9915 ++int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
9916 + #else
9917 + static inline int
9918 + cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
9919 +@@ -76,6 +74,12 @@ cs_etm__process_auxtrace_info(union perf_event *event __maybe_unused,
9920 + {
9921 + return -1;
9922 + }
9923 ++
9924 ++static inline int cs_etm__get_cpu(u8 trace_chan_id __maybe_unused,
9925 ++ int *cpu __maybe_unused)
9926 ++{
9927 ++ return -1;
9928 ++}
9929 + #endif
9930 +
9931 + #endif
9932 +diff --git a/tools/perf/util/parse-regs-options.c b/tools/perf/util/parse-regs-options.c
9933 +index e6599e290f467..e5ad120e7f69a 100644
9934 +--- a/tools/perf/util/parse-regs-options.c
9935 ++++ b/tools/perf/util/parse-regs-options.c
9936 +@@ -41,7 +41,7 @@ parse_regs(const struct option *opt, const char *str, int unset)
9937 + }
9938 + fputc('\n', stderr);
9939 + /* just printing available regs */
9940 +- return -1;
9941 ++ goto error;
9942 + }
9943 + for (r = sample_reg_masks; r->name; r++) {
9944 + if (!strcasecmp(s, r->name))
9945 +diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
9946 +index 3118fc0d149b0..406401f1acc25 100755
9947 +--- a/tools/testing/ktest/ktest.pl
9948 ++++ b/tools/testing/ktest/ktest.pl
9949 +@@ -4177,7 +4177,12 @@ sub do_send_mail {
9950 + $mail_command =~ s/\$SUBJECT/$subject/g;
9951 + $mail_command =~ s/\$MESSAGE/$message/g;
9952 +
9953 +- run_command $mail_command;
9954 ++ my $ret = run_command $mail_command;
9955 ++ if (!$ret && defined($file)) {
9956 ++ # try again without the file
9957 ++ $message .= "\n\n*** FAILED TO SEND LOG ***\n\n";
9958 ++ do_send_email($subject, $message);
9959 ++ }
9960 + }
9961 +
9962 + sub send_email {
9963 +diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
9964 +index d59642e70f562..2229e55216a97 100755
9965 +--- a/tools/testing/selftests/bpf/test_offload.py
9966 ++++ b/tools/testing/selftests/bpf/test_offload.py
9967 +@@ -787,6 +787,7 @@ try:
9968 + start_test("Test disabling TC offloads is rejected while filters installed...")
9969 + ret, _ = sim.set_ethtool_tc_offloads(False, fail=False)
9970 + fail(ret == 0, "Driver should refuse to disable TC offloads with filters installed...")
9971 ++ sim.set_ethtool_tc_offloads(True)
9972 +
9973 + start_test("Test qdisc removal frees things...")
9974 + sim.tc_flush_filters()