Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.16 commit in: /
Date: Tue, 24 Apr 2018 11:31:32
Message-Id: 1524569480.a3bf978c7d70cc9633c062dcfbaa009437563b42.mpagano@gentoo
commit: a3bf978c7d70cc9633c062dcfbaa009437563b42
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Apr 24 11:31:20 2018 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Apr 24 11:31:20 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=a3bf978c

Linux patch 4.16.4

 0000_README | 4 +
 1003_linux-4.16.4.patch | 7880 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 7884 insertions(+)

diff --git a/0000_README b/0000_README
index 65c079f..c127441 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch: 1002_linux-4.16.3.patch
From: http://www.kernel.org
Desc: Linux 4.16.3

+Patch: 1003_linux-4.16.4.patch
+From: http://www.kernel.org
+Desc: Linux 4.16.4
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1003_linux-4.16.4.patch b/1003_linux-4.16.4.patch
new file mode 100644
index 0000000..e8ddb45
--- /dev/null
+++ b/1003_linux-4.16.4.patch
@@ -0,0 +1,7880 @@
+diff --git a/Makefile b/Makefile
+index 38df392e45e4..d51175192ac1 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 16
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
+index a7da0dd0c98f..0898213f3bb2 100644
+--- a/arch/arm/boot/dts/at91sam9g25.dtsi
++++ b/arch/arm/boot/dts/at91sam9g25.dtsi
+@@ -21,7 +21,7 @@
+ atmel,mux-mask = <
+ /* A B C */
+ 0xffffffff 0xffe0399f 0xc000001c /* pioA */
+- 0x0007ffff 0x8000fe3f 0x00000000 /* pioB */
++ 0x0007ffff 0x00047e3f 0x00000000 /* pioB */
+ 0x80000000 0x07c0ffff 0xb83fffff /* pioC */
+ 0x003fffff 0x003f8000 0x00000000 /* pioD */
+ >;
+diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
+index 56626d1a4235..cf89385e7888 100644
+--- a/arch/arm/boot/dts/exynos5250.dtsi
++++ b/arch/arm/boot/dts/exynos5250.dtsi
+@@ -655,7 +655,7 @@
+ power-domains = <&pd_gsc>;
+ clocks = <&clock CLK_GSCL0>;
+ clock-names = "gscl";
+- iommu = <&sysmmu_gsc0>;
++ iommus = <&sysmmu_gsc0>;
+ };
+
+ gsc_1: gsc@13e10000 {
+@@ -665,7 +665,7 @@
+ power-domains = <&pd_gsc>;
+ clocks = <&clock CLK_GSCL1>;
+ clock-names = "gscl";
+- iommu = <&sysmmu_gsc1>;
++ iommus = <&sysmmu_gsc1>;
+ };
+
+ gsc_2: gsc@13e20000 {
+@@ -675,7 +675,7 @@
+ power-domains = <&pd_gsc>;
+ clocks = <&clock CLK_GSCL2>;
+ clock-names = "gscl";
+- iommu = <&sysmmu_gsc2>;
++ iommus = <&sysmmu_gsc2>;
+ };
+
+ gsc_3: gsc@13e30000 {
+@@ -685,7 +685,7 @@
+ power-domains = <&pd_gsc>;
+ clocks = <&clock CLK_GSCL3>;
+ clock-names = "gscl";
+- iommu = <&sysmmu_gsc3>;
++ iommus = <&sysmmu_gsc3>;
+ };
+
+ hdmi: hdmi@14530000 {
+diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+index 7bf5aa2237c9..7de704575aee 100644
+--- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
++++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+@@ -39,6 +39,24 @@
+ };
+ };
+
++ reg_3p3v: regulator-3p3v {
++ compatible = "regulator-fixed";
++ regulator-name = "fixed-3.3V";
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
++ reg_5v: regulator-5v {
++ compatible = "regulator-fixed";
++ regulator-name = "fixed-5V";
++ regulator-min-microvolt = <5000000>;
++ regulator-max-microvolt = <5000000>;
++ regulator-boot-on;
++ regulator-always-on;
++ };
++
+ gpio_keys {
+ compatible = "gpio-keys";
+ pinctrl-names = "default";
+@@ -468,12 +486,14 @@
+ };
+
+ &usb1 {
+- vusb33-supply = <&mt6323_vusb_reg>;
++ vusb33-supply = <&reg_3p3v>;
++ vbus-supply = <&reg_5v>;
+ status = "okay";
+ };
+
+ &usb2 {
+- vusb33-supply = <&mt6323_vusb_reg>;
++ vusb33-supply = <&reg_3p3v>;
++ vbus-supply = <&reg_5v>;
+ status = "okay";
+ };
+
+diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
+index 373b3621b536..c7105096c623 100644
+--- a/arch/arm/boot/dts/sama5d4.dtsi
++++ b/arch/arm/boot/dts/sama5d4.dtsi
+@@ -1379,7 +1379,7 @@
+ pinctrl@fc06a000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+- compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
++ compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
+ ranges = <0xfc068000 0xfc068000 0x100
+ 0xfc06a000 0xfc06a000 0x4000>;
+ /* WARNING: revisit as pin spec has changed */
+diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
+index dc4346ecf16d..a1055a2b8d54 100644
+--- a/arch/arm/mach-exynos/pm.c
++++ b/arch/arm/mach-exynos/pm.c
+@@ -271,11 +271,7 @@ static int exynos_cpu0_enter_aftr(void)
+ goto fail;
+
+ call_firmware_op(cpu_boot, 1);
+-
+- if (soc_is_exynos3250())
+- dsb_sev();
+- else
+- arch_send_wakeup_ipi_mask(cpumask_of(1));
++ dsb_sev();
+ }
+ }
+ fail:
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+index ee4ada61c59c..93a7830706f5 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+@@ -310,7 +310,7 @@
+ pinctrl-names = "default", "clk-gate";
+
+ bus-width = <8>;
+- max-frequency = <200000000>;
++ max-frequency = <100000000>;
+ non-removable;
+ disable-wp;
+ cap-mmc-highspeed;
+diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
+index b71306947290..06629011a434 100644
+--- a/arch/mips/include/asm/uaccess.h
++++ b/arch/mips/include/asm/uaccess.h
+@@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
+ {
+ __kernel_size_t res;
+
++#ifdef CONFIG_CPU_MICROMIPS
++/* micromips memset / bzero also clobbers t7 & t8 */
++#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
++#else
++#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
++#endif /* CONFIG_CPU_MICROMIPS */
++
+ if (eva_kernel_access()) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+@@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
+ "move\t%0, $6"
+ : "=r" (res)
+ : "r" (addr), "r" (size)
+- : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
++ : bzero_clobbers);
+ } else {
+ might_fault();
+ __asm__ __volatile__(
+@@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
+ "move\t%0, $6"
+ : "=r" (res)
+ : "r" (addr), "r" (size)
+- : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
++ : bzero_clobbers);
+ }
+
+ return res;
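
Aside, not part of the patch itself: the uaccess.h hunk above exists because the microMIPS memset/bzero also scribbles on $15 and $24, so the inline asm must name them in its clobber list. A minimal userspace sketch of why clobber lists matter, assuming GCC or Clang on x86-64 (the registers differ from the MIPS case; illustrative only):

#include <stdio.h>

/* The "rcx" clobber tells the compiler that rcx's old contents are
 * destroyed, so it must not keep a live value there across the asm. */
static long add_via_rcx(long a, long b)
{
        long out;

        asm volatile("mov %1, %%rcx\n\t"
                     "add %2, %%rcx\n\t"
                     "mov %%rcx, %0"
                     : "=r" (out)
                     : "r" (a), "r" (b)
                     : "rcx");
        return out;
}

int main(void)
{
        printf("%ld\n", add_via_rcx(2, 3));     /* prints 5 */
        return 0;
}

Omitting "rcx" would be the same class of bug the hunk fixes for $15/$24: the compiler could silently cache a value in a register the asm tramples.
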
+diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
+index a1456664d6c2..f7327979a8f8 100644
+--- a/arch/mips/lib/memset.S
++++ b/arch/mips/lib/memset.S
+@@ -219,7 +219,7 @@
+ 1: PTR_ADDIU a0, 1 /* fill bytewise */
+ R10KCBARRIER(0(ra))
+ bne t1, a0, 1b
+- sb a1, -1(a0)
++ EX(sb, a1, -1(a0), .Lsmall_fixup\@)
+
+ 2: jr ra /* done */
+ move a2, zero
+@@ -252,13 +252,18 @@
+ PTR_L t0, TI_TASK($28)
+ andi a2, STORMASK
+ LONG_L t0, THREAD_BUADDR(t0)
+- LONG_ADDU a2, t1
++ LONG_ADDU a2, a0
+ jr ra
+ LONG_SUBU a2, t0
+
+ .Llast_fixup\@:
+ jr ra
+- andi v1, a2, STORMASK
++ nop
++
++.Lsmall_fixup\@:
++ PTR_SUBU a2, t1, a0
++ jr ra
++ PTR_ADDIU a2, 1
+
+ .endm
+
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index 10daa1d56e0a..c7c63959ba91 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -35,7 +35,8 @@
+ #define rmb() __asm__ __volatile__ ("sync" : : : "memory")
+ #define wmb() __asm__ __volatile__ ("sync" : : : "memory")
+
+-#ifdef __SUBARCH_HAS_LWSYNC
++/* The sub-arch has lwsync */
++#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+ # define SMPWMB LWSYNC
+ #else
+ # define SMPWMB eieio
+diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
+index 12e70fb58700..fcf3ed5b8b18 100644
+--- a/arch/powerpc/include/asm/opal.h
++++ b/arch/powerpc/include/asm/opal.h
+@@ -21,6 +21,9 @@
+ /* We calculate number of sg entries based on PAGE_SIZE */
+ #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
+
++/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
++#define OPAL_BUSY_DELAY_MS 10
++
+ /* /sys/firmware/opal */
+ extern struct kobject *opal_kobj;
+
+diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
+index 63e7f5a1f105..6ec546090ba1 100644
+--- a/arch/powerpc/include/asm/synch.h
++++ b/arch/powerpc/include/asm/synch.h
+@@ -6,10 +6,6 @@
+ #include <linux/stringify.h>
+ #include <asm/feature-fixups.h>
+
+-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
+-#define __SUBARCH_HAS_LWSYNC
+-#endif
+-
+ #ifndef __ASSEMBLY__
+ extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+ extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
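
Aside, not part of the patch itself: the barrier.h and synch.h hunks drop the __SUBARCH_HAS_LWSYNC indirection and test the sub-arch condition directly where the barrier instruction is chosen. A hypothetical userspace rendering of that preprocessor pattern (macro names invented for illustration):

#include <stdio.h>

#define PRETEND_POWERPC64 1     /* stand-ins for the real config knobs */
#define PRETEND_PPC_E500MC 0

/* Condition tested directly at the point of use, as in the new barrier.h */
#if PRETEND_POWERPC64 || PRETEND_PPC_E500MC
# define SMP_WMB_INSN "lwsync"  /* lightweight sync is available */
#else
# define SMP_WMB_INSN "eieio"   /* fall back */
#endif

int main(void)
{
        printf("smp_wmb would use: %s\n", SMP_WMB_INSN);
        return 0;
}
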
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index 8ca5d5b74618..078553a177de 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -84,6 +84,7 @@ static int hv_mode;
+
+ static struct {
+ u64 lpcr;
++ u64 lpcr_clear;
+ u64 hfscr;
+ u64 fscr;
+ } system_registers;
+@@ -92,6 +93,8 @@ static void (*init_pmu_registers)(void);
+
+ static void __restore_cpu_cpufeatures(void)
+ {
++ u64 lpcr;
++
+ /*
+ * LPCR is restored by the power on engine already. It can be changed
+ * after early init e.g., by radix enable, and we have no unified API
+@@ -104,8 +107,10 @@ static void __restore_cpu_cpufeatures(void)
+ * The best we can do to accommodate secondary boot and idle restore
+ * for now is "or" LPCR with existing.
+ */
+-
+- mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
++ lpcr = mfspr(SPRN_LPCR);
++ lpcr |= system_registers.lpcr;
++ lpcr &= ~system_registers.lpcr_clear;
++ mtspr(SPRN_LPCR, lpcr);
+ if (hv_mode) {
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_HFSCR, system_registers.hfscr);
+@@ -325,8 +330,9 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
+ {
+ u64 lpcr;
+
++ system_registers.lpcr_clear |= (LPCR_ISL | LPCR_UPRT | LPCR_HR);
+ lpcr = mfspr(SPRN_LPCR);
+- lpcr &= ~LPCR_ISL;
++ lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
+ mtspr(SPRN_LPCR, lpcr);
+
+ cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
+@@ -658,6 +664,13 @@ static void __init cpufeatures_setup_start(u32 isa)
+ cur_cpu_spec->cpu_features |= CPU_FTR_ARCH_300;
+ cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_ARCH_3_00;
+ }
++
++ /*
++ * PKEY was not in the initial base or feature node
++ * specification, but it should become optional in the next
++ * cpu feature version sequence.
++ */
++ cur_cpu_spec->cpu_features |= CPU_FTR_PKEY;
+ }
+
+ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
+index 0c0b66fc5bfb..295ba833846e 100644
+--- a/arch/powerpc/kernel/eeh_driver.c
++++ b/arch/powerpc/kernel/eeh_driver.c
+@@ -207,18 +207,18 @@ static void *eeh_report_error(void *data, void *userdata)
+
+ if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ return NULL;
++
++ device_lock(&dev->dev);
+ dev->error_state = pci_channel_io_frozen;
+
+ driver = eeh_pcid_get(dev);
+- if (!driver) return NULL;
++ if (!driver) goto out_no_dev;
+
+ eeh_disable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->error_detected) {
+- eeh_pcid_put(dev);
+- return NULL;
+- }
++ !driver->err_handler->error_detected)
++ goto out;
+
+ rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
+
+@@ -227,8 +227,12 @@ static void *eeh_report_error(void *data, void *userdata)
+ if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
+ edev->in_error = true;
+- eeh_pcid_put(dev);
+ pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
++
++out:
++ eeh_pcid_put(dev);
++out_no_dev:
++ device_unlock(&dev->dev);
+ return NULL;
+ }
+
+@@ -251,15 +255,14 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
+ if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ return NULL;
+
++ device_lock(&dev->dev);
+ driver = eeh_pcid_get(dev);
+- if (!driver) return NULL;
++ if (!driver) goto out_no_dev;
+
+ if (!driver->err_handler ||
+ !driver->err_handler->mmio_enabled ||
+- (edev->mode & EEH_DEV_NO_HANDLER)) {
+- eeh_pcid_put(dev);
+- return NULL;
+- }
++ (edev->mode & EEH_DEV_NO_HANDLER))
++ goto out;
+
+ rc = driver->err_handler->mmio_enabled(dev);
+
+@@ -267,7 +270,10 @@ static void *eeh_report_mmio_enabled(void *data, void *userdata)
+ if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+ if (*res == PCI_ERS_RESULT_NONE) *res = rc;
+
++out:
+ eeh_pcid_put(dev);
++out_no_dev:
++ device_unlock(&dev->dev);
+ return NULL;
+ }
+
+@@ -290,20 +296,20 @@ static void *eeh_report_reset(void *data, void *userdata)
+
+ if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ return NULL;
++
++ device_lock(&dev->dev);
+ dev->error_state = pci_channel_io_normal;
+
+ driver = eeh_pcid_get(dev);
+- if (!driver) return NULL;
++ if (!driver) goto out_no_dev;
+
+ eeh_enable_irq(dev);
+
+ if (!driver->err_handler ||
+ !driver->err_handler->slot_reset ||
+ (edev->mode & EEH_DEV_NO_HANDLER) ||
+- (!edev->in_error)) {
+- eeh_pcid_put(dev);
+- return NULL;
+- }
++ (!edev->in_error))
++ goto out;
+
+ rc = driver->err_handler->slot_reset(dev);
+ if ((*res == PCI_ERS_RESULT_NONE) ||
+@@ -311,7 +317,10 @@ static void *eeh_report_reset(void *data, void *userdata)
+ if (*res == PCI_ERS_RESULT_DISCONNECT &&
+ rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
+
++out:
+ eeh_pcid_put(dev);
++out_no_dev:
++ device_unlock(&dev->dev);
+ return NULL;
+ }
+
+@@ -362,10 +371,12 @@ static void *eeh_report_resume(void *data, void *userdata)
+
+ if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ return NULL;
++
++ device_lock(&dev->dev);
+ dev->error_state = pci_channel_io_normal;
+
+ driver = eeh_pcid_get(dev);
+- if (!driver) return NULL;
++ if (!driver) goto out_no_dev;
+
+ was_in_error = edev->in_error;
+ edev->in_error = false;
+@@ -375,18 +386,20 @@ static void *eeh_report_resume(void *data, void *userdata)
+ !driver->err_handler->resume ||
+ (edev->mode & EEH_DEV_NO_HANDLER) || !was_in_error) {
+ edev->mode &= ~EEH_DEV_NO_HANDLER;
+- eeh_pcid_put(dev);
+- return NULL;
++ goto out;
+ }
+
+ driver->err_handler->resume(dev);
+
+- eeh_pcid_put(dev);
+ pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
++out:
++ eeh_pcid_put(dev);
+ #ifdef CONFIG_PCI_IOV
+ if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
+ eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
+ #endif
++out_no_dev:
++ device_unlock(&dev->dev);
+ return NULL;
+ }
+
+@@ -406,23 +419,26 @@ static void *eeh_report_failure(void *data, void *userdata)
+
+ if (!dev || eeh_dev_removed(edev) || eeh_pe_passed(edev->pe))
+ return NULL;
++
++ device_lock(&dev->dev);
+ dev->error_state = pci_channel_io_perm_failure;
+
+ driver = eeh_pcid_get(dev);
+- if (!driver) return NULL;
++ if (!driver) goto out_no_dev;
+
+ eeh_disable_irq(dev);
+
+ if (!driver->err_handler ||
+- !driver->err_handler->error_detected) {
+- eeh_pcid_put(dev);
+- return NULL;
+- }
++ !driver->err_handler->error_detected)
++ goto out;
+
+ driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);
+
+- eeh_pcid_put(dev);
+ pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
++out:
++ eeh_pcid_put(dev);
++out_no_dev:
++ device_unlock(&dev->dev);
+ return NULL;
+ }
+
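
Aside, not part of the patch itself: every eeh_report_*() handler above gains a device_lock()/device_unlock() pair, with early returns converted to gotos so the lock and the driver reference are always released on every path. A small userspace sketch of that idiom using a pthread mutex (function and flag names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static int handle_error(int have_driver, int have_handler)
{
        int rc = -1;

        pthread_mutex_lock(&dev_lock);
        if (!have_driver)
                goto out_unlock;        /* nothing to put, just unlock */
        if (!have_handler)
                goto out_put;           /* drop driver ref, then unlock */

        rc = 0;                         /* ... invoke the handler ... */
out_put:
        /* driver reference dropped here (eeh_pcid_put() in the patch) */
out_unlock:
        pthread_mutex_unlock(&dev_lock);
        return rc;
}

int main(void)
{
        printf("%d %d %d\n", handle_error(0, 0), handle_error(1, 0),
               handle_error(1, 1));     /* -1 -1 0 */
        return 0;
}

The single unwind path is what makes it easy to verify the lock cannot leak, which is the point of the rework.
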
+diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
+index 2d4956e97aa9..ee5a67d57aab 100644
+--- a/arch/powerpc/kernel/eeh_pe.c
++++ b/arch/powerpc/kernel/eeh_pe.c
+@@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
+ eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
+
+ /* PCI Command: 0x4 */
+- eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
++ eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
++ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+
+ /* Check the PCIe link is ready */
+ eeh_bridge_check_link(edev);
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index ca5d5a081e75..e4c5bf33970b 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -455,29 +455,33 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+- regs->nip = orig_ret_address;
++
+ /*
+- * Make LR point to the orig_ret_address.
+- * When the 'nop' inside the kretprobe_trampoline
+- * is optimized, we can do a 'blr' after executing the
+- * detour buffer code.
++ * We get here through one of two paths:
++ * 1. by taking a trap -> kprobe_handler() -> here
++ * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
++ *
++ * When going back through (1), we need regs->nip to be setup properly
++ * as it is used to determine the return address from the trap.
++ * For (2), since nip is not honoured with optprobes, we instead setup
++ * the link register properly so that the subsequent 'blr' in
++ * kretprobe_trampoline jumps back to the right instruction.
++ *
++ * For nip, we should set the address to the previous instruction since
++ * we end up emulating it in kprobe_handler(), which increments the nip
++ * again.
+ */
++ regs->nip = orig_ret_address - 4;
+ regs->link = orig_ret_address;
+
+- reset_current_kprobe();
+ kretprobe_hash_unlock(current, &flags);
+- preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+- /*
+- * By returning a non-zero value, we are telling
+- * kprobe_handler() that we don't want the post_handler
+- * to run (and have re-enabled preemption)
+- */
+- return 1;
++
++ return 0;
+ }
+ NOKPROBE_SYMBOL(trampoline_probe_handler);
+
+diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
+index e4395f937d63..45e0b7d5f200 100644
+--- a/arch/powerpc/kernel/machine_kexec_file_64.c
++++ b/arch/powerpc/kernel/machine_kexec_file_64.c
+@@ -43,7 +43,7 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+
+ /* We don't support crash kernels yet. */
+ if (image->type == KEXEC_TYPE_CRASH)
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+
+ for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
+ fops = kexec_file_loaders[i];
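
Aside, not part of the patch itself: the kexec one-liner swaps the kernel-internal ENOTSUPP (value 524, which has no name in userspace headers) for the UAPI errno EOPNOTSUPP, which callers can actually decode. A quick userspace check, assuming Linux with glibc:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* EOPNOTSUPP is 95 on Linux and has a proper message */
        printf("EOPNOTSUPP=%d: %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP));
        /* strerror(524) would only report "Unknown error 524", which is
         * what userspace saw before this fix. */
        return 0;
}
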
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 73697c4e3468..f61ff5a6bddb 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
+ unsigned int *target = (unsigned int *)branch_target(src);
+
+ /* Branch within the section doesn't need translating */
+- if (target < alt_start || target >= alt_end) {
++ if (target < alt_start || target > alt_end) {
+ instr = translate_branch(dest, src);
+ if (!instr)
+ return 1;
+diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
+index cf290d415dcd..1d388a0e1746 100644
+--- a/arch/powerpc/mm/hash_utils_64.c
++++ b/arch/powerpc/mm/hash_utils_64.c
+@@ -875,6 +875,12 @@ static void __init htab_initialize(void)
+ /* Using a hypervisor which owns the htab */
+ htab_address = NULL;
+ _SDR1 = 0;
++ /*
++ * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall
++ * to inform the hypervisor that we wish to use the HPT.
++ */
++ if (cpu_has_feature(CPU_FTR_ARCH_300))
++ register_process_table(0, 0, 0);
+ #ifdef CONFIG_FA_DUMP
+ /*
+ * If firmware assisted dump is active firmware preserves
+diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
+index a07f5372a4bf..9ab051155af3 100644
+--- a/arch/powerpc/mm/tlb-radix.c
++++ b/arch/powerpc/mm/tlb-radix.c
+@@ -33,13 +33,12 @@ static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
+ {
+ unsigned long rb;
+ unsigned long rs;
+- unsigned int r = 1; /* radix format */
+
+ rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
+ rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
+
+- asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
+- : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
++ asm volatile(PPC_TLBIEL(%0, %1, %2, %3, 1)
++ : : "r"(rb), "r"(rs), "i"(ric), "i"(prs)
+ : "memory");
+ }
+
+diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
+index 9db4398ded5d..1bceb95f422d 100644
+--- a/arch/powerpc/platforms/powernv/opal-nvram.c
++++ b/arch/powerpc/platforms/powernv/opal-nvram.c
+@@ -11,6 +11,7 @@
+
+ #define DEBUG
+
++#include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/init.h>
+ #include <linux/of.h>
+@@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
+
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_write_nvram(__pa(buf), count, off);
+- if (rc == OPAL_BUSY_EVENT)
++ if (rc == OPAL_BUSY_EVENT) {
++ msleep(OPAL_BUSY_DELAY_MS);
+ opal_poll_events(NULL);
++ } else if (rc == OPAL_BUSY) {
++ msleep(OPAL_BUSY_DELAY_MS);
++ }
+ }
++
++ if (rc)
++ return -EIO;
++
+ *index += count;
+ return count;
+ }
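
Aside, not part of the patch itself: the opal-nvram.c hunk turns a tight OPAL_BUSY spin into a retry loop that sleeps OPAL_BUSY_DELAY_MS between attempts and maps any residual failure to -EIO. A userspace sketch of the same shape (try_write() is a made-up stand-in for opal_write_nvram()):

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define BUSY 1
#define OK 0
#define BUSY_DELAY_MS 10

static int try_write(void)
{
        static int attempts;
        return ++attempts < 3 ? BUSY : OK;      /* busy twice, then succeed */
}

static int write_with_retry(void)
{
        int rc = BUSY;

        while (rc == BUSY) {
                rc = try_write();
                if (rc == BUSY) {
                        struct timespec ts = { 0, BUSY_DELAY_MS * 1000000L };
                        nanosleep(&ts, NULL);   /* msleep() analogue */
                }
        }
        return rc ? -EIO : 0;   /* any leftover error maps to -EIO */
}

int main(void)
{
        printf("rc=%d\n", write_with_retry());  /* rc=0 after two retries */
        return 0;
}
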
+diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
+index 0ee4a469a4ae..d11f3c14c21e 100644
+--- a/arch/powerpc/platforms/pseries/lpar.c
++++ b/arch/powerpc/platforms/pseries/lpar.c
+@@ -726,15 +726,18 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
+ return 0;
+ }
+
+-/* Actually only used for radix, so far */
+ static int pseries_lpar_register_process_table(unsigned long base,
+ unsigned long page_size, unsigned long table_size)
+ {
+ long rc;
+- unsigned long flags = PROC_TABLE_NEW;
++ unsigned long flags = 0;
+
++ if (table_size)
++ flags |= PROC_TABLE_NEW;
+ if (radix_enabled())
+ flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
++ else
++ flags |= PROC_TABLE_HPT_SLB;
+ for (;;) {
+ rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
+ page_size, table_size);
+@@ -760,6 +763,7 @@ void __init hpte_init_pseries(void)
+ mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
+ mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
+ mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
++ register_process_table = pseries_lpar_register_process_table;
+
+ if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
+ mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
+diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
+index d22aeb0b69e1..b48454be5b98 100644
+--- a/arch/powerpc/sysdev/xive/native.c
++++ b/arch/powerpc/sysdev/xive/native.c
+@@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
+ if (xive_pool_vps == XIVE_INVALID_VP)
+ return;
+
++ /* Check if pool VP already active, if it is, pull it */
++ if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
++ in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
++
+ /* Enable the pool VP */
+ vp = xive_pool_vps + cpu;
+ pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
+diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
+index eaee7087886f..6e91e0d422ea 100644
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -289,12 +289,12 @@ config MARCH_Z13
+ older machines.
+
+ config MARCH_Z14
+- bool "IBM z14"
++ bool "IBM z14 ZR1 and z14"
+ select HAVE_MARCH_Z14_FEATURES
+ help
+- Select this to enable optimizations for IBM z14 (3906 series).
+- The kernel will be slightly faster but will not work on older
+- machines.
++ Select this to enable optimizations for IBM z14 ZR1 and z14 (3907
++ and 3906 series). The kernel will be slightly faster but will not
++ work on older machines.
+
+ endchoice
+
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index 43bbe63e2992..06b513d192b9 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb)
+
+ if (sb->s_root)
+ hypfs_delete_tree(sb->s_root);
+- if (sb_info->update_file)
++ if (sb_info && sb_info->update_file)
+ hypfs_remove(sb_info->update_file);
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
+index c5bc3f209652..5ee27dc9a10c 100644
+--- a/arch/s390/kernel/perf_cpum_cf_events.c
++++ b/arch/s390/kernel/perf_cpum_cf_events.c
+@@ -583,6 +583,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
+ model = cpumcf_z13_pmu_event_attr;
+ break;
+ case 0x3906:
++ case 0x3907:
+ model = cpumcf_z14_pmu_event_attr;
+ break;
+ default:
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index a6a91f01a17a..ce5ff4c4d435 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -819,6 +819,7 @@ static int __init setup_hwcaps(void)
+ strcpy(elf_platform, "z13");
+ break;
+ case 0x3906:
++ case 0x3907:
+ strcpy(elf_platform, "z14");
+ break;
+ }
+diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
+index 2db18cbbb0ea..c0197097c86e 100644
+--- a/arch/um/os-Linux/file.c
++++ b/arch/um/os-Linux/file.c
+@@ -12,6 +12,7 @@
+ #include <sys/mount.h>
+ #include <sys/socket.h>
+ #include <sys/stat.h>
++#include <sys/sysmacros.h>
+ #include <sys/un.h>
+ #include <sys/types.h>
+ #include <os.h>
+diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
+index a86d7cc2c2d8..bf0acb8aad8b 100644
+--- a/arch/um/os-Linux/signal.c
++++ b/arch/um/os-Linux/signal.c
+@@ -16,6 +16,7 @@
+ #include <os.h>
+ #include <sysdep/mcontext.h>
+ #include <um_malloc.h>
++#include <sys/ucontext.h>
+
+ void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
+ [SIGTRAP] = relay_signal,
+@@ -159,7 +160,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
+
+ static void hard_handler(int sig, siginfo_t *si, void *p)
+ {
+- struct ucontext *uc = p;
++ ucontext_t *uc = p;
+ mcontext_t *mc = &uc->uc_mcontext;
+ unsigned long pending = 1UL << sig;
+
+diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
+index 1518d2805ae8..27361cbb7ca9 100644
+--- a/arch/x86/um/stub_segv.c
++++ b/arch/x86/um/stub_segv.c
+@@ -6,11 +6,12 @@
+ #include <sysdep/stub.h>
+ #include <sysdep/faultinfo.h>
+ #include <sysdep/mcontext.h>
++#include <sys/ucontext.h>
+
+ void __attribute__ ((__section__ (".__syscall_stub")))
+ stub_segv_handler(int sig, siginfo_t *info, void *p)
+ {
+- struct ucontext *uc = p;
++ ucontext_t *uc = p;
+
+ GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
+ &uc->uc_mcontext);
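
Aside, not part of the patch itself: the UML hunks above switch from the non-standard struct ucontext tag to the POSIX ucontext_t typedef, which is what newer glibc (2.26 and later) exposes, and pull in <sys/ucontext.h> to get it. A userspace snippet that compiles against the same header the same way:

#include <stdio.h>
#include <string.h>
#include <sys/ucontext.h>

static void show(void *p)
{
        ucontext_t *uc = p;     /* the POSIX typedef, not struct ucontext */
        printf("mcontext lives at %p\n", (void *)&uc->uc_mcontext);
}

int main(void)
{
        ucontext_t uc;
        memset(&uc, 0, sizeof(uc));
        show(&uc);
        return 0;
}
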
+diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
+index 3c2c2530737e..c36d23aa6c35 100644
+--- a/arch/x86/xen/enlighten_pv.c
++++ b/arch/x86/xen/enlighten_pv.c
+@@ -1259,10 +1259,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ */
+ __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
+
+- /* Work out if we support NX */
+- get_cpu_cap(&boot_cpu_data);
+- x86_configure_nx();
+-
+ /* Get mfn list */
+ xen_build_dynamic_phys_to_machine();
+
+@@ -1272,6 +1268,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
+ */
+ xen_setup_gdt(0);
+
++ /* Work out if we support NX */
++ get_cpu_cap(&boot_cpu_data);
++ x86_configure_nx();
++
+ xen_init_irq_ops();
+
+ /* Let's presume PV guests always boot on vCPU with id 0. */
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index 9f8f39d49396..984ec6b288df 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -196,7 +196,7 @@ static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd
+ * In the _LSI, _LSR, _LSW case the locked status is
+ * communicated via the read/write commands
+ */
+- if (nfit_mem->has_lsi)
++ if (nfit_mem->has_lsr)
+ break;
+
+ if (status >> 16 & ND_CONFIG_LOCKED)
+@@ -483,7 +483,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ min_t(u32, 256, in_buf.buffer.length), true);
+
+ /* call the BIOS, prefer the named methods over _DSM if available */
+- if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsi)
++ if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
+ out_obj = acpi_label_info(handle);
+ else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
+ struct nd_cmd_get_config_data_hdr *p = buf;
+@@ -1250,8 +1250,11 @@ static ssize_t scrub_show(struct device *dev,
+ if (nd_desc) {
+ struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
+
++ mutex_lock(&acpi_desc->init_mutex);
+ rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
+- (work_busy(&acpi_desc->work)) ? "+\n" : "\n");
++ work_busy(&acpi_desc->work)
++ && !acpi_desc->cancel ? "+\n" : "\n");
++ mutex_unlock(&acpi_desc->init_mutex);
+ }
+ device_unlock(dev);
+ return rc;
+@@ -1654,12 +1657,23 @@ static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
+ device_unlock(dev->parent);
+ }
+
++static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
++{
++ acpi_handle handle;
++ acpi_status status;
++
++ status = acpi_get_handle(adev->handle, method, &handle);
++
++ if (ACPI_SUCCESS(status))
++ return true;
++ return false;
++}
++
+ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_mem *nfit_mem, u32 device_handle)
+ {
+ struct acpi_device *adev, *adev_dimm;
+ struct device *dev = acpi_desc->dev;
+- union acpi_object *obj;
+ unsigned long dsm_mask;
+ const guid_t *guid;
+ int i;
+@@ -1732,25 +1746,15 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
+ 1ULL << i))
+ set_bit(i, &nfit_mem->dsm_mask);
+
+- obj = acpi_label_info(adev_dimm->handle);
+- if (obj) {
+- ACPI_FREE(obj);
+- nfit_mem->has_lsi = 1;
+- dev_dbg(dev, "%s: has _LSI\n", dev_name(&adev_dimm->dev));
+- }
+-
+- obj = acpi_label_read(adev_dimm->handle, 0, 0);
+- if (obj) {
+- ACPI_FREE(obj);
+- nfit_mem->has_lsr = 1;
++ if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
++ && acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
+ dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
++ nfit_mem->has_lsr = true;
+ }
+
+- obj = acpi_label_write(adev_dimm->handle, 0, 0, NULL);
+- if (obj) {
+- ACPI_FREE(obj);
+- nfit_mem->has_lsw = 1;
++ if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
+ dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
++ nfit_mem->has_lsw = true;
+ }
+
+ return 0;
+@@ -1839,10 +1843,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
+ cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
+ }
+
+- if (nfit_mem->has_lsi)
++ if (nfit_mem->has_lsr) {
+ set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
+- if (nfit_mem->has_lsr)
+ set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
++ }
+ if (nfit_mem->has_lsw)
+ set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
+
+@@ -2579,7 +2583,7 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
+ struct acpi_nfit_system_address *spa = nfit_spa->spa;
+ struct nd_blk_region_desc *ndbr_desc;
+ struct nfit_mem *nfit_mem;
+- int blk_valid = 0, rc;
++ int rc;
+
+ if (!nvdimm) {
+ dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
+@@ -2599,15 +2603,14 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
+ if (!nfit_mem || !nfit_mem->bdw) {
+ dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
+ spa->range_index, nvdimm_name(nvdimm));
+- } else {
+- mapping->size = nfit_mem->bdw->capacity;
+- mapping->start = nfit_mem->bdw->start_address;
+- ndr_desc->num_lanes = nfit_mem->bdw->windows;
+- blk_valid = 1;
++ break;
+ }
+
++ mapping->size = nfit_mem->bdw->capacity;
++ mapping->start = nfit_mem->bdw->start_address;
++ ndr_desc->num_lanes = nfit_mem->bdw->windows;
+ ndr_desc->mapping = mapping;
+- ndr_desc->num_mappings = blk_valid;
++ ndr_desc->num_mappings = 1;
+ ndbr_desc = to_blk_region_desc(ndr_desc);
+ ndbr_desc->enable = acpi_nfit_blk_region_enable;
+ ndbr_desc->do_io = acpi_desc->blk_do_io;
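
Aside, not part of the patch itself: the nfit rework stops calling _LSI/_LSR/_LSW just to see whether they exist and instead probes for each method handle, setting has_lsr/has_lsw capability flags once up front. A generic userspace sketch of the probe-then-flag pattern (has_method() is a fictional stand-in for acpi_get_handle()):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool has_method(const char *name)
{
        /* pretend the firmware implements only _LSI and _LSR */
        return !strcmp(name, "_LSI") || !strcmp(name, "_LSR");
}

int main(void)
{
        /* mirrors the hunk: _LSR needs _LSI too, _LSW needs _LSR */
        bool has_lsr = has_method("_LSI") && has_method("_LSR");
        bool has_lsw = has_lsr && has_method("_LSW");

        printf("label read: %d, label write: %d\n", has_lsr, has_lsw);
        return 0;
}
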
+diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
+index 50d36e166d70..ac9c49463731 100644
+--- a/drivers/acpi/nfit/nfit.h
++++ b/drivers/acpi/nfit/nfit.h
+@@ -171,9 +171,8 @@ struct nfit_mem {
+ struct resource *flush_wpq;
+ unsigned long dsm_mask;
+ int family;
+- u32 has_lsi:1;
+- u32 has_lsr:1;
+- u32 has_lsw:1;
++ bool has_lsr;
++ bool has_lsw;
+ };
+
+ struct acpi_nfit_desc {
+diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
+index 601e5d372887..43587ac680e4 100644
+--- a/drivers/acpi/video_detect.c
++++ b/drivers/acpi/video_detect.c
+@@ -219,6 +219,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
+ "3570R/370R/470R/450R/510R/4450RV"),
+ },
+ },
++ {
++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
++ .callback = video_detect_force_video,
++ .ident = "SAMSUNG 670Z5E",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
++ },
++ },
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
+ .callback = video_detect_force_video,
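
Aside, not part of the patch itself: the video_detect.c hunk adds one entry to a sentinel-terminated DMI quirk table that is scanned for a vendor/product match at boot. A stripped-down userspace sketch of that table-driven lookup (the strings come from the hunk; everything else is invented):

#include <stdio.h>
#include <string.h>

struct quirk {
        const char *vendor;
        const char *product;
        const char *action;
};

static const struct quirk quirks[] = {
        { "SAMSUNG ELECTRONICS CO., LTD.", "670Z5E", "force_video" },
        { NULL, NULL, NULL },   /* sentinel, like the table's {} terminator */
};

static const char *match_quirk(const char *vendor, const char *product)
{
        const struct quirk *q;

        for (q = quirks; q->vendor; q++)
                if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                        return q->action;
        return NULL;
}

int main(void)
{
        const char *a = match_quirk("SAMSUNG ELECTRONICS CO., LTD.", "670Z5E");
        printf("%s\n", a ? a : "no quirk");
        return 0;
}
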
+diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
+index ee302ccdfbc8..453116fd4362 100644
+--- a/drivers/base/regmap/regmap.c
++++ b/drivers/base/regmap/regmap.c
+@@ -1831,7 +1831,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
+ return -EINVAL;
+ if (val_len % map->format.val_bytes)
+ return -EINVAL;
+- if (map->max_raw_write && map->max_raw_write > val_len)
++ if (map->max_raw_write && map->max_raw_write < val_len)
+ return -E2BIG;
+
+ map->lock(map->lock_arg);
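
Aside, not part of the patch itself: the regmap one-liner fixes an inverted bounds check; the old test rejected writes smaller than max_raw_write and waved oversized ones through. The corrected predicate, reduced to a testable userspace function:

#include <assert.h>
#include <stddef.h>

static int check_len(size_t max_raw_write, size_t val_len)
{
        /* 0 means "no limit", mirroring map->max_raw_write */
        if (max_raw_write && max_raw_write < val_len)
                return -1;      /* -E2BIG in the kernel */
        return 0;
}

int main(void)
{
        assert(check_len(16, 8) == 0);          /* within limit: accepted */
        assert(check_len(16, 32) == -1);        /* oversized: rejected */
        assert(check_len(0, 1024) == 0);        /* no limit configured */
        return 0;
}
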
+diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
+index 1a81f6b8c2ce..942ac63f0b12 100644
+--- a/drivers/bluetooth/hci_bcm.c
++++ b/drivers/bluetooth/hci_bcm.c
+@@ -126,6 +126,10 @@ struct bcm_data {
+ static DEFINE_MUTEX(bcm_device_lock);
+ static LIST_HEAD(bcm_device_list);
+
++static int irq_polarity = -1;
++module_param(irq_polarity, int, 0444);
++MODULE_PARM_DESC(irq_polarity, "IRQ polarity 0: active-high 1: active-low");
++
+ static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
+ {
+ if (hu->serdev)
+@@ -975,11 +979,17 @@ static int bcm_acpi_probe(struct bcm_device *dev)
+ }
+ acpi_dev_free_resource_list(&resources);
+
+- dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table);
+- if (dmi_id) {
+- dev_warn(dev->dev, "%s: Overwriting IRQ polarity to active low",
+- dmi_id->ident);
+- dev->irq_active_low = true;
++ if (irq_polarity != -1) {
++ dev->irq_active_low = irq_polarity;
++ dev_warn(dev->dev, "Overwriting IRQ polarity to active %s by module-param\n",
++ dev->irq_active_low ? "low" : "high");
++ } else {
++ dmi_id = dmi_first_match(bcm_active_low_irq_dmi_table);
++ if (dmi_id) {
++ dev_warn(dev->dev, "%s: Overwriting IRQ polarity to active low",
++ dmi_id->ident);
++ dev->irq_active_low = true;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index e5b3d3ba4660..38729baed6ee 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -427,8 +427,9 @@ struct crng_state primary_crng = {
+ * its value (from 0->1->2).
+ */
+ static int crng_init = 0;
+-#define crng_ready() (likely(crng_init > 0))
++#define crng_ready() (likely(crng_init > 1))
+ static int crng_init_cnt = 0;
++static unsigned long crng_global_init_time = 0;
+ #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
+ static void _extract_crng(struct crng_state *crng,
+ __u32 out[CHACHA20_BLOCK_WORDS]);
+@@ -732,7 +733,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
+
+ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+ {
+- const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
++ const int nbits_max = r->poolinfo->poolwords * 32;
+
+ if (nbits < 0)
+ return -EINVAL;
+@@ -786,6 +787,10 @@ static void crng_initialize(struct crng_state *crng)
+ crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+ }
+
++/*
++ * crng_fast_load() can be called by code in the interrupt service
++ * path. So we can't afford to dilly-dally.
++ */
+ static int crng_fast_load(const char *cp, size_t len)
+ {
+ unsigned long flags;
+@@ -793,7 +798,7 @@ static int crng_fast_load(const char *cp, size_t len)
+
+ if (!spin_trylock_irqsave(&primary_crng.lock, flags))
+ return 0;
+- if (crng_ready()) {
++ if (crng_init != 0) {
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+ return 0;
+ }
+@@ -812,6 +817,51 @@ static int crng_fast_load(const char *cp, size_t len)
+ return 1;
+ }
+
++/*
++ * crng_slow_load() is called by add_device_randomness, which has two
++ * attributes. (1) We can't trust the buffer passed to it is
++ * guaranteed to be unpredictable (so it might not have any entropy at
++ * all), and (2) it doesn't have the performance constraints of
++ * crng_fast_load().
++ *
++ * So we do something more comprehensive which is guaranteed to touch
++ * all of the primary_crng's state, and which uses a LFSR with a
++ * period of 255 as part of the mixing algorithm. Finally, we do
++ * *not* advance crng_init_cnt since buffer we may get may be something
++ * like a fixed DMI table (for example), which might very well be
++ * unique to the machine, but is otherwise unvarying.
++ */
++static int crng_slow_load(const char *cp, size_t len)
++{
++ unsigned long flags;
++ static unsigned char lfsr = 1;
++ unsigned char tmp;
++ unsigned i, max = CHACHA20_KEY_SIZE;
++ const char * src_buf = cp;
++ char * dest_buf = (char *) &primary_crng.state[4];
++
++ if (!spin_trylock_irqsave(&primary_crng.lock, flags))
++ return 0;
++ if (crng_init != 0) {
++ spin_unlock_irqrestore(&primary_crng.lock, flags);
++ return 0;
++ }
++ if (len > max)
++ max = len;
++
++ for (i = 0; i < max ; i++) {
++ tmp = lfsr;
++ lfsr >>= 1;
++ if (tmp & 1)
++ lfsr ^= 0xE1;
++ tmp = dest_buf[i % CHACHA20_KEY_SIZE];
++ dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
++ lfsr += (tmp << 3) | (tmp >> 5);
++ }
++ spin_unlock_irqrestore(&primary_crng.lock, flags);
++ return 1;
++}
++
+ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+ {
+ unsigned long flags;
+@@ -830,7 +880,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+ _crng_backtrack_protect(&primary_crng, buf.block,
+ CHACHA20_KEY_SIZE);
+ }
+- spin_lock_irqsave(&primary_crng.lock, flags);
++ spin_lock_irqsave(&crng->lock, flags);
+ for (i = 0; i < 8; i++) {
+ unsigned long rv;
+ if (!arch_get_random_seed_long(&rv) &&
+@@ -840,7 +890,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+ }
+ memzero_explicit(&buf, sizeof(buf));
+ crng->init_time = jiffies;
+- spin_unlock_irqrestore(&primary_crng.lock, flags);
++ spin_unlock_irqrestore(&crng->lock, flags);
+ if (crng == &primary_crng && crng_init < 2) {
+ invalidate_batched_entropy();
+ crng_init = 2;
+@@ -855,8 +905,9 @@ static void _extract_crng(struct crng_state *crng,
+ {
+ unsigned long v, flags;
+
+- if (crng_init > 1 &&
+- time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
++ if (crng_ready() &&
++ (time_after(crng_global_init_time, crng->init_time) ||
++ time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
+ crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
+ spin_lock_irqsave(&crng->lock, flags);
+ if (arch_get_random_long(&v))
+@@ -981,10 +1032,8 @@ void add_device_randomness(const void *buf, unsigned int size)
+ unsigned long time = random_get_entropy() ^ jiffies;
+ unsigned long flags;
+
+- if (!crng_ready()) {
+- crng_fast_load(buf, size);
+- return;
+- }
++ if (!crng_ready() && size)
++ crng_slow_load(buf, size);
+
+ trace_add_device_randomness(size, _RET_IP_);
+ spin_lock_irqsave(&input_pool.lock, flags);
+@@ -1141,7 +1190,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
+ fast_mix(fast_pool);
+ add_interrupt_bench(cycles);
+
+- if (!crng_ready()) {
++ if (unlikely(crng_init == 0)) {
+ if ((fast_pool->count >= 64) &&
+ crng_fast_load((char *) fast_pool->pool,
+ sizeof(fast_pool->pool))) {
+@@ -1691,6 +1740,7 @@ static int rand_initialize(void)
+ init_std_data(&input_pool);
+ init_std_data(&blocking_pool);
+ crng_initialize(&primary_crng);
++ crng_global_init_time = jiffies;
+
+ #ifdef CONFIG_NUMA
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+@@ -1877,6 +1927,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ input_pool.entropy_count = 0;
+ blocking_pool.entropy_count = 0;
+ return 0;
++ case RNDRESEEDCRNG:
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++ if (crng_init < 2)
++ return -ENODATA;
++ crng_reseed(&primary_crng, NULL);
++ crng_global_init_time = jiffies - 1;
++ return 0;
+ default:
+ return -EINVAL;
+ }
+@@ -2214,7 +2272,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
+ {
+ struct entropy_store *poolp = &input_pool;
+
+- if (!crng_ready()) {
++ if (unlikely(crng_init == 0)) {
+ crng_fast_load(buffer, count);
+ return;
+ }
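
Aside, not part of the patch itself: crng_slow_load() above mixes untrusted device bytes into the primary ChaCha20 key through an 8-bit LFSR, so even a short, fixed input (a DMI table, say) perturbs all 32 key bytes without advancing crng_init_cnt. The mixing loop, lifted in shape from the hunk into a standalone userspace program (buffer contents invented):

#include <stddef.h>
#include <stdio.h>

#define KEY_SIZE 32     /* CHACHA20_KEY_SIZE in the patch */

static void slow_mix(unsigned char *dest, const unsigned char *src,
                     size_t len)
{
        static unsigned char lfsr = 1;  /* persists across calls, as in the patch */
        unsigned char tmp;
        size_t i, max = len > KEY_SIZE ? len : KEY_SIZE;

        for (i = 0; i < max; i++) {
                tmp = lfsr;
                lfsr >>= 1;
                if (tmp & 1)
                        lfsr ^= 0xE1;   /* feedback mask used by the patch */
                tmp = dest[i % KEY_SIZE];
                dest[i % KEY_SIZE] ^= src[i % len] ^ lfsr;
                lfsr += (tmp << 3) | (tmp >> 5);        /* fold old state back in */
        }
}

int main(void)
{
        unsigned char key[KEY_SIZE] = { 0 };

        slow_mix(key, (const unsigned char *)"example-dmi-table", 17);
        printf("%02x %02x %02x ...\n", key[0], key[1], key[2]);
        return 0;
}

Because max is at least KEY_SIZE, every key byte is written at least once, which is the property the kernel comment calls "guaranteed to touch all of the primary_crng's state".
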
+diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
+index 9e80a953d693..248c04090dea 100644
+--- a/drivers/char/tpm/tpm-interface.c
++++ b/drivers/char/tpm/tpm-interface.c
+@@ -969,6 +969,10 @@ int tpm_do_selftest(struct tpm_chip *chip)
+ loops = jiffies_to_msecs(duration) / delay_msec;
+
+ rc = tpm_continue_selftest(chip);
++ if (rc == TPM_ERR_INVALID_POSTINIT) {
++ chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
++ dev_info(&chip->dev, "TPM not ready (%d)\n", rc);
++ }
+ /* This may fail if there was no TPM driver during a suspend/resume
+ * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
+ */
+diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
+index a07f6451694a..fa0d5c8611a0 100644
+--- a/drivers/clk/bcm/clk-bcm2835.c
++++ b/drivers/clk/bcm/clk-bcm2835.c
+@@ -602,9 +602,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
+ const struct bcm2835_pll_data *data = pll->data;
+
+ spin_lock(&cprman->regs_lock);
+- cprman_write(cprman, data->cm_ctrl_reg,
+- cprman_read(cprman, data->cm_ctrl_reg) |
+- CM_PLL_ANARST);
++ cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
+ cprman_write(cprman, data->a2w_ctrl_reg,
+ cprman_read(cprman, data->a2w_ctrl_reg) |
+ A2W_PLL_CTRL_PWRDN);
+@@ -640,6 +638,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
+ cpu_relax();
+ }
+
++ cprman_write(cprman, data->a2w_ctrl_reg,
++ cprman_read(cprman, data->a2w_ctrl_reg) |
++ A2W_PLL_CTRL_PRST_DISABLE);
++
+ return 0;
+ }
+
+diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
+index 8e7f16fd87c9..deca7527f92f 100644
+--- a/drivers/clk/mediatek/clk-mt2701.c
++++ b/drivers/clk/mediatek/clk-mt2701.c
+@@ -148,6 +148,7 @@ static const struct mtk_fixed_factor top_fixed_divs[] = {
+ FACTOR(CLK_TOP_CLK26M_D8, "clk26m_d8", "clk26m", 1, 8),
+ FACTOR(CLK_TOP_32K_INTERNAL, "32k_internal", "clk26m", 1, 793),
+ FACTOR(CLK_TOP_32K_EXTERNAL, "32k_external", "rtc32k", 1, 1),
++ FACTOR(CLK_TOP_AXISEL_D4, "axisel_d4", "axi_sel", 1, 4),
+ };
+
+ static const char * const axi_parents[] = {
+@@ -857,13 +858,13 @@ static const struct mtk_gate peri_clks[] = {
+ GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11),
+ GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10),
+ GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9),
+- GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8),
+- GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7),
+- GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6),
+- GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5),
+- GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4),
+- GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3),
+- GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2),
++ GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axisel_d4", 8),
++ GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axisel_d4", 7),
++ GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axisel_d4", 6),
++ GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axisel_d4", 5),
++ GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axisel_d4", 4),
++ GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axisel_d4", 3),
++ GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axisel_d4", 2),
+ GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1),
+ GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "nfi2x_sel", 0),
+
+diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
+index 394aa6f03f01..9ff4ea63932d 100644
+--- a/drivers/clk/mvebu/armada-38x.c
++++ b/drivers/clk/mvebu/armada-38x.c
+@@ -46,11 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
+ }
+
+ static const u32 armada_38x_cpu_frequencies[] __initconst = {
+- 0, 0, 0, 0,
+- 1066 * 1000 * 1000, 0, 0, 0,
++ 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
++ 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
+ 1332 * 1000 * 1000, 0, 0, 0,
+ 1600 * 1000 * 1000, 0, 0, 0,
+- 1866 * 1000 * 1000,
++ 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
+ };
+
+ static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
+@@ -76,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
+ };
+
+ static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
+- {0, 1}, {0, 1}, {0, 1}, {0, 1},
+- {1, 2}, {0, 1}, {0, 1}, {0, 1},
+- {1, 2}, {0, 1}, {0, 1}, {0, 1},
++ {1, 2}, {0, 1}, {1, 2}, {0, 1},
++ {1, 2}, {0, 1}, {1, 2}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
++ {1, 2}, {0, 1}, {0, 1}, {1, 2},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+@@ -91,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+ {1, 2}, {0, 1}, {0, 1}, {0, 1},
+- {1, 2}, {0, 1}, {0, 1}, {0, 1},
++ {1, 2}, {0, 1}, {0, 1}, {7, 15},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+ {0, 1}, {0, 1}, {0, 1}, {0, 1},
+diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
+index eea38f6ea77e..3892346c4fcc 100644
+--- a/drivers/clk/renesas/clk-sh73a0.c
++++ b/drivers/clk/renesas/clk-sh73a0.c
+@@ -46,7 +46,7 @@ struct div4_clk {
+ unsigned int shift;
+ };
+
+-static struct div4_clk div4_clks[] = {
++static const struct div4_clk div4_clks[] = {
+ { "zg", "pll0", CPG_FRQCRA, 16 },
+ { "m3", "pll1", CPG_FRQCRA, 12 },
+ { "b", "pll1", CPG_FRQCRA, 8 },
+@@ -79,7 +79,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
+ {
+ const struct clk_div_table *table = NULL;
+ unsigned int shift, reg, width;
+- const char *parent_name;
++ const char *parent_name = NULL;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+
+@@ -135,7 +135,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
+ shift = 24;
+ width = 5;
+ } else {
+- struct div4_clk *c;
++ const struct div4_clk *c;
+
+ for (c = div4_clks; c->name; c++) {
+ if (!strcmp(name, c->name)) {
+diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
+index 11a5066e5c27..5234acd30e89 100644
+--- a/drivers/clk/tegra/clk-emc.c
++++ b/drivers/clk/tegra/clk-emc.c
+@@ -515,7 +515,7 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
+
+ init.name = "emc";
+ init.ops = &tegra_clk_emc_ops;
+- init.flags = 0;
++ init.flags = CLK_IS_CRITICAL;
+ init.parent_names = emc_parent_clk_names;
+ init.num_parents = ARRAY_SIZE(emc_parent_clk_names);
+
+diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
+index c02711927d79..2acba2986bc6 100644
+--- a/drivers/clk/tegra/clk-tegra-periph.c
++++ b/drivers/clk/tegra/clk-tegra-periph.c
+@@ -830,7 +830,7 @@ static struct tegra_periph_init_data gate_clks[] = {
+ GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
+ GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
+ GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
+- GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
++ GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IS_CRITICAL),
+ GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
+ GATE("ispa", "isp", 23, 0, tegra_clk_ispa, 0),
+ GATE("ispb", "isp", 3, 0, tegra_clk_ispb, 0),
+diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
+index 10047107c1dc..89d6b47a27a8 100644
+--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
++++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
+@@ -125,7 +125,8 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
+ /* SCLK */
+ dt_clk = tegra_lookup_dt_id(tegra_clk_sclk, tegra_clks);
+ if (dt_clk) {
+- clk = clk_register_divider(NULL, "sclk", "sclk_mux", 0,
++ clk = clk_register_divider(NULL, "sclk", "sclk_mux",
++ CLK_IS_CRITICAL,
+ clk_base + SCLK_DIVIDER, 0, 8,
+ 0, &sysrate_lock);
+ *dt_clk = clk;
+@@ -137,7 +138,8 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
+ clk = tegra_clk_register_super_mux("sclk",
+ gen_info->sclk_parents,
+ gen_info->num_sclk_parents,
+- CLK_SET_RATE_PARENT,
++ CLK_SET_RATE_PARENT |
++ CLK_IS_CRITICAL,
+ clk_base + SCLK_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ *dt_clk = clk;
+@@ -151,7 +153,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
+ clk_base + SYSTEM_CLK_RATE, 4, 2, 0,
+ &sysrate_lock);
+ clk = clk_register_gate(NULL, "hclk", "hclk_div",
+- CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
++ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ clk_base + SYSTEM_CLK_RATE,
+ 7, CLK_GATE_SET_TO_DISABLE, &sysrate_lock);
+ *dt_clk = clk;
+diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
+index 63087d17c3e2..c3945c683f60 100644
+--- a/drivers/clk/tegra/clk-tegra114.c
++++ b/drivers/clk/tegra/clk-tegra114.c
+@@ -955,8 +955,7 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
+
+ /* PLLM */
+ clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
+- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+- &pll_m_params, NULL);
++ CLK_SET_RATE_GATE, &pll_m_params, NULL);
+ clks[TEGRA114_CLK_PLL_M] = clk;
+
+ /* PLLM_OUT1 */
+diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
+index e81ea5b11577..230f9a2c1abf 100644
+--- a/drivers/clk/tegra/clk-tegra124.c
++++ b/drivers/clk/tegra/clk-tegra124.c
+@@ -1089,8 +1089,7 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
+
+ /* PLLM */
+ clk = tegra_clk_register_pllm("pll_m", "pll_ref", clk_base, pmc,
+- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+- &pll_m_params, NULL);
++ CLK_SET_RATE_GATE, &pll_m_params, NULL);
+ clk_register_clkdev(clk, "pll_m", NULL);
+ clks[TEGRA124_CLK_PLL_M] = clk;
+
+@@ -1099,7 +1098,7 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+- clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
++ clk_base + PLLM_OUT, 1, 0,
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clk_register_clkdev(clk, "pll_m_out1", NULL);
+ clks[TEGRA124_CLK_PLL_M_OUT1] = clk;
+@@ -1272,7 +1271,7 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
+ { TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1 },
+ { TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0 },
+ { TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0 },
+- { TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1 },
++ { TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 0 },
+ { TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1 },
+ { TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1 },
+ { TEGRA124_CLK_PLL_C, TEGRA124_CLK_CLK_MAX, 768000000, 0 },
+diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
+index cbd5a2e5c569..e3392ca2c2fc 100644
+--- a/drivers/clk/tegra/clk-tegra20.c
++++ b/drivers/clk/tegra/clk-tegra20.c
+@@ -576,6 +576,7 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
+ [tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
++ [tegra_clk_emc] = { .dt_id = TEGRA20_CLK_EMC, .present = true },
+ };
+
+ static unsigned long tegra20_clk_measure_input_freq(void)
+@@ -651,8 +652,7 @@ static void tegra20_pll_init(void)
+
+ /* PLLM */
+ clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, NULL,
+- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
+- &pll_m_params, NULL);
++ CLK_SET_RATE_GATE, &pll_m_params, NULL);
+ clks[TEGRA20_CLK_PLL_M] = clk;
+
+ /* PLLM_OUT1 */
+@@ -660,7 +660,7 @@ static void tegra20_pll_init(void)
+ clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
+ 8, 8, 1, NULL);
+ clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
+- clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
++ clk_base + PLLM_OUT, 1, 0,
+ CLK_SET_RATE_PARENT, 0, NULL);
+ clks[TEGRA20_CLK_PLL_M_OUT1] = clk;
+
+@@ -723,7 +723,8 @@ static void tegra20_super_clk_init(void)
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+- ARRAY_SIZE(sclk_parents), CLK_SET_RATE_PARENT,
++ ARRAY_SIZE(sclk_parents),
++ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clks[TEGRA20_CLK_SCLK] = clk;
+
+@@ -814,9 +815,6 @@ static void __init tegra20_periph_clk_init(void)
+ CLK_SET_RATE_NO_REPARENT,
+ clk_base + CLK_SOURCE_EMC,
+ 30, 2, 0, &emc_lock);
+- clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
+- 57, periph_clk_enb_refcnt);
+- clks[TEGRA20_CLK_EMC] = clk;
+
+ clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
+ &emc_lock);
+@@ -1019,13 +1017,12 @@ static struct tegra_clk_init_table init_table[] __initdata = {
+ { TEGRA20_CLK_PLL_P_OUT2, TEGRA20_CLK_CLK_MAX, 48000000, 1 },
1593 + { TEGRA20_CLK_PLL_P_OUT3, TEGRA20_CLK_CLK_MAX, 72000000, 1 },
1594 + { TEGRA20_CLK_PLL_P_OUT4, TEGRA20_CLK_CLK_MAX, 24000000, 1 },
1595 +- { TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 1 },
1596 +- { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
1597 +- { TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 1 },
1598 +- { TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
1599 +- { TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 1 },
1600 ++ { TEGRA20_CLK_PLL_C, TEGRA20_CLK_CLK_MAX, 600000000, 0 },
1601 ++ { TEGRA20_CLK_PLL_C_OUT1, TEGRA20_CLK_CLK_MAX, 216000000, 0 },
1602 ++ { TEGRA20_CLK_SCLK, TEGRA20_CLK_PLL_C_OUT1, 0, 0 },
1603 ++ { TEGRA20_CLK_HCLK, TEGRA20_CLK_CLK_MAX, 0, 0 },
1604 ++ { TEGRA20_CLK_PCLK, TEGRA20_CLK_CLK_MAX, 60000000, 0 },
1605 + { TEGRA20_CLK_CSITE, TEGRA20_CLK_CLK_MAX, 0, 1 },
1606 +- { TEGRA20_CLK_EMC, TEGRA20_CLK_CLK_MAX, 0, 1 },
1607 + { TEGRA20_CLK_CCLK, TEGRA20_CLK_CLK_MAX, 0, 1 },
1608 + { TEGRA20_CLK_UARTA, TEGRA20_CLK_PLL_P, 0, 0 },
1609 + { TEGRA20_CLK_UARTB, TEGRA20_CLK_PLL_P, 0, 0 },
1610 +diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
1611 +index 9e6260869eb9..25cc6e0905be 100644
1612 +--- a/drivers/clk/tegra/clk-tegra210.c
1613 ++++ b/drivers/clk/tegra/clk-tegra210.c
1614 +@@ -3025,7 +3025,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
1615 + { TEGRA210_CLK_I2S4, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 },
1616 + { TEGRA210_CLK_HOST1X, TEGRA210_CLK_PLL_P, 136000000, 1 },
1617 + { TEGRA210_CLK_SCLK_MUX, TEGRA210_CLK_PLL_P, 0, 1 },
1618 +- { TEGRA210_CLK_SCLK, TEGRA210_CLK_CLK_MAX, 102000000, 1 },
1619 ++ { TEGRA210_CLK_SCLK, TEGRA210_CLK_CLK_MAX, 102000000, 0 },
1620 + { TEGRA210_CLK_DFLL_SOC, TEGRA210_CLK_PLL_P, 51000000, 1 },
1621 + { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
1622 + { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
1623 +@@ -3040,7 +3040,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
1624 + { TEGRA210_CLK_XUSB_DEV_SRC, TEGRA210_CLK_PLL_P_OUT_XUSB, 102000000, 0 },
1625 + { TEGRA210_CLK_SATA, TEGRA210_CLK_PLL_P, 104000000, 0 },
1626 + { TEGRA210_CLK_SATA_OOB, TEGRA210_CLK_PLL_P, 204000000, 0 },
1627 +- { TEGRA210_CLK_EMC, TEGRA210_CLK_CLK_MAX, 0, 1 },
1628 + { TEGRA210_CLK_MSELECT, TEGRA210_CLK_CLK_MAX, 0, 1 },
1629 + { TEGRA210_CLK_CSITE, TEGRA210_CLK_CLK_MAX, 0, 1 },
1630 + /* TODO find a way to enable this on-demand */
1631 +diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
1632 +index bee84c554932..8428895ad475 100644
1633 +--- a/drivers/clk/tegra/clk-tegra30.c
1634 ++++ b/drivers/clk/tegra/clk-tegra30.c
1635 +@@ -819,6 +819,7 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = {
1636 + [tegra_clk_pll_a] = { .dt_id = TEGRA30_CLK_PLL_A, .present = true },
1637 + [tegra_clk_pll_a_out0] = { .dt_id = TEGRA30_CLK_PLL_A_OUT0, .present = true },
1638 + [tegra_clk_cec] = { .dt_id = TEGRA30_CLK_CEC, .present = true },
1639 ++ [tegra_clk_emc] = { .dt_id = TEGRA30_CLK_EMC, .present = true },
1640 + };
1641 +
1642 + static const char *pll_e_parents[] = { "pll_ref", "pll_p" };
1643 +@@ -843,8 +844,7 @@ static void __init tegra30_pll_init(void)
1644 +
1645 + /* PLLM */
1646 + clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
1647 +- CLK_IGNORE_UNUSED | CLK_SET_RATE_GATE,
1648 +- &pll_m_params, NULL);
1649 ++ CLK_SET_RATE_GATE, &pll_m_params, NULL);
1650 + clks[TEGRA30_CLK_PLL_M] = clk;
1651 +
1652 + /* PLLM_OUT1 */
1653 +@@ -852,7 +852,7 @@ static void __init tegra30_pll_init(void)
1654 + clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
1655 + 8, 8, 1, NULL);
1656 + clk = tegra_clk_register_pll_out("pll_m_out1", "pll_m_out1_div",
1657 +- clk_base + PLLM_OUT, 1, 0, CLK_IGNORE_UNUSED |
1658 ++ clk_base + PLLM_OUT, 1, 0,
1659 + CLK_SET_RATE_PARENT, 0, NULL);
1660 + clks[TEGRA30_CLK_PLL_M_OUT1] = clk;
1661 +
1662 +@@ -990,7 +990,7 @@ static void __init tegra30_super_clk_init(void)
1663 + /* SCLK */
1664 + clk = tegra_clk_register_super_mux("sclk", sclk_parents,
1665 + ARRAY_SIZE(sclk_parents),
1666 +- CLK_SET_RATE_PARENT,
1667 ++ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
1668 + clk_base + SCLK_BURST_POLICY,
1669 + 0, 4, 0, 0, NULL);
1670 + clks[TEGRA30_CLK_SCLK] = clk;
1671 +@@ -1060,9 +1060,6 @@ static void __init tegra30_periph_clk_init(void)
1672 + CLK_SET_RATE_NO_REPARENT,
1673 + clk_base + CLK_SOURCE_EMC,
1674 + 30, 2, 0, &emc_lock);
1675 +- clk = tegra_clk_register_periph_gate("emc", "emc_mux", 0, clk_base, 0,
1676 +- 57, periph_clk_enb_refcnt);
1677 +- clks[TEGRA30_CLK_EMC] = clk;
1678 +
1679 + clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC,
1680 + &emc_lock);
1681 +@@ -1252,10 +1249,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
1682 + { TEGRA30_CLK_SDMMC1, TEGRA30_CLK_PLL_P, 48000000, 0 },
1683 + { TEGRA30_CLK_SDMMC2, TEGRA30_CLK_PLL_P, 48000000, 0 },
1684 + { TEGRA30_CLK_SDMMC3, TEGRA30_CLK_PLL_P, 48000000, 0 },
1685 +- { TEGRA30_CLK_PLL_M, TEGRA30_CLK_CLK_MAX, 0, 1 },
1686 +- { TEGRA30_CLK_PCLK, TEGRA30_CLK_CLK_MAX, 0, 1 },
1687 + { TEGRA30_CLK_CSITE, TEGRA30_CLK_CLK_MAX, 0, 1 },
1688 +- { TEGRA30_CLK_EMC, TEGRA30_CLK_CLK_MAX, 0, 1 },
1689 + { TEGRA30_CLK_MSELECT, TEGRA30_CLK_CLK_MAX, 0, 1 },
1690 + { TEGRA30_CLK_SBC1, TEGRA30_CLK_PLL_P, 100000000, 0 },
1691 + { TEGRA30_CLK_SBC2, TEGRA30_CLK_PLL_P, 100000000, 0 },
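
A recurring change in the Tegra hunks above is the move from CLK_IGNORE_UNUSED to CLK_IS_CRITICAL on the EMC, SCLK and HCLK paths. CLK_IGNORE_UNUSED only tells clk_disable_unused() to leave the clock alone at late boot; CLK_IS_CRITICAL additionally makes the clk core enable the clock at registration and hold a permanent enable count, so it can never be gated even with no registered consumers, which is the right semantics for a memory-controller clock. A minimal sketch of the flag in use, assuming the common clock framework's clk_register_gate() (the clock names and register are placeholders, not taken from the patch):

    #include <linux/clk-provider.h>

    static struct clk *register_critical_gate(void __iomem *reg,
                                              spinlock_t *lock)
    {
            /*
             * CLK_IGNORE_UNUSED would only skip this clock in
             * clk_disable_unused(); CLK_IS_CRITICAL also enables it at
             * registration and pins the enable count, so no consumer
             * (or lack of one) can ever gate it.
             */
            return clk_register_gate(NULL, "critical_gate", "parent_clk",
                                     CLK_IS_CRITICAL, reg, 0, 0, lock);
    }
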
1692 +diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
1693 +index c6ebc88a7d8d..72a2975499db 100644
1694 +--- a/drivers/cpufreq/armada-37xx-cpufreq.c
1695 ++++ b/drivers/cpufreq/armada-37xx-cpufreq.c
1696 +@@ -202,6 +202,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
1697 + cur_frequency = clk_get_rate(clk);
1698 + if (!cur_frequency) {
1699 + dev_err(cpu_dev, "Failed to get clock rate for CPU\n");
1700 ++ clk_put(clk);
1701 + return -EINVAL;
1702 + }
1703 +
1704 +@@ -210,6 +211,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
1705 + return -EINVAL;
1706 +
1707 + armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider);
1708 ++ clk_put(clk);
1709 +
1710 + for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR;
1711 + load_lvl++) {
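
The armada37xx hunk adds the clk_put() that was missing on both the error return and the success path; clk_get() takes a reference that must be dropped once the driver is done with the handle. The general get/use/put pattern, as a hedged sketch (the device argument and the rate check are placeholders):

    #include <linux/clk.h>
    #include <linux/err.h>

    static int read_cpu_clk_rate(struct device *dev, unsigned long *rate)
    {
            struct clk *clk = clk_get(dev, NULL);

            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            *rate = clk_get_rate(clk);
            clk_put(clk);           /* drop the reference on every exit path */

            return *rate ? 0 : -EINVAL;
    }
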
1712 +diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
1713 +index a1c3025f9df7..dcb1cb9a4572 100644
1714 +--- a/drivers/cpufreq/cppc_cpufreq.c
1715 ++++ b/drivers/cpufreq/cppc_cpufreq.c
1716 +@@ -20,6 +20,7 @@
1717 + #include <linux/cpu.h>
1718 + #include <linux/cpufreq.h>
1719 + #include <linux/dmi.h>
1720 ++#include <linux/time.h>
1721 + #include <linux/vmalloc.h>
1722 +
1723 + #include <asm/unaligned.h>
1724 +@@ -162,6 +163,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
1725 + policy->cpuinfo.max_freq = cppc_dmi_max_khz;
1726 +
1727 + policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
1728 ++ policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
1729 ++ NSEC_PER_USEC;
1730 + policy->shared_type = cpu->shared_type;
1731 +
1732 + if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
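
For the cppc_cpufreq hunk: cppc_get_transition_latency() reports nanoseconds, while policy->transition_delay_us is expressed in microseconds, hence the NSEC_PER_USEC divisor pulled in via <linux/time.h>. Illustrative arithmetic (the latency value is made up):

    /* NSEC_PER_USEC is 1000, so e.g. a reported 500000 ns latency becomes: */
    unsigned int delay_us = 500000 / NSEC_PER_USEC;    /* 500 us */
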
1733 +diff --git a/drivers/dax/device.c b/drivers/dax/device.c
1734 +index 2137dbc29877..383779707404 100644
1735 +--- a/drivers/dax/device.c
1736 ++++ b/drivers/dax/device.c
1737 +@@ -19,6 +19,7 @@
1738 + #include <linux/dax.h>
1739 + #include <linux/fs.h>
1740 + #include <linux/mm.h>
1741 ++#include <linux/mman.h>
1742 + #include "dax-private.h"
1743 + #include "dax.h"
1744 +
1745 +@@ -534,6 +535,7 @@ static const struct file_operations dax_fops = {
1746 + .release = dax_release,
1747 + .get_unmapped_area = dax_get_unmapped_area,
1748 + .mmap = dax_mmap,
1749 ++ .mmap_supported_flags = MAP_SYNC,
1750 + };
1751 +
1752 + static void dev_dax_release(struct device *dev)
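
The device-dax hunk wires up .mmap_supported_flags = MAP_SYNC, the file_operations field that whitelists extra MAP_* flags for mmap(); without it, a request for a synchronous mapping on a device-dax node is rejected. A hedged userspace sketch (the /dev/dax0.0 path is only an example, and MAP_SYNC/MAP_SHARED_VALIDATE need recent kernel headers; on older glibc the constants may have to be defined by hand):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <linux/mman.h>    /* MAP_SYNC, MAP_SHARED_VALIDATE */

    int map_sync_example(void)
    {
            int fd = open("/dev/dax0.0", O_RDWR);
            void *p;

            if (fd < 0)
                    return -1;

            /* MAP_SYNC is only valid together with MAP_SHARED_VALIDATE */
            p = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
                     MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
            close(fd);

            return p == MAP_FAILED ? -1 : 0;
    }
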
1753 +diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1754 +index c00e3923d7d8..94236ec9d410 100644
1755 +--- a/drivers/dma/at_xdmac.c
1756 ++++ b/drivers/dma/at_xdmac.c
1757 +@@ -1471,10 +1471,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1758 + for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1759 + check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1760 + rmb();
1761 +- initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1762 +- rmb();
1763 + cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1764 + rmb();
1765 ++ initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1766 ++ rmb();
1767 + cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1768 + rmb();
1769 +
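
The at_xdmac hunk reorders the residue-loop reads so that CUBC is sampled between the two CNDA reads, with INITD checked afterwards; the rmb() calls keep the MMIO reads from being reordered against each other. The underlying snapshot-and-recheck pattern, sketched with placeholder register offsets:

    #include <linux/io.h>

    #define REG_A 0x20    /* placeholder offsets, not the real registers */
    #define REG_B 0x24

    static u32 snapshot_b(void __iomem *base)
    {
            u32 a1, b, a2;

            do {
                    a1 = readl(base + REG_A);
                    rmb();          /* read A strictly before B */
                    b = readl(base + REG_B);
                    rmb();          /* read B strictly before re-reading A */
                    a2 = readl(base + REG_A);
            } while (a1 != a2);     /* retry until B was sampled with A stable */

            return b;
    }
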
1770 +diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
1771 +index 7c4bc8c44c3f..b7e9ea377d70 100644
1772 +--- a/drivers/extcon/extcon-intel-cht-wc.c
1773 ++++ b/drivers/extcon/extcon-intel-cht-wc.c
1774 +@@ -66,6 +66,8 @@
1775 +
1776 + #define CHT_WC_VBUS_GPIO_CTLO 0x6e2d
1777 + #define CHT_WC_VBUS_GPIO_CTLO_OUTPUT BIT(0)
1778 ++#define CHT_WC_VBUS_GPIO_CTLO_DRV_OD BIT(4)
1779 ++#define CHT_WC_VBUS_GPIO_CTLO_DIR_OUT BIT(5)
1780 +
1781 + enum cht_wc_usb_id {
1782 + USB_ID_OTG,
1783 +@@ -183,14 +185,15 @@ static void cht_wc_extcon_set_5v_boost(struct cht_wc_extcon_data *ext,
1784 + {
1785 + int ret, val;
1786 +
1787 +- val = enable ? CHT_WC_VBUS_GPIO_CTLO_OUTPUT : 0;
1788 +-
1789 + /*
1790 + * The 5V boost converter is enabled through a gpio on the PMIC; since
1791 + * there currently is no gpio driver, we access the gpio reg directly.
1792 + */
1793 +- ret = regmap_update_bits(ext->regmap, CHT_WC_VBUS_GPIO_CTLO,
1794 +- CHT_WC_VBUS_GPIO_CTLO_OUTPUT, val);
1795 ++ val = CHT_WC_VBUS_GPIO_CTLO_DRV_OD | CHT_WC_VBUS_GPIO_CTLO_DIR_OUT;
1796 ++ if (enable)
1797 ++ val |= CHT_WC_VBUS_GPIO_CTLO_OUTPUT;
1798 ++
1799 ++ ret = regmap_write(ext->regmap, CHT_WC_VBUS_GPIO_CTLO, val);
1800 + if (ret)
1801 + dev_err(ext->dev, "Error writing Vbus GPIO CTLO: %d\n", ret);
1802 + }
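
In the extcon hunk the driver stops doing a read-modify-write of a single bit (regmap_update_bits()) and instead writes the whole GPIO control register, so the direction and open-drain configuration bits are programmed explicitly on every call rather than inherited from whatever state the PMIC was left in. A sketch of the pattern, with made-up register and bit names:

    #include <linux/bits.h>
    #include <linux/regmap.h>

    #define MY_GPIO_REG      0x6e2d    /* placeholder register address */
    #define MY_GPIO_OUT      BIT(0)
    #define MY_GPIO_OD       BIT(4)
    #define MY_GPIO_DIR_OUT  BIT(5)

    static int set_boost_gpio(struct regmap *map, bool enable)
    {
            unsigned int val = MY_GPIO_OD | MY_GPIO_DIR_OUT;

            if (enable)
                    val |= MY_GPIO_OUT;

            /* full write: direction/drive bits are always well defined */
            return regmap_write(map, MY_GPIO_REG, val);
    }
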
1803 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1804 +index c53095b3b0fb..1ae5ae8c45a4 100644
1805 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1806 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1807 +@@ -569,6 +569,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
1808 + { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
1809 + { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
1810 + { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
1811 ++ { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
1812 + { 0, 0, 0, 0, 0 },
1813 + };
1814 +
1815 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
1816 +index 59089e027f4d..92be7f6de197 100644
1817 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
1818 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
1819 +@@ -233,8 +233,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
1820 + for (i = 0; i < list->num_entries; i++) {
1821 + unsigned priority = list->array[i].priority;
1822 +
1823 +- list_add_tail(&list->array[i].tv.head,
1824 +- &bucket[priority]);
1825 ++ if (!list->array[i].robj->parent)
1826 ++ list_add_tail(&list->array[i].tv.head,
1827 ++ &bucket[priority]);
1828 ++
1829 + list->array[i].user_pages = NULL;
1830 + }
1831 +
1832 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1833 +index e80fc38141b5..b03b2983de1e 100644
1834 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1835 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1836 +@@ -542,7 +542,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
1837 + INIT_LIST_HEAD(&duplicates);
1838 + amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
1839 +
1840 +- if (p->uf_entry.robj)
1841 ++ if (p->uf_entry.robj && !p->uf_entry.robj->parent)
1842 + list_add(&p->uf_entry.tv.head, &p->validated);
1843 +
1844 + while (1) {
1845 +diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
1846 +index 6e8278e689b1..0066da3e79bb 100644
1847 +--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
1848 ++++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
1849 +@@ -866,7 +866,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1850 + amdgpu_ring_write(ring, addr & 0xfffffffc);
1851 + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1852 + amdgpu_ring_write(ring, seq); /* reference */
1853 +- amdgpu_ring_write(ring, 0xfffffff); /* mask */
1854 ++ amdgpu_ring_write(ring, 0xffffffff); /* mask */
1855 + amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
1856 + }
1857 +
1858 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
1859 +index d4787ad4d346..bd844edad6b7 100644
1860 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
1861 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
1862 +@@ -844,7 +844,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1863 + amdgpu_ring_write(ring, addr & 0xfffffffc);
1864 + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1865 + amdgpu_ring_write(ring, seq); /* reference */
1866 +- amdgpu_ring_write(ring, 0xfffffff); /* mask */
1867 ++ amdgpu_ring_write(ring, 0xffffffff); /* mask */
1868 + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1869 + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1870 + }
1871 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
1872 +index 521978c40537..fa63c564cf91 100644
1873 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
1874 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
1875 +@@ -1110,7 +1110,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1876 + amdgpu_ring_write(ring, addr & 0xfffffffc);
1877 + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1878 + amdgpu_ring_write(ring, seq); /* reference */
1879 +- amdgpu_ring_write(ring, 0xfffffff); /* mask */
1880 ++ amdgpu_ring_write(ring, 0xffffffff); /* mask */
1881 + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1882 + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1883 + }
1884 +diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1885 +index 91cf95a8c39c..036798b52f67 100644
1886 +--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1887 ++++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1888 +@@ -1113,7 +1113,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1889 + amdgpu_ring_write(ring, addr & 0xfffffffc);
1890 + amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1891 + amdgpu_ring_write(ring, seq); /* reference */
1892 +- amdgpu_ring_write(ring, 0xfffffff); /* mask */
1893 ++ amdgpu_ring_write(ring, 0xffffffff); /* mask */
1894 + amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1895 + SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1896 + }
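
The four SDMA hunks fix the same typo: the POLL_REGMEM mask was 0xfffffff (seven f's, i.e. 28 bits), so the top nibble of the fence sequence number was ignored in the comparison. Plain C showing the difference (the values are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t seq = 0x90000001, other = 0x80000001;

            /* 28-bit mask: the two values wrongly compare equal */
            printf("%d\n", (seq & 0xfffffff) == (other & 0xfffffff));   /* 1 */
            /* full 32-bit mask keeps the high nibble significant */
            printf("%d\n", (seq & 0xffffffff) == (other & 0xffffffff)); /* 0 */
            return 0;
    }
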
1897 +diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
1898 +index 2095173aaabf..3598151652d7 100644
1899 +--- a/drivers/gpu/drm/amd/amdgpu/si.c
1900 ++++ b/drivers/gpu/drm/amd/amdgpu/si.c
1901 +@@ -1231,6 +1231,71 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
1902 + adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
1903 + }
1904 +
1905 ++static int si_get_pcie_lanes(struct amdgpu_device *adev)
1906 ++{
1907 ++ u32 link_width_cntl;
1908 ++
1909 ++ if (adev->flags & AMD_IS_APU)
1910 ++ return 0;
1911 ++
1912 ++ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1913 ++
1914 ++ switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
1915 ++ case LC_LINK_WIDTH_X1:
1916 ++ return 1;
1917 ++ case LC_LINK_WIDTH_X2:
1918 ++ return 2;
1919 ++ case LC_LINK_WIDTH_X4:
1920 ++ return 4;
1921 ++ case LC_LINK_WIDTH_X8:
1922 ++ return 8;
1923 ++ case LC_LINK_WIDTH_X0:
1924 ++ case LC_LINK_WIDTH_X16:
1925 ++ default:
1926 ++ return 16;
1927 ++ }
1928 ++}
1929 ++
1930 ++static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
1931 ++{
1932 ++ u32 link_width_cntl, mask;
1933 ++
1934 ++ if (adev->flags & AMD_IS_APU)
1935 ++ return;
1936 ++
1937 ++ switch (lanes) {
1938 ++ case 0:
1939 ++ mask = LC_LINK_WIDTH_X0;
1940 ++ break;
1941 ++ case 1:
1942 ++ mask = LC_LINK_WIDTH_X1;
1943 ++ break;
1944 ++ case 2:
1945 ++ mask = LC_LINK_WIDTH_X2;
1946 ++ break;
1947 ++ case 4:
1948 ++ mask = LC_LINK_WIDTH_X4;
1949 ++ break;
1950 ++ case 8:
1951 ++ mask = LC_LINK_WIDTH_X8;
1952 ++ break;
1953 ++ case 16:
1954 ++ mask = LC_LINK_WIDTH_X16;
1955 ++ break;
1956 ++ default:
1957 ++ DRM_ERROR("invalid pcie lane request: %d\n", lanes);
1958 ++ return;
1959 ++ }
1960 ++
1961 ++ link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1962 ++ link_width_cntl &= ~LC_LINK_WIDTH_MASK;
1963 ++ link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
1964 ++ link_width_cntl |= (LC_RECONFIG_NOW |
1965 ++ LC_RECONFIG_ARC_MISSING_ESCAPE);
1966 ++
1967 ++ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1968 ++}
1969 ++
1970 + static const struct amdgpu_asic_funcs si_asic_funcs =
1971 + {
1972 + .read_disabled_bios = &si_read_disabled_bios,
1973 +@@ -1241,6 +1306,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
1974 + .get_xclk = &si_get_xclk,
1975 + .set_uvd_clocks = &si_set_uvd_clocks,
1976 + .set_vce_clocks = NULL,
1977 ++ .get_pcie_lanes = &si_get_pcie_lanes,
1978 ++ .set_pcie_lanes = &si_set_pcie_lanes,
1979 + .get_config_memsize = &si_get_config_memsize,
1980 + };
1981 +
1982 +diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1983 +index 22f0b7ff3ac9..b1a3ca585ed1 100644
1984 +--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1985 ++++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1986 +@@ -6370,9 +6370,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
1987 + {
1988 + u32 lane_width;
1989 + u32 new_lane_width =
1990 +- (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
1991 ++ ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
1992 + u32 current_lane_width =
1993 +- (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
1994 ++ ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
1995 +
1996 + if (new_lane_width != current_lane_width) {
1997 + amdgpu_set_pcie_lanes(adev, new_lane_width);
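
Both si_dpm hunks (here and in the radeon copy further down) add a +1 when decoding the PPLIB lane-width field, which implies the field stores the lane count biased by one (0 = x1 ... 15 = x16). The decode, factored out as a sketch under that assumption:

    /* Hedged: assumes the caps field holds (lanes - 1). */
    static inline u32 pplib_decode_lanes(u32 caps)
    {
            return ((caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
                    ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
    }
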
1998 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1999 +index 63c67346d316..8a6e6fbc78cd 100644
2000 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2001 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2002 +@@ -4776,33 +4776,6 @@ static int dm_update_planes_state(struct dc *dc,
2003 + return ret;
2004 + }
2005 +
2006 +-static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
2007 +- struct drm_crtc *crtc)
2008 +-{
2009 +- struct drm_plane *plane;
2010 +- struct drm_crtc_state *crtc_state;
2011 +-
2012 +- WARN_ON(!drm_atomic_get_new_crtc_state(state, crtc));
2013 +-
2014 +- drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
2015 +- struct drm_plane_state *plane_state =
2016 +- drm_atomic_get_plane_state(state, plane);
2017 +-
2018 +- if (IS_ERR(plane_state))
2019 +- return -EDEADLK;
2020 +-
2021 +- crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
2022 +- if (IS_ERR(crtc_state))
2023 +- return PTR_ERR(crtc_state);
2024 +-
2025 +- if (crtc->primary == plane && crtc_state->active) {
2026 +- if (!plane_state->fb)
2027 +- return -EINVAL;
2028 +- }
2029 +- }
2030 +- return 0;
2031 +-}
2032 +-
2033 + static int amdgpu_dm_atomic_check(struct drm_device *dev,
2034 + struct drm_atomic_state *state)
2035 + {
2036 +@@ -4826,10 +4799,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
2037 + goto fail;
2038 +
2039 + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2040 +- ret = dm_atomic_check_plane_state_fb(state, crtc);
2041 +- if (ret)
2042 +- goto fail;
2043 +-
2044 + if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
2045 + !new_crtc_state->color_mgmt_changed)
2046 + continue;
2047 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
2048 +index 83bae207371d..b3c30abcb8f1 100644
2049 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
2050 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
2051 +@@ -736,6 +736,8 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
2052 + if (info_frame->avi.valid) {
2053 + const uint32_t *content =
2054 + (const uint32_t *) &info_frame->avi.sb[0];
2055 ++ /* we need to turn on the clock before programming the AFMT block */
2056 ++ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
2057 +
2058 + REG_WRITE(AFMT_AVI_INFO0, content[0]);
2059 +
2060 +diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
2061 +index c6197d990818..d35b93f5ecfc 100644
2062 +--- a/drivers/gpu/drm/i915/gvt/gvt.h
2063 ++++ b/drivers/gpu/drm/i915/gvt/gvt.h
2064 +@@ -308,7 +308,10 @@ struct intel_gvt {
2065 + wait_queue_head_t service_thread_wq;
2066 + unsigned long service_request;
2067 +
2068 +- struct engine_mmio *engine_mmio_list;
2069 ++ struct {
2070 ++ struct engine_mmio *mmio;
2071 ++ int ctx_mmio_count[I915_NUM_ENGINES];
2072 ++ } engine_mmio_list;
2073 +
2074 + struct dentry *debugfs_root;
2075 + };
2076 +diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
2077 +index 152df3d0291e..c44dba338c57 100644
2078 +--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
2079 ++++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
2080 +@@ -50,6 +50,8 @@
2081 + #define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
2082 + #define VF_GUARDBAND _MMIO(0x83a4)
2083 +
2084 ++#define GEN9_MOCS_SIZE 64
2085 ++
2086 + /* Raw offset is appended to each line for convenience. */
2087 + static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
2088 + {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
2089 +@@ -152,8 +154,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
2090 +
2091 + static struct {
2092 + bool initialized;
2093 +- u32 control_table[I915_NUM_ENGINES][64];
2094 +- u32 l3cc_table[32];
2095 ++ u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
2096 ++ u32 l3cc_table[GEN9_MOCS_SIZE / 2];
2097 + } gen9_render_mocs;
2098 +
2099 + static void load_render_mocs(struct drm_i915_private *dev_priv)
2100 +@@ -170,7 +172,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
2101 +
2102 + for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
2103 + offset.reg = regs[ring_id];
2104 +- for (i = 0; i < 64; i++) {
2105 ++ for (i = 0; i < GEN9_MOCS_SIZE; i++) {
2106 + gen9_render_mocs.control_table[ring_id][i] =
2107 + I915_READ_FW(offset);
2108 + offset.reg += 4;
2109 +@@ -178,7 +180,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
2110 + }
2111 +
2112 + offset.reg = 0xb020;
2113 +- for (i = 0; i < 32; i++) {
2114 ++ for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
2115 + gen9_render_mocs.l3cc_table[i] =
2116 + I915_READ_FW(offset);
2117 + offset.reg += 4;
2118 +@@ -186,6 +188,153 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
2119 + gen9_render_mocs.initialized = true;
2120 + }
2121 +
2122 ++static int
2123 ++restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
2124 ++ struct drm_i915_gem_request *req)
2125 ++{
2126 ++ u32 *cs;
2127 ++ int ret;
2128 ++ struct engine_mmio *mmio;
2129 ++ struct intel_gvt *gvt = vgpu->gvt;
2130 ++ int ring_id = req->engine->id;
2131 ++ int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
2132 ++
2133 ++ if (count == 0)
2134 ++ return 0;
2135 ++
2136 ++ ret = req->engine->emit_flush(req, EMIT_BARRIER);
2137 ++ if (ret)
2138 ++ return ret;
2139 ++
2140 ++ cs = intel_ring_begin(req, count * 2 + 2);
2141 ++ if (IS_ERR(cs))
2142 ++ return PTR_ERR(cs);
2143 ++
2144 ++ *cs++ = MI_LOAD_REGISTER_IMM(count);
2145 ++ for (mmio = gvt->engine_mmio_list.mmio;
2146 ++ i915_mmio_reg_valid(mmio->reg); mmio++) {
2147 ++ if (mmio->ring_id != ring_id ||
2148 ++ !mmio->in_context)
2149 ++ continue;
2150 ++
2151 ++ *cs++ = i915_mmio_reg_offset(mmio->reg);
2152 ++ *cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
2153 ++ (mmio->mask << 16);
2154 ++ gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
2155 ++ *(cs-2), *(cs-1), vgpu->id, ring_id);
2156 ++ }
2157 ++
2158 ++ *cs++ = MI_NOOP;
2159 ++ intel_ring_advance(req, cs);
2160 ++
2161 ++ ret = req->engine->emit_flush(req, EMIT_BARRIER);
2162 ++ if (ret)
2163 ++ return ret;
2164 ++
2165 ++ return 0;
2166 ++}
2167 ++
2168 ++static int
2169 ++restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
2170 ++ struct drm_i915_gem_request *req)
2171 ++{
2172 ++ unsigned int index;
2173 ++ u32 *cs;
2174 ++
2175 ++ cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
2176 ++ if (IS_ERR(cs))
2177 ++ return PTR_ERR(cs);
2178 ++
2179 ++ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
2180 ++
2181 ++ for (index = 0; index < GEN9_MOCS_SIZE; index++) {
2182 ++ *cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
2183 ++ *cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
2184 ++ gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
2185 ++ *(cs-2), *(cs-1), vgpu->id, req->engine->id);
2186 ++
2187 ++ }
2188 ++
2189 ++ *cs++ = MI_NOOP;
2190 ++ intel_ring_advance(req, cs);
2191 ++
2192 ++ return 0;
2193 ++}
2194 ++
2195 ++static int
2196 ++restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
2197 ++ struct drm_i915_gem_request *req)
2198 ++{
2199 ++ unsigned int index;
2200 ++ u32 *cs;
2201 ++
2202 ++ cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
2203 ++ if (IS_ERR(cs))
2204 ++ return PTR_ERR(cs);
2205 ++
2206 ++ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
2207 ++
2208 ++ for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
2209 ++ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
2210 ++ *cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
2211 ++ gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
2212 ++ *(cs-2), *(cs-1), vgpu->id, req->engine->id);
2213 ++
2214 ++ }
2215 ++
2216 ++ *cs++ = MI_NOOP;
2217 ++ intel_ring_advance(req, cs);
2218 ++
2219 ++ return 0;
2220 ++}
2221 ++
2222 ++/*
2223 ++ * Use the lri command to initialize the mmio which is in the context
2224 ++ * state image for an inhibit context; it contains the tracked engine
2225 ++ * mmio, render_mocs and render_mocs_l3cc.
2226 ++ */
2227 ++int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
2228 ++ struct drm_i915_gem_request *req)
2229 ++{
2230 ++ int ret;
2231 ++ u32 *cs;
2232 ++
2233 ++ cs = intel_ring_begin(req, 2);
2234 ++ if (IS_ERR(cs))
2235 ++ return PTR_ERR(cs);
2236 ++
2237 ++ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
2238 ++ *cs++ = MI_NOOP;
2239 ++ intel_ring_advance(req, cs);
2240 ++
2241 ++ ret = restore_context_mmio_for_inhibit(vgpu, req);
2242 ++ if (ret)
2243 ++ goto out;
2244 ++
2245 ++ /* no MOCS register in context except render engine */
2246 ++ if (req->engine->id != RCS)
2247 ++ goto out;
2248 ++
2249 ++ ret = restore_render_mocs_control_for_inhibit(vgpu, req);
2250 ++ if (ret)
2251 ++ goto out;
2252 ++
2253 ++ ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
2254 ++ if (ret)
2255 ++ goto out;
2256 ++
2257 ++out:
2258 ++ cs = intel_ring_begin(req, 2);
2259 ++ if (IS_ERR(cs))
2260 ++ return PTR_ERR(cs);
2261 ++
2262 ++ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
2263 ++ *cs++ = MI_NOOP;
2264 ++ intel_ring_advance(req, cs);
2265 ++
2266 ++ return ret;
2267 ++}
2268 ++
2269 + static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
2270 + {
2271 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
2272 +@@ -252,11 +401,14 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
2273 + if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
2274 + return;
2275 +
2276 ++ if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
2277 ++ return;
2278 ++
2279 + if (!pre && !gen9_render_mocs.initialized)
2280 + load_render_mocs(dev_priv);
2281 +
2282 + offset.reg = regs[ring_id];
2283 +- for (i = 0; i < 64; i++) {
2284 ++ for (i = 0; i < GEN9_MOCS_SIZE; i++) {
2285 + if (pre)
2286 + old_v = vgpu_vreg_t(pre, offset);
2287 + else
2288 +@@ -274,7 +426,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
2289 +
2290 + if (ring_id == RCS) {
2291 + l3_offset.reg = 0xb020;
2292 +- for (i = 0; i < 32; i++) {
2293 ++ for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
2294 + if (pre)
2295 + old_v = vgpu_vreg_t(pre, l3_offset);
2296 + else
2297 +@@ -294,6 +446,16 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
2298 +
2299 + #define CTX_CONTEXT_CONTROL_VAL 0x03
2300 +
2301 ++bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
2302 ++{
2303 ++ u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
2304 ++ u32 inhibit_mask =
2305 ++ _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
2306 ++
2307 ++ return inhibit_mask ==
2308 ++ (reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
2309 ++}
2310 ++
2311 + /* Switch ring mmio values (context). */
2312 + static void switch_mmio(struct intel_vgpu *pre,
2313 + struct intel_vgpu *next,
2314 +@@ -301,9 +463,6 @@ static void switch_mmio(struct intel_vgpu *pre,
2315 + {
2316 + struct drm_i915_private *dev_priv;
2317 + struct intel_vgpu_submission *s;
2318 +- u32 *reg_state, ctx_ctrl;
2319 +- u32 inhibit_mask =
2320 +- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
2321 + struct engine_mmio *mmio;
2322 + u32 old_v, new_v;
2323 +
2324 +@@ -311,10 +470,18 @@ static void switch_mmio(struct intel_vgpu *pre,
2325 + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2326 + switch_mocs(pre, next, ring_id);
2327 +
2328 +- for (mmio = dev_priv->gvt->engine_mmio_list;
2329 ++ for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
2330 + i915_mmio_reg_valid(mmio->reg); mmio++) {
2331 + if (mmio->ring_id != ring_id)
2332 + continue;
2333 ++ /*
2334 ++ * No need to save or restore the mmio which is in the context
2335 ++ * state image on Kabylake; it is initialized by the lri command
2336 ++ * and saved or restored together with the context.
2337 ++ */
2338 ++ if (IS_KABYLAKE(dev_priv) && mmio->in_context)
2339 ++ continue;
2340 ++
2341 + // save
2342 + if (pre) {
2343 + vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
2344 +@@ -328,16 +495,13 @@ static void switch_mmio(struct intel_vgpu *pre,
2345 + // restore
2346 + if (next) {
2347 + s = &next->submission;
2348 +- reg_state =
2349 +- s->shadow_ctx->engine[ring_id].lrc_reg_state;
2350 +- ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
2351 + /*
2352 +- * if it is an inhibit context, load in_context mmio
2353 +- * into HW by mmio write. If it is not, skip this mmio
2354 +- * write.
2355 ++ * No need to restore the mmio which is in the context state
2356 ++ * image if this is not an inhibit context; it will be
2357 ++ * restored by the context itself.
2358 + */
2359 + if (mmio->in_context &&
2360 +- (ctx_ctrl & inhibit_mask) != inhibit_mask)
2361 ++ !is_inhibit_context(s->shadow_ctx, ring_id))
2362 + continue;
2363 +
2364 + if (mmio->mask)
2365 +@@ -408,8 +572,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
2366 + */
2367 + void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
2368 + {
2369 ++ struct engine_mmio *mmio;
2370 ++
2371 + if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
2372 +- gvt->engine_mmio_list = gen9_engine_mmio_list;
2373 ++ gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
2374 + else
2375 +- gvt->engine_mmio_list = gen8_engine_mmio_list;
2376 ++ gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
2377 ++
2378 ++ for (mmio = gvt->engine_mmio_list.mmio;
2379 ++ i915_mmio_reg_valid(mmio->reg); mmio++) {
2380 ++ if (mmio->in_context)
2381 ++ gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
2382 ++ }
2383 + }
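
The new is_inhibit_context() relies on i915's masked-register convention: _MASKED_BIT_ENABLE(b) expands to ((b) << 16 | (b)), i.e. the write-enable bit in the upper half plus the value bit in the lower half, and the predicate only holds when both are set in the saved CTX_CONTEXT_CONTROL word. The test in isolation, as a sketch:

    #include <linux/types.h>

    /* true iff the masked bit was explicitly written as enabled */
    static bool masked_bit_enabled(u32 reg_val, u32 bit)
    {
            u32 mask = (bit << 16) | bit;    /* _MASKED_BIT_ENABLE(bit) */

            return (reg_val & mask) == mask;
    }
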
2384 +diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
2385 +index ca2c6a745673..0b1d98536653 100644
2386 +--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
2387 ++++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
2388 +@@ -49,4 +49,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
2389 +
2390 + void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
2391 +
2392 ++bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
2393 ++
2394 ++int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
2395 ++ struct drm_i915_gem_request *req);
2396 ++
2397 + #endif
2398 +diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
2399 +index d74d6f05c62c..88b7b47695bd 100644
2400 +--- a/drivers/gpu/drm/i915/gvt/scheduler.c
2401 ++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
2402 +@@ -275,6 +275,11 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
2403 + struct intel_vgpu *vgpu = workload->vgpu;
2404 + void *shadow_ring_buffer_va;
2405 + u32 *cs;
2406 ++ struct drm_i915_gem_request *req = workload->req;
2407 ++
2408 ++ if (IS_KABYLAKE(req->i915) &&
2409 ++ is_inhibit_context(req->ctx, req->engine->id))
2410 ++ intel_vgpu_restore_inhibit_context(vgpu, req);
2411 +
2412 + /* allocate shadow ring buffer */
2413 + cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
2414 +diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
2415 +index 2f5209de0391..f1cd4f0ffc62 100644
2416 +--- a/drivers/gpu/drm/i915/i915_drv.c
2417 ++++ b/drivers/gpu/drm/i915/i915_drv.c
2418 +@@ -1599,15 +1599,12 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
2419 + {
2420 + struct drm_i915_private *dev_priv = to_i915(dev);
2421 + struct pci_dev *pdev = dev_priv->drm.pdev;
2422 +- bool fw_csr;
2423 + int ret;
2424 +
2425 + disable_rpm_wakeref_asserts(dev_priv);
2426 +
2427 + intel_display_set_init_power(dev_priv, false);
2428 +
2429 +- fw_csr = !IS_GEN9_LP(dev_priv) && !hibernation &&
2430 +- suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
2431 + /*
2432 + * In case of firmware assisted context save/restore don't manually
2433 + * deinit the power domains. This also means the CSR/DMC firmware will
2434 +@@ -1615,8 +1612,11 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
2435 + * also enable deeper system power states that would be blocked if the
2436 + * firmware was inactive.
2437 + */
2438 +- if (!fw_csr)
2439 ++ if (IS_GEN9_LP(dev_priv) || hibernation || !suspend_to_idle(dev_priv) ||
2440 ++ dev_priv->csr.dmc_payload == NULL) {
2441 + intel_power_domains_suspend(dev_priv);
2442 ++ dev_priv->power_domains_suspended = true;
2443 ++ }
2444 +
2445 + ret = 0;
2446 + if (IS_GEN9_LP(dev_priv))
2447 +@@ -1628,8 +1628,10 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
2448 +
2449 + if (ret) {
2450 + DRM_ERROR("Suspend complete failed: %d\n", ret);
2451 +- if (!fw_csr)
2452 ++ if (dev_priv->power_domains_suspended) {
2453 + intel_power_domains_init_hw(dev_priv, true);
2454 ++ dev_priv->power_domains_suspended = false;
2455 ++ }
2456 +
2457 + goto out;
2458 + }
2459 +@@ -1650,8 +1652,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
2460 + if (!(hibernation && INTEL_GEN(dev_priv) < 6))
2461 + pci_set_power_state(pdev, PCI_D3hot);
2462 +
2463 +- dev_priv->suspended_to_idle = suspend_to_idle(dev_priv);
2464 +-
2465 + out:
2466 + enable_rpm_wakeref_asserts(dev_priv);
2467 +
2468 +@@ -1818,8 +1818,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
2469 + intel_uncore_resume_early(dev_priv);
2470 +
2471 + if (IS_GEN9_LP(dev_priv)) {
2472 +- if (!dev_priv->suspended_to_idle)
2473 +- gen9_sanitize_dc_state(dev_priv);
2474 ++ gen9_sanitize_dc_state(dev_priv);
2475 + bxt_disable_dc9(dev_priv);
2476 + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
2477 + hsw_disable_pc8(dev_priv);
2478 +@@ -1827,8 +1826,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
2479 +
2480 + intel_uncore_sanitize(dev_priv);
2481 +
2482 +- if (IS_GEN9_LP(dev_priv) ||
2483 +- !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
2484 ++ if (dev_priv->power_domains_suspended)
2485 + intel_power_domains_init_hw(dev_priv, true);
2486 + else
2487 + intel_display_set_init_power(dev_priv, true);
2488 +@@ -1838,7 +1836,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
2489 + enable_rpm_wakeref_asserts(dev_priv);
2490 +
2491 + out:
2492 +- dev_priv->suspended_to_idle = false;
2493 ++ dev_priv->power_domains_suspended = false;
2494 +
2495 + return ret;
2496 + }
2497 +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
2498 +index d307429a5ae0..55c6d9077a8a 100644
2499 +--- a/drivers/gpu/drm/i915/i915_drv.h
2500 ++++ b/drivers/gpu/drm/i915/i915_drv.h
2501 +@@ -2099,7 +2099,7 @@ struct drm_i915_private {
2502 + u32 bxt_phy_grc;
2503 +
2504 + u32 suspend_count;
2505 +- bool suspended_to_idle;
2506 ++ bool power_domains_suspended;
2507 + struct i915_suspend_saved_registers regfile;
2508 + struct vlv_s0ix_state vlv_s0ix_state;
2509 +
2510 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
2511 +index 33eb0c5b1d32..175d552c8bae 100644
2512 +--- a/drivers/gpu/drm/i915/i915_reg.h
2513 ++++ b/drivers/gpu/drm/i915/i915_reg.h
2514 +@@ -6236,6 +6236,12 @@ enum {
2515 + #define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
2516 + #define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
2517 + #define SP_CONST_ALPHA_ENABLE (1<<31)
2518 ++#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
2519 ++#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */
2520 ++#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */
2521 ++#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
2522 ++#define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */
2523 ++#define SP_SH_COS(x) (x) /* u3.7 */
2524 + #define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
2525 +
2526 + #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
2527 +@@ -6249,6 +6255,8 @@ enum {
2528 + #define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
2529 + #define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
2530 + #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
2531 ++#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
2532 ++#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
2533 + #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
2534 +
2535 + #define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
2536 +@@ -6265,6 +6273,8 @@ enum {
2537 + #define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
2538 + #define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
2539 + #define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
2540 ++#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
2541 ++#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
2542 + #define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
2543 +
2544 + /*
2545 +diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
2546 +index dd485f59eb1d..fb95074a67ff 100644
2547 +--- a/drivers/gpu/drm/i915/intel_sprite.c
2548 ++++ b/drivers/gpu/drm/i915/intel_sprite.c
2549 +@@ -346,44 +346,87 @@ skl_plane_get_hw_state(struct intel_plane *plane)
2550 + }
2551 +
2552 + static void
2553 +-chv_update_csc(struct intel_plane *plane, uint32_t format)
2554 ++chv_update_csc(const struct intel_plane_state *plane_state)
2555 + {
2556 ++ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2557 + struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2558 ++ const struct drm_framebuffer *fb = plane_state->base.fb;
2559 + enum plane_id plane_id = plane->id;
2560 +
2561 + /* Seems RGB data bypasses the CSC always */
2562 +- if (!format_is_yuv(format))
2563 ++ if (!format_is_yuv(fb->format->format))
2564 + return;
2565 +
2566 + /*
2567 +- * BT.601 limited range YCbCr -> full range RGB
2568 ++ * BT.601 full range YCbCr -> full range RGB
2569 + *
2570 +- * |r| | 6537 4769 0| |cr |
2571 +- * |g| = |-3330 4769 -1605| x |y-64|
2572 +- * |b| | 0 4769 8263| |cb |
2573 ++ * |r| | 5743 4096 0| |cr|
2574 ++ * |g| = |-2925 4096 -1410| x |y |
2575 ++ * |b| | 0 4096 7258| |cb|
2576 + *
2577 +- * Cb and Cr apparently come in as signed already, so no
2578 +- * need for any offset. For Y we need to remove the offset.
2579 ++ * Cb and Cr apparently come in as signed already,
2580 ++ * and we get full range data in, on account of CLRC0/1
2581 + */
2582 +- I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
2583 ++ I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
2584 + I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
2585 + I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
2586 +
2587 +- I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
2588 +- I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
2589 +- I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
2590 +- I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
2591 +- I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(8263));
2592 ++ I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4096) | SPCSC_C0(5743));
2593 ++ I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-2925) | SPCSC_C0(0));
2594 ++ I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1410) | SPCSC_C0(4096));
2595 ++ I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4096) | SPCSC_C0(0));
2596 ++ I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(7258));
2597 +
2598 +- I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
2599 +- I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
2600 +- I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
2601 ++ I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
2602 ++ I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
2603 ++ I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
2604 +
2605 + I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
2606 + I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
2607 + I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
2608 + }
2609 +
2610 ++#define SIN_0 0
2611 ++#define COS_0 1
2612 ++
2613 ++static void
2614 ++vlv_update_clrc(const struct intel_plane_state *plane_state)
2615 ++{
2616 ++ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2617 ++ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2618 ++ const struct drm_framebuffer *fb = plane_state->base.fb;
2619 ++ enum pipe pipe = plane->pipe;
2620 ++ enum plane_id plane_id = plane->id;
2621 ++ int contrast, brightness, sh_scale, sh_sin, sh_cos;
2622 ++
2623 ++ if (format_is_yuv(fb->format->format)) {
2624 ++ /*
2625 ++ * Expand limited range to full range:
2626 ++ * Contrast is applied first and is used to expand Y range.
2627 ++ * Brightness is applied second and is used to remove the
2628 ++ * offset from Y. Saturation/hue is used to expand CbCr range.
2629 ++ */
2630 ++ contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);
2631 ++ brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);
2632 ++ sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);
2633 ++ sh_sin = SIN_0 * sh_scale;
2634 ++ sh_cos = COS_0 * sh_scale;
2635 ++ } else {
2636 ++ /* Pass-through everything. */
2637 ++ contrast = 1 << 6;
2638 ++ brightness = 0;
2639 ++ sh_scale = 1 << 7;
2640 ++ sh_sin = SIN_0 * sh_scale;
2641 ++ sh_cos = COS_0 * sh_scale;
2642 ++ }
2643 ++
2644 ++ /* FIXME these registers are single buffered :( */
2645 ++ I915_WRITE_FW(SPCLRC0(pipe, plane_id),
2646 ++ SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
2647 ++ I915_WRITE_FW(SPCLRC1(pipe, plane_id),
2648 ++ SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
2649 ++}
2650 ++
2651 + static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
2652 + const struct intel_plane_state *plane_state)
2653 + {
2654 +@@ -477,8 +520,10 @@ vlv_update_plane(struct intel_plane *plane,
2655 +
2656 + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
2657 +
2658 ++ vlv_update_clrc(plane_state);
2659 ++
2660 + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
2661 +- chv_update_csc(plane, fb->format->format);
2662 ++ chv_update_csc(plane_state);
2663 +
2664 + if (key->flags) {
2665 + I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
2666 +diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2667 +index 7828a5e10629..0bbc23175d49 100644
2668 +--- a/drivers/gpu/drm/radeon/radeon_device.c
2669 ++++ b/drivers/gpu/drm/radeon/radeon_device.c
2670 +@@ -139,6 +139,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
2671 + * https://bugs.freedesktop.org/show_bug.cgi?id=101491
2672 + */
2673 + { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
2674 ++ /* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
2675 ++ * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
2676 ++ */
2677 ++ { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
2678 + { 0, 0, 0, 0, 0 },
2679 + };
2680 +
2681 +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2682 +index 97a0a639dad9..90d5b41007bf 100644
2683 +--- a/drivers/gpu/drm/radeon/si_dpm.c
2684 ++++ b/drivers/gpu/drm/radeon/si_dpm.c
2685 +@@ -5912,9 +5912,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
2686 + {
2687 + u32 lane_width;
2688 + u32 new_lane_width =
2689 +- (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
2690 ++ ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
2691 + u32 current_lane_width =
2692 +- (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
2693 ++ ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
2694 +
2695 + if (new_lane_width != current_lane_width) {
2696 + radeon_set_pcie_lanes(rdev, new_lane_width);
2697 +diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
2698 +index ba7505292b78..7b224e08cbf1 100644
2699 +--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
2700 ++++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
2701 +@@ -1414,6 +1414,9 @@ static int vop_initial(struct vop *vop)
2702 + usleep_range(10, 20);
2703 + reset_control_deassert(ahb_rst);
2704 +
2705 ++ VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
2706 ++ VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);
2707 ++
2708 + memcpy(vop->regsbak, vop->regs, vop->len);
2709 +
2710 + VOP_REG_SET(vop, misc, global_regdone_en, 1);
2711 +@@ -1569,17 +1572,9 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
2712 +
2713 + mutex_init(&vop->vsync_mutex);
2714 +
2715 +- ret = devm_request_irq(dev, vop->irq, vop_isr,
2716 +- IRQF_SHARED, dev_name(dev), vop);
2717 +- if (ret)
2718 +- return ret;
2719 +-
2720 +- /* IRQ is initially disabled; it gets enabled in power_on */
2721 +- disable_irq(vop->irq);
2722 +-
2723 + ret = vop_create_crtc(vop);
2724 + if (ret)
2725 +- goto err_enable_irq;
2726 ++ return ret;
2727 +
2728 + pm_runtime_enable(&pdev->dev);
2729 +
2730 +@@ -1590,13 +1585,19 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
2731 + goto err_disable_pm_runtime;
2732 + }
2733 +
2734 ++ ret = devm_request_irq(dev, vop->irq, vop_isr,
2735 ++ IRQF_SHARED, dev_name(dev), vop);
2736 ++ if (ret)
2737 ++ goto err_disable_pm_runtime;
2738 ++
2739 ++ /* IRQ is initially disabled; it gets enabled in power_on */
2740 ++ disable_irq(vop->irq);
2741 ++
2742 + return 0;
2743 +
2744 + err_disable_pm_runtime:
2745 + pm_runtime_disable(&pdev->dev);
2746 + vop_destroy_crtc(vop);
2747 +-err_enable_irq:
2748 +- enable_irq(vop->irq); /* To balance out the disable_irq above */
2749 + return ret;
2750 + }
2751 +
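
The rockchip VOP hunks apply two related orderings: all interrupt sources are masked and cleared before the register snapshot is taken, and devm_request_irq() is deferred until the hardware is initialized, since the IRQ line is shared and the handler could otherwise run against a half-initialized device. The probe-ordering pattern, sketched with placeholder names:

    #include <linux/device.h>
    #include <linux/interrupt.h>

    struct my_dev {
            int irq;
            /* ... hardware state ... */
    };

    static int my_hw_init(struct my_dev *md);    /* placeholder: also masks
                                                  * and acks all IRQ sources */

    static irqreturn_t my_isr(int irq, void *data)
    {
            /* safe: hardware is fully initialized before this can run */
            return IRQ_HANDLED;
    }

    static int example_bind(struct device *dev, struct my_dev *md)
    {
            int ret;

            ret = my_hw_init(md);           /* initialize and quiesce first */
            if (ret)
                    return ret;

            ret = devm_request_irq(dev, md->irq, my_isr, IRQF_SHARED,
                                   dev_name(dev), md);
            if (ret)
                    return ret;

            disable_irq(md->irq);           /* enabled later in power-on path */
            return 0;
    }
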
2752 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
2753 +index c2560aae5542..4fc08c38bc0e 100644
2754 +--- a/drivers/hid/hid-core.c
2755 ++++ b/drivers/hid/hid-core.c
2756 +@@ -1365,7 +1365,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
2757 + * of implement() working on 8 byte chunks
2758 + */
2759 +
2760 +- int len = hid_report_len(report) + 7;
2761 ++ u32 len = hid_report_len(report) + 7;
2762 +
2763 + return kmalloc(len, flags);
2764 + }
2765 +@@ -1430,7 +1430,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
2766 + {
2767 + char *buf;
2768 + int ret;
2769 +- int len;
2770 ++ u32 len;
2771 +
2772 + buf = hid_alloc_report_buf(report, GFP_KERNEL);
2773 + if (!buf)
2774 +@@ -1456,14 +1456,14 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
2775 + }
2776 + EXPORT_SYMBOL_GPL(__hid_request);
2777 +
2778 +-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
2779 ++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
2780 + int interrupt)
2781 + {
2782 + struct hid_report_enum *report_enum = hid->report_enum + type;
2783 + struct hid_report *report;
2784 + struct hid_driver *hdrv;
2785 + unsigned int a;
2786 +- int rsize, csize = size;
2787 ++ u32 rsize, csize = size;
2788 + u8 *cdata = data;
2789 + int ret = 0;
2790 +
2791 +@@ -1521,7 +1521,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
2792 + *
2793 + * This is data entry for lower layers.
2794 + */
2795 +-int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
2796 ++int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
2797 + {
2798 + struct hid_report_enum *report_enum;
2799 + struct hid_driver *hdrv;
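
The hid-core hunks (and the hid-input, hid-multitouch, hid-rmi and i2c-hid ones below) move report lengths from int to u32. The lengths ultimately derive from device-supplied report descriptors, and a signed int lets a hostile length go negative and slip past size checks. A minimal illustration:

    #include <stdint.h>

    /* A device-supplied 32-bit length with the top bit set... */
    static int fits_signed(int len, int bufsize)
    {
            return len <= bufsize;    /* len = 0x80000000 is negative: passes! */
    }

    static int fits_unsigned(uint32_t len, uint32_t bufsize)
    {
            return len <= bufsize;    /* 0x80000000 > bufsize: rejected */
    }
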
2800 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2801 +index 9454ac134ce2..c631d2c8988d 100644
2802 +--- a/drivers/hid/hid-ids.h
2803 ++++ b/drivers/hid/hid-ids.h
2804 +@@ -519,6 +519,9 @@
2805 + #define I2C_VENDOR_ID_HANTICK 0x0911
2806 + #define I2C_PRODUCT_ID_HANTICK_5288 0x5288
2807 +
2808 ++#define I2C_VENDOR_ID_RAYD 0x2386
2809 ++#define I2C_PRODUCT_ID_RAYD_3118 0x3118
2810 ++
2811 + #define USB_VENDOR_ID_HANWANG 0x0b57
2812 + #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
2813 + #define USB_DEVICE_ID_HANWANG_TABLET_LAST 0x8fff
2814 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
2815 +index 04d01b57d94c..0b9e06569bf5 100644
2816 +--- a/drivers/hid/hid-input.c
2817 ++++ b/drivers/hid/hid-input.c
2818 +@@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
2819 + break;
2820 +
2821 + case POWER_SUPPLY_PROP_CAPACITY:
2822 +- if (dev->battery_report_type == HID_FEATURE_REPORT) {
2823 ++ if (dev->battery_status != HID_BATTERY_REPORTED &&
2824 ++ !dev->battery_avoid_query) {
2825 + value = hidinput_query_battery_capacity(dev);
2826 + if (value < 0)
2827 + return value;
2828 +@@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy,
2829 + break;
2830 +
2831 + case POWER_SUPPLY_PROP_STATUS:
2832 +- if (!dev->battery_reported &&
2833 +- dev->battery_report_type == HID_FEATURE_REPORT) {
2834 ++ if (dev->battery_status != HID_BATTERY_REPORTED &&
2835 ++ !dev->battery_avoid_query) {
2836 + value = hidinput_query_battery_capacity(dev);
2837 + if (value < 0)
2838 + return value;
2839 +
2840 + dev->battery_capacity = value;
2841 +- dev->battery_reported = true;
2842 ++ dev->battery_status = HID_BATTERY_QUERIED;
2843 + }
2844 +
2845 +- if (!dev->battery_reported)
2846 ++ if (dev->battery_status == HID_BATTERY_UNKNOWN)
2847 + val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
2848 + else if (dev->battery_capacity == 100)
2849 + val->intval = POWER_SUPPLY_STATUS_FULL;
2850 +@@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
2851 + dev->battery_report_type = report_type;
2852 + dev->battery_report_id = field->report->id;
2853 +
2854 ++ /*
2855 ++ * Stylus is normally not connected to the device and thus we
2856 ++ * can't query the device and get meaningful battery strength.
2857 ++ * We have to wait for the device to report it on its own.
2858 ++ */
2859 ++ dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
2860 ++ field->physical == HID_DG_STYLUS;
2861 ++
2862 + dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
2863 + if (IS_ERR(dev->battery)) {
2864 + error = PTR_ERR(dev->battery);
2865 +@@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
2866 +
2867 + capacity = hidinput_scale_battery_capacity(dev, value);
2868 +
2869 +- if (!dev->battery_reported || capacity != dev->battery_capacity) {
2870 ++ if (dev->battery_status != HID_BATTERY_REPORTED ||
2871 ++ capacity != dev->battery_capacity) {
2872 + dev->battery_capacity = capacity;
2873 +- dev->battery_reported = true;
2874 ++ dev->battery_status = HID_BATTERY_REPORTED;
2875 + power_supply_changed(dev->battery);
2876 + }
2877 + }
2878 +@@ -1368,7 +1378,8 @@ static void hidinput_led_worker(struct work_struct *work)
2879 + led_work);
2880 + struct hid_field *field;
2881 + struct hid_report *report;
2882 +- int len, ret;
2883 ++ int ret;
2884 ++ u32 len;
2885 + __u8 *buf;
2886 +
2887 + field = hidinput_get_led_field(hid);
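
The hid-input hunks replace the old boolean battery_reported with a
three-state status plus an avoid-query flag. A hedged userspace sketch of
the resulting decision logic (enum values, fields and the capacity value
are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    enum battery_status { BATTERY_UNKNOWN, BATTERY_QUERIED, BATTERY_REPORTED };

    struct dev_state {
        enum battery_status status;
        bool avoid_query;    /* e.g. detached stylus: polling is useless */
        int capacity;
    };

    static int get_capacity(struct dev_state *d)
    {
        /* Poll only while the device has never reported on its own
         * and polling is known to return something meaningful. */
        if (d->status != BATTERY_REPORTED && !d->avoid_query) {
            d->capacity = 80;             /* stand-in for a real query */
            d->status = BATTERY_QUERIED;
        }
        return d->status == BATTERY_UNKNOWN ? -1 : d->capacity;
    }

    int main(void)
    {
        struct dev_state d = { BATTERY_UNKNOWN, true, 0 };
        printf("%d\n", get_capacity(&d));   /* -1: stylus absent */
        d.avoid_query = false;
        printf("%d\n", get_capacity(&d));   /* 80 */
        return 0;
    }
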
2888 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
2889 +index 3b4739bde05d..2e1736ba2444 100644
2890 +--- a/drivers/hid/hid-multitouch.c
2891 ++++ b/drivers/hid/hid-multitouch.c
2892 +@@ -370,7 +370,8 @@ static const struct attribute_group mt_attribute_group = {
2893 + static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
2894 + {
2895 + struct mt_device *td = hid_get_drvdata(hdev);
2896 +- int ret, size = hid_report_len(report);
2897 ++ int ret;
2898 ++ u32 size = hid_report_len(report);
2899 + u8 *buf;
2900 +
2901 + /*
2902 +@@ -1183,7 +1184,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
2903 + struct hid_report_enum *re;
2904 + struct mt_class *cls = &td->mtclass;
2905 + char *buf;
2906 +- int report_len;
2907 ++ u32 report_len;
2908 +
2909 + if (td->inputmode < 0)
2910 + return;
2911 +diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
2912 +index c6c05df3e8d2..9c9362149641 100644
2913 +--- a/drivers/hid/hid-rmi.c
2914 ++++ b/drivers/hid/hid-rmi.c
2915 +@@ -89,8 +89,8 @@ struct rmi_data {
2916 + u8 *writeReport;
2917 + u8 *readReport;
2918 +
2919 +- int input_report_size;
2920 +- int output_report_size;
2921 ++ u32 input_report_size;
2922 ++ u32 output_report_size;
2923 +
2924 + unsigned long flags;
2925 +
2926 +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
2927 +index fbfcc8009432..b39844adea47 100644
2928 +--- a/drivers/hid/hidraw.c
2929 ++++ b/drivers/hid/hidraw.c
2930 +@@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
2931 + int ret = 0, len;
2932 + unsigned char report_number;
2933 +
2934 ++ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
2935 ++ ret = -ENODEV;
2936 ++ goto out;
2937 ++ }
2938 ++
2939 + dev = hidraw_table[minor]->hid;
2940 +
2941 + if (!dev->ll_driver->raw_request) {
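
The hidraw guard above closes a window where the device disappears
between open() and a later report request. A simplified sketch of the
check; the real driver also serializes access to the table, which is
omitted here, and the struct layout is invented:

    #include <stdio.h>

    struct hidraw { int exist; };

    static struct hidraw *hidraw_table[64];   /* all NULL: unplugged */

    /* Return -ENODEV (-19) unless the minor still maps to a live device. */
    static int get_report(unsigned int minor)
    {
        if (!hidraw_table[minor] || !hidraw_table[minor]->exist)
            return -19;
        /* only now is hidraw_table[minor] safe to dereference further */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", get_report(0));   /* -19 */
        return 0;
    }
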
2942 +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
2943 +index 7230243b94d3..fd9f70a8b813 100644
2944 +--- a/drivers/hid/i2c-hid/i2c-hid.c
2945 ++++ b/drivers/hid/i2c-hid/i2c-hid.c
2946 +@@ -47,6 +47,7 @@
2947 + /* quirks to control the device */
2948 + #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
2949 + #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
2950 ++#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
2951 +
2952 + /* flags */
2953 + #define I2C_HID_STARTED 0
2954 +@@ -144,10 +145,10 @@ struct i2c_hid {
2955 + * register of the HID
2956 + * descriptor. */
2957 + unsigned int bufsize; /* i2c buffer size */
2958 +- char *inbuf; /* Input buffer */
2959 +- char *rawbuf; /* Raw Input buffer */
2960 +- char *cmdbuf; /* Command buffer */
2961 +- char *argsbuf; /* Command arguments buffer */
2962 ++ u8 *inbuf; /* Input buffer */
2963 ++ u8 *rawbuf; /* Raw Input buffer */
2964 ++ u8 *cmdbuf; /* Command buffer */
2965 ++ u8 *argsbuf; /* Command arguments buffer */
2966 +
2967 + unsigned long flags; /* device flags */
2968 + unsigned long quirks; /* Various quirks */
2969 +@@ -171,6 +172,8 @@ static const struct i2c_hid_quirks {
2970 + I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
2971 + { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
2972 + I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
2973 ++ { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_3118,
2974 ++ I2C_HID_QUIRK_RESEND_REPORT_DESCR },
2975 + { 0, 0 }
2976 + };
2977 +
2978 +@@ -455,7 +458,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
2979 +
2980 + static void i2c_hid_get_input(struct i2c_hid *ihid)
2981 + {
2982 +- int ret, ret_size;
2983 ++ int ret;
2984 ++ u32 ret_size;
2985 + int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
2986 +
2987 + if (size > ihid->bufsize)
2988 +@@ -480,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
2989 + return;
2990 + }
2991 +
2992 +- if (ret_size > size) {
2993 ++ if ((ret_size > size) || (ret_size <= 2)) {
2994 + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
2995 + __func__, size, ret_size);
2996 + return;
2997 +@@ -1219,6 +1223,16 @@ static int i2c_hid_resume(struct device *dev)
2998 + if (ret)
2999 + return ret;
3000 +
3001 ++	/* RAYDIUM device (2386:3118) needs the report descriptor
3002 ++	 * command re-sent after resume; after that it is back to
3003 ++	 * normal. Otherwise it issues too many incomplete reports.
3004 ++ */
3005 ++ if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
3006 ++ ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
3007 ++ if (ret)
3008 ++ return ret;
3009 ++ }
3010 ++
3011 + if (hid->driver && hid->driver->reset_resume) {
3012 + ret = hid->driver->reset_resume(hid);
3013 + return ret;
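
The quirk plumbing above uses the usual sentinel-terminated table idiom:
scan (vendor, product) pairs until the { 0, 0 } entry. A small compilable
sketch of that lookup (IDs copied from the hunks, flag values
illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct quirk { uint16_t vendor, product; uint32_t flags; };

    static const struct quirk quirks[] = {
        { 0x0911, 0x5288, 1u << 1 },   /* NO_IRQ_AFTER_RESET */
        { 0x2386, 0x3118, 1u << 2 },   /* RESEND_REPORT_DESCR */
        { 0, 0, 0 }                    /* terminator */
    };

    static uint32_t lookup_quirks(uint16_t vendor, uint16_t product)
    {
        for (int n = 0; quirks[n].vendor; n++)
            if (quirks[n].vendor == vendor && quirks[n].product == product)
                return quirks[n].flags;
        return 0;
    }

    int main(void)
    {
        printf("0x%x\n", (unsigned int)lookup_quirks(0x2386, 0x3118));
        return 0;
    }
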
3014 +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
3015 +index 409543160af7..b54ef1ffcbec 100644
3016 +--- a/drivers/hid/wacom_sys.c
3017 ++++ b/drivers/hid/wacom_sys.c
3018 +@@ -219,7 +219,7 @@ static void wacom_feature_mapping(struct hid_device *hdev,
3019 + unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
3020 + u8 *data;
3021 + int ret;
3022 +- int n;
3023 ++ u32 n;
3024 +
3025 + switch (equivalent_usage) {
3026 + case HID_DG_CONTACTMAX:
3027 +@@ -519,7 +519,7 @@ static int wacom_set_device_mode(struct hid_device *hdev,
3028 + u8 *rep_data;
3029 + struct hid_report *r;
3030 + struct hid_report_enum *re;
3031 +- int length;
3032 ++ u32 length;
3033 + int error = -ENOMEM, limit = 0;
3034 +
3035 + if (wacom_wac->mode_report < 0)
3036 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
3037 +index 90c38a0523e9..44b2c7b0838c 100644
3038 +--- a/drivers/hid/wacom_wac.c
3039 ++++ b/drivers/hid/wacom_wac.c
3040 +@@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id)
3041 + return tool_type;
3042 + }
3043 +
3044 ++static void wacom_exit_report(struct wacom_wac *wacom)
3045 ++{
3046 ++ struct input_dev *input = wacom->pen_input;
3047 ++ struct wacom_features *features = &wacom->features;
3048 ++ unsigned char *data = wacom->data;
3049 ++ int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
3050 ++
3051 ++ /*
3052 ++	 * Reset all states, otherwise we lose the initial states
3053 ++ * when in-prox next time
3054 ++ */
3055 ++ input_report_abs(input, ABS_X, 0);
3056 ++ input_report_abs(input, ABS_Y, 0);
3057 ++ input_report_abs(input, ABS_DISTANCE, 0);
3058 ++ input_report_abs(input, ABS_TILT_X, 0);
3059 ++ input_report_abs(input, ABS_TILT_Y, 0);
3060 ++ if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
3061 ++ input_report_key(input, BTN_LEFT, 0);
3062 ++ input_report_key(input, BTN_MIDDLE, 0);
3063 ++ input_report_key(input, BTN_RIGHT, 0);
3064 ++ input_report_key(input, BTN_SIDE, 0);
3065 ++ input_report_key(input, BTN_EXTRA, 0);
3066 ++ input_report_abs(input, ABS_THROTTLE, 0);
3067 ++ input_report_abs(input, ABS_RZ, 0);
3068 ++ } else {
3069 ++ input_report_abs(input, ABS_PRESSURE, 0);
3070 ++ input_report_key(input, BTN_STYLUS, 0);
3071 ++ input_report_key(input, BTN_STYLUS2, 0);
3072 ++ input_report_key(input, BTN_TOUCH, 0);
3073 ++ input_report_abs(input, ABS_WHEEL, 0);
3074 ++ if (features->type >= INTUOS3S)
3075 ++ input_report_abs(input, ABS_Z, 0);
3076 ++ }
3077 ++ input_report_key(input, wacom->tool[idx], 0);
3078 ++ input_report_abs(input, ABS_MISC, 0); /* reset tool id */
3079 ++ input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
3080 ++ wacom->id[idx] = 0;
3081 ++}
3082 ++
3083 + static int wacom_intuos_inout(struct wacom_wac *wacom)
3084 + {
3085 + struct wacom_features *features = &wacom->features;
3086 +@@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
3087 + if (!wacom->id[idx])
3088 + return 1;
3089 +
3090 +- /*
3091 +- * Reset all states otherwise we lose the initial states
3092 +- * when in-prox next time
3093 +- */
3094 +- input_report_abs(input, ABS_X, 0);
3095 +- input_report_abs(input, ABS_Y, 0);
3096 +- input_report_abs(input, ABS_DISTANCE, 0);
3097 +- input_report_abs(input, ABS_TILT_X, 0);
3098 +- input_report_abs(input, ABS_TILT_Y, 0);
3099 +- if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
3100 +- input_report_key(input, BTN_LEFT, 0);
3101 +- input_report_key(input, BTN_MIDDLE, 0);
3102 +- input_report_key(input, BTN_RIGHT, 0);
3103 +- input_report_key(input, BTN_SIDE, 0);
3104 +- input_report_key(input, BTN_EXTRA, 0);
3105 +- input_report_abs(input, ABS_THROTTLE, 0);
3106 +- input_report_abs(input, ABS_RZ, 0);
3107 +- } else {
3108 +- input_report_abs(input, ABS_PRESSURE, 0);
3109 +- input_report_key(input, BTN_STYLUS, 0);
3110 +- input_report_key(input, BTN_STYLUS2, 0);
3111 +- input_report_key(input, BTN_TOUCH, 0);
3112 +- input_report_abs(input, ABS_WHEEL, 0);
3113 +- if (features->type >= INTUOS3S)
3114 +- input_report_abs(input, ABS_Z, 0);
3115 +- }
3116 +- input_report_key(input, wacom->tool[idx], 0);
3117 +- input_report_abs(input, ABS_MISC, 0); /* reset tool id */
3118 +- input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
3119 +- wacom->id[idx] = 0;
3120 ++ wacom_exit_report(wacom);
3121 + return 2;
3122 + }
3123 +
3124 +@@ -1226,6 +1236,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
3125 + if (!valid)
3126 + continue;
3127 +
3128 ++ if (!prox) {
3129 ++ wacom->shared->stylus_in_proximity = false;
3130 ++ wacom_exit_report(wacom);
3131 ++ input_sync(pen_input);
3132 ++ return;
3133 ++ }
3134 + if (range) {
3135 + /* Fix rotation alignment: userspace expects zero at left */
3136 + int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
3137 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
3138 +index 692b34125866..e0d59e9ff3c6 100644
3139 +--- a/drivers/i2c/busses/i2c-i801.c
3140 ++++ b/drivers/i2c/busses/i2c-i801.c
3141 +@@ -966,8 +966,6 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter)
3142 + if (!(priv->features & FEATURE_HOST_NOTIFY))
3143 + return;
3144 +
3145 +- priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
3146 +-
3147 + if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
3148 + outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
3149 + SMBSLVCMD(priv));
3150 +@@ -1615,6 +1613,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
3151 + outb_p(inb_p(SMBAUXCTL(priv)) &
3152 + ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
3153 +
3154 ++ /* Remember original Host Notify setting */
3155 ++ if (priv->features & FEATURE_HOST_NOTIFY)
3156 ++ priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
3157 ++
3158 + /* Default timeout in interrupt mode: 200 ms */
3159 + priv->adapter.timeout = HZ / 5;
3160 +
3161 +@@ -1699,6 +1701,15 @@ static void i801_remove(struct pci_dev *dev)
3162 + */
3163 + }
3164 +
3165 ++static void i801_shutdown(struct pci_dev *dev)
3166 ++{
3167 ++ struct i801_priv *priv = pci_get_drvdata(dev);
3168 ++
3169 ++ /* Restore config registers to avoid hard hang on some systems */
3170 ++ i801_disable_host_notify(priv);
3171 ++ pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
3172 ++}
3173 ++
3174 + #ifdef CONFIG_PM
3175 + static int i801_suspend(struct device *dev)
3176 + {
3177 +@@ -1728,6 +1739,7 @@ static struct pci_driver i801_driver = {
3178 + .id_table = i801_ids,
3179 + .probe = i801_probe,
3180 + .remove = i801_remove,
3181 ++ .shutdown = i801_shutdown,
3182 + .driver = {
3183 + .pm = &i801_pm_ops,
3184 + },
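
The i801 change snapshots SMBSLVCMD once at probe and adds a .shutdown
hook that restores it, so a reboot never leaves Host Notify armed behind
the firmware's back. A toy sketch of the save/restore discipline (the
register and all names are stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t hw_reg;                        /* stand-in register */

    struct priv { uint8_t original_slvcmd; };

    static void probe(struct priv *p)         { p->original_slvcmd = hw_reg; }
    static void enable_notify(void)           { hw_reg |= 0x01; }
    static void shutdown_hook(struct priv *p) { hw_reg = p->original_slvcmd; }

    int main(void)
    {
        struct priv p;
        probe(&p);           /* remember the pristine value first */
        enable_notify();     /* runtime: driver arms Host Notify */
        shutdown_hook(&p);   /* reboot path: hardware state restored */
        printf("reg=0x%02x\n", hw_reg);   /* 0x00 */
        return 0;
    }
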
3185 +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
3186 +index d933336d7e01..5c21ae237f82 100644
3187 +--- a/drivers/infiniband/core/ucma.c
3188 ++++ b/drivers/infiniband/core/ucma.c
3189 +@@ -1241,6 +1241,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
3190 + if (!optlen)
3191 + return -EINVAL;
3192 +
3193 ++ if (!ctx->cm_id->device)
3194 ++ return -EINVAL;
3195 ++
3196 + memset(&sa_path, 0, sizeof(sa_path));
3197 +
3198 + sa_path.rec_type = SA_PATH_REC_TYPE_IB;
3199 +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
3200 +index 93025d2009b8..c715123742a4 100644
3201 +--- a/drivers/infiniband/core/verbs.c
3202 ++++ b/drivers/infiniband/core/verbs.c
3203 +@@ -2194,7 +2194,14 @@ static void __ib_drain_sq(struct ib_qp *qp)
3204 + struct ib_cq *cq = qp->send_cq;
3205 + struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
3206 + struct ib_drain_cqe sdrain;
3207 +- struct ib_send_wr swr = {}, *bad_swr;
3208 ++ struct ib_send_wr *bad_swr;
3209 ++ struct ib_rdma_wr swr = {
3210 ++ .wr = {
3211 ++ .next = NULL,
3212 ++ { .wr_cqe = &sdrain.cqe, },
3213 ++ .opcode = IB_WR_RDMA_WRITE,
3214 ++ },
3215 ++ };
3216 + int ret;
3217 +
3218 + ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
3219 +@@ -2203,11 +2210,10 @@ static void __ib_drain_sq(struct ib_qp *qp)
3220 + return;
3221 + }
3222 +
3223 +- swr.wr_cqe = &sdrain.cqe;
3224 + sdrain.cqe.done = ib_drain_qp_done;
3225 + init_completion(&sdrain.done);
3226 +
3227 +- ret = ib_post_send(qp, &swr, &bad_swr);
3228 ++ ret = ib_post_send(qp, &swr.wr, &bad_swr);
3229 + if (ret) {
3230 + WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
3231 + return;
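
This hunk (and the matching ib_srpt one further down) embeds the drain
work request in a struct ib_rdma_wr, because a provider handling an
RDMA_WRITE opcode may legitimately read the container struct; a bare
ib_send_wr on the stack leaves that memory uninitialized. A compilable
toy model with heavily simplified layouts:

    #include <stdio.h>

    struct send_wr { struct send_wr *next; int opcode; };
    struct rdma_wr { struct send_wr wr; unsigned long remote_addr; };

    /* A provider that upcasts to the container it expects. */
    static void post(struct send_wr *wr)
    {
        struct rdma_wr *r = (struct rdma_wr *)wr;  /* wr is first member */
        printf("opcode=%d remote_addr=%lu\n", wr->opcode, r->remote_addr);
    }

    int main(void)
    {
        struct rdma_wr swr = {
            .wr = { .next = 0, .opcode = 4 /* stand-in for RDMA_WRITE */ },
        };
        /* Posting a bare struct send_wr here would let post() read past
         * the end of the stack object, which is the bug being fixed. */
        post(&swr.wr);
        return 0;
    }
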
3232 +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
3233 +index 3e0b3f0238d6..6857c61bdee1 100644
3234 +--- a/drivers/infiniband/hw/mlx5/mr.c
3235 ++++ b/drivers/infiniband/hw/mlx5/mr.c
3236 +@@ -1223,6 +1223,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
3237 + return ERR_PTR(-EINVAL);
3238 +
3239 + mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
3240 ++ if (IS_ERR(mr))
3241 ++ return ERR_CAST(mr);
3242 + return &mr->ibmr;
3243 + }
3244 + #endif
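
The mlx5 fix adds the IS_ERR() check that was missing before taking
&mr->ibmr. A userspace sketch of the ERR_PTR convention that makes the
check necessary (simplified re-implementation, not the kernel headers):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err)     { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *alloc_mr(int fail)
    {
        return fail ? ERR_PTR(-12 /* -ENOMEM */) : "real object";
    }

    int main(void)
    {
        void *mr = alloc_mr(1);
        if (IS_ERR(mr)) {       /* without this, &mr->field would be a
                                 * bogus near-top-of-memory pointer */
            printf("error %ld\n", PTR_ERR(mr));
            return 1;
        }
        return 0;
    }
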
3245 +diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
3246 +index f4bab2cd0ec2..45594091353c 100644
3247 +--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
3248 ++++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
3249 +@@ -711,9 +711,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
3250 + memcpy(wqe->dma.sge, ibwr->sg_list,
3251 + num_sge * sizeof(struct ib_sge));
3252 +
3253 +- wqe->iova = (mask & WR_ATOMIC_MASK) ?
3254 +- atomic_wr(ibwr)->remote_addr :
3255 +- rdma_wr(ibwr)->remote_addr;
3256 ++ wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
3257 ++ mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
3258 + wqe->mask = mask;
3259 + wqe->dma.length = length;
3260 + wqe->dma.resid = length;
3261 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
3262 +index b48843833d69..4a1a489ce8bb 100644
3263 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
3264 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
3265 +@@ -2974,9 +2974,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
3266 + ret = FAST_IO_FAIL;
3267 + else
3268 + ret = FAILED;
3269 +- srp_free_req(ch, req, scmnd, 0);
3270 +- scmnd->result = DID_ABORT << 16;
3271 +- scmnd->scsi_done(scmnd);
3272 ++ if (ret == SUCCESS) {
3273 ++ srp_free_req(ch, req, scmnd, 0);
3274 ++ scmnd->result = DID_ABORT << 16;
3275 ++ scmnd->scsi_done(scmnd);
3276 ++ }
3277 +
3278 + return ret;
3279 + }
3280 +@@ -3871,12 +3873,10 @@ static ssize_t srp_create_target(struct device *dev,
3281 + num_online_nodes());
3282 + const int ch_end = ((node_idx + 1) * target->ch_count /
3283 + num_online_nodes());
3284 +- const int cv_start = (node_idx * ibdev->num_comp_vectors /
3285 +- num_online_nodes() + target->comp_vector)
3286 +- % ibdev->num_comp_vectors;
3287 +- const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3288 +- num_online_nodes() + target->comp_vector)
3289 +- % ibdev->num_comp_vectors;
3290 ++ const int cv_start = node_idx * ibdev->num_comp_vectors /
3291 ++ num_online_nodes();
3292 ++ const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
3293 ++ num_online_nodes();
3294 + int cpu_idx = 0;
3295 +
3296 + for_each_online_cpu(cpu) {
3297 +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
3298 +index 0373b7c40902..f1be280e701a 100644
3299 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
3300 ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
3301 +@@ -838,16 +838,20 @@ static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
3302 + */
3303 + static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
3304 + {
3305 +- struct ib_send_wr wr, *bad_wr;
3306 ++ struct ib_send_wr *bad_wr;
3307 ++ struct ib_rdma_wr wr = {
3308 ++ .wr = {
3309 ++ .next = NULL,
3310 ++ { .wr_cqe = &ch->zw_cqe, },
3311 ++ .opcode = IB_WR_RDMA_WRITE,
3312 ++ .send_flags = IB_SEND_SIGNALED,
3313 ++ }
3314 ++ };
3315 +
3316 + pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
3317 + ch->qp->qp_num);
3318 +
3319 +- memset(&wr, 0, sizeof(wr));
3320 +- wr.opcode = IB_WR_RDMA_WRITE;
3321 +- wr.wr_cqe = &ch->zw_cqe;
3322 +- wr.send_flags = IB_SEND_SIGNALED;
3323 +- return ib_post_send(ch->qp, &wr, &bad_wr);
3324 ++ return ib_post_send(ch->qp, &wr.wr, &bad_wr);
3325 + }
3326 +
3327 + static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
3328 +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
3329 +index 99bc9bd64b9e..9124a625fe83 100644
3330 +--- a/drivers/iommu/intel-svm.c
3331 ++++ b/drivers/iommu/intel-svm.c
3332 +@@ -396,6 +396,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
3333 + pasid_max - 1, GFP_KERNEL);
3334 + if (ret < 0) {
3335 + kfree(svm);
3336 ++ kfree(sdev);
3337 + goto out;
3338 + }
3339 + svm->pasid = ret;
3340 +diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
3341 +index 30017df5b54c..01e673c680cd 100644
3342 +--- a/drivers/irqchip/irq-gic-common.c
3343 ++++ b/drivers/irqchip/irq-gic-common.c
3344 +@@ -21,6 +21,8 @@
3345 +
3346 + #include "irq-gic-common.h"
3347 +
3348 ++static DEFINE_RAW_SPINLOCK(irq_controller_lock);
3349 ++
3350 + static const struct gic_kvm_info *gic_kvm_info;
3351 +
3352 + const struct gic_kvm_info *gic_get_kvm_info(void)
3353 +@@ -53,11 +55,13 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
3354 + u32 confoff = (irq / 16) * 4;
3355 + u32 val, oldval;
3356 + int ret = 0;
3357 ++ unsigned long flags;
3358 +
3359 + /*
3360 + * Read current configuration register, and insert the config
3361 + * for "irq", depending on "type".
3362 + */
3363 ++ raw_spin_lock_irqsave(&irq_controller_lock, flags);
3364 + val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
3365 + if (type & IRQ_TYPE_LEVEL_MASK)
3366 + val &= ~confmask;
3367 +@@ -65,8 +69,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
3368 + val |= confmask;
3369 +
3370 + /* If the current configuration is the same, then we are done */
3371 +- if (val == oldval)
3372 ++ if (val == oldval) {
3373 ++ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
3374 + return 0;
3375 ++ }
3376 +
3377 + /*
3378 + * Write back the new configuration, and possibly re-enable
3379 +@@ -84,6 +90,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
3380 + pr_warn("GIC: PPI%d is secure or misconfigured\n",
3381 + irq - 16);
3382 + }
3383 ++ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
3384 +
3385 + if (sync_access)
3386 + sync_access();
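
The lock is needed because each GIC_DIST_CONFIG register packs the
configuration of 16 interrupts, so two CPUs doing unlocked
read-modify-write cycles on neighbouring IRQs can silently drop each
other's update. A userspace analogue of the now-serialized RMW, using a
mutex and an invented 2-bits-per-IRQ packing:

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t confreg;      /* shared: 16 x 2-bit config fields */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void configure_irq(unsigned int irq, uint32_t cfg)
    {
        uint32_t shift = (irq % 16) * 2;
        uint32_t mask = 0x3u << shift;

        pthread_mutex_lock(&lock);        /* makes the RMW atomic */
        uint32_t val = confreg;
        val = (val & ~mask) | (cfg << shift);
        confreg = val;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        configure_irq(3, 0x2);
        printf("confreg=0x%08x\n", (unsigned int)confreg);  /* 0x80 */
        return 0;
    }
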
3387 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
3388 +index 8168f737590e..e7b4a0256949 100644
3389 +--- a/drivers/md/dm-crypt.c
3390 ++++ b/drivers/md/dm-crypt.c
3391 +@@ -148,6 +148,8 @@ struct crypt_config {
3392 + mempool_t *tag_pool;
3393 + unsigned tag_pool_max_sectors;
3394 +
3395 ++ struct percpu_counter n_allocated_pages;
3396 ++
3397 + struct bio_set *bs;
3398 + struct mutex bio_alloc_lock;
3399 +
3400 +@@ -219,6 +221,12 @@ struct crypt_config {
3401 + #define MAX_TAG_SIZE 480
3402 + #define POOL_ENTRY_SIZE 512
3403 +
3404 ++static DEFINE_SPINLOCK(dm_crypt_clients_lock);
3405 ++static unsigned dm_crypt_clients_n = 0;
3406 ++static volatile unsigned long dm_crypt_pages_per_client;
3407 ++#define DM_CRYPT_MEMORY_PERCENT 2
3408 ++#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16)
3409 ++
3410 + static void clone_init(struct dm_crypt_io *, struct bio *);
3411 + static void kcryptd_queue_crypt(struct dm_crypt_io *io);
3412 + static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
3413 +@@ -2155,6 +2163,43 @@ static int crypt_wipe_key(struct crypt_config *cc)
3414 + return r;
3415 + }
3416 +
3417 ++static void crypt_calculate_pages_per_client(void)
3418 ++{
3419 ++ unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;
3420 ++
3421 ++ if (!dm_crypt_clients_n)
3422 ++ return;
3423 ++
3424 ++ pages /= dm_crypt_clients_n;
3425 ++ if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
3426 ++ pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
3427 ++ dm_crypt_pages_per_client = pages;
3428 ++}
3429 ++
3430 ++static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
3431 ++{
3432 ++ struct crypt_config *cc = pool_data;
3433 ++ struct page *page;
3434 ++
3435 ++ if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
3436 ++ likely(gfp_mask & __GFP_NORETRY))
3437 ++ return NULL;
3438 ++
3439 ++ page = alloc_page(gfp_mask);
3440 ++ if (likely(page != NULL))
3441 ++ percpu_counter_add(&cc->n_allocated_pages, 1);
3442 ++
3443 ++ return page;
3444 ++}
3445 ++
3446 ++static void crypt_page_free(void *page, void *pool_data)
3447 ++{
3448 ++ struct crypt_config *cc = pool_data;
3449 ++
3450 ++ __free_page(page);
3451 ++ percpu_counter_sub(&cc->n_allocated_pages, 1);
3452 ++}
3453 ++
3454 + static void crypt_dtr(struct dm_target *ti)
3455 + {
3456 + struct crypt_config *cc = ti->private;
3457 +@@ -2181,6 +2226,10 @@ static void crypt_dtr(struct dm_target *ti)
3458 + mempool_destroy(cc->req_pool);
3459 + mempool_destroy(cc->tag_pool);
3460 +
3461 ++ if (cc->page_pool)
3462 ++ WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
3463 ++ percpu_counter_destroy(&cc->n_allocated_pages);
3464 ++
3465 + if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
3466 + cc->iv_gen_ops->dtr(cc);
3467 +
3468 +@@ -2197,6 +2246,12 @@ static void crypt_dtr(struct dm_target *ti)
3469 +
3470 + /* Must zero key material before freeing */
3471 + kzfree(cc);
3472 ++
3473 ++ spin_lock(&dm_crypt_clients_lock);
3474 ++ WARN_ON(!dm_crypt_clients_n);
3475 ++ dm_crypt_clients_n--;
3476 ++ crypt_calculate_pages_per_client();
3477 ++ spin_unlock(&dm_crypt_clients_lock);
3478 + }
3479 +
3480 + static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
3481 +@@ -2644,6 +2699,15 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3482 +
3483 + ti->private = cc;
3484 +
3485 ++ spin_lock(&dm_crypt_clients_lock);
3486 ++ dm_crypt_clients_n++;
3487 ++ crypt_calculate_pages_per_client();
3488 ++ spin_unlock(&dm_crypt_clients_lock);
3489 ++
3490 ++ ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
3491 ++ if (ret < 0)
3492 ++ goto bad;
3493 ++
3494 + /* Optional parameters need to be read before cipher constructor */
3495 + if (argc > 5) {
3496 + ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
3497 +@@ -2698,7 +2762,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
3498 + ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
3499 + ARCH_KMALLOC_MINALIGN);
3500 +
3501 +- cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
3502 ++ cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
3503 + if (!cc->page_pool) {
3504 + ti->error = "Cannot allocate page mempool";
3505 + goto bad;
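
The dm-crypt hunks swap the fixed page pool for a custom allocator that
counts pages per client and refuses opportunistic (__GFP_NORETRY-style)
allocations once a budget is exceeded, so one busy client cannot exhaust
memory. A userspace sketch of the budgeted pool (plain counter instead
of a percpu_counter, budget value invented):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_BUDGET 4    /* invented; the kernel derives it from RAM */

    struct client { long n_allocated; };

    static void *page_alloc(struct client *c, int noretry)
    {
        if (c->n_allocated >= PAGE_BUDGET && noretry)
            return NULL;     /* over budget: caller takes the slow path */
        void *p = malloc(4096);
        if (p)
            c->n_allocated++;
        return p;
    }

    static void page_free(struct client *c, void *p)
    {
        free(p);
        c->n_allocated--;
    }

    int main(void)
    {
        struct client c = { 0 };
        void *pages[PAGE_BUDGET + 1];
        for (int i = 0; i <= PAGE_BUDGET; i++)
            pages[i] = page_alloc(&c, 1);
        printf("last alloc: %s\n", pages[PAGE_BUDGET] ? "ok" : "denied");
        for (int i = 0; i < PAGE_BUDGET; i++)
            page_free(&c, pages[i]);
        return 0;
    }
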
3506 +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
3507 +index c1d1034ff7b7..335ebd46a986 100644
3508 +--- a/drivers/md/dm-raid.c
3509 ++++ b/drivers/md/dm-raid.c
3510 +@@ -3408,7 +3408,8 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
3511 + set_bit(RT_FLAG_RS_IN_SYNC, &rs->runtime_flags);
3512 +
3513 + } else {
3514 +- if (!test_bit(MD_RECOVERY_INTR, &recovery) &&
3515 ++ if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags) &&
3516 ++ !test_bit(MD_RECOVERY_INTR, &recovery) &&
3517 + (test_bit(MD_RECOVERY_NEEDED, &recovery) ||
3518 + test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
3519 + test_bit(MD_RECOVERY_RUNNING, &recovery)))
3520 +diff --git a/drivers/md/dm.c b/drivers/md/dm.c
3521 +index 353ea0ede091..038c7572fdd4 100644
3522 +--- a/drivers/md/dm.c
3523 ++++ b/drivers/md/dm.c
3524 +@@ -1477,6 +1477,23 @@ static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
3525 + return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios, NULL);
3526 + }
3527 +
3528 ++static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
3529 ++ int *result)
3530 ++{
3531 ++ struct bio *bio = ci->bio;
3532 ++
3533 ++ if (bio_op(bio) == REQ_OP_DISCARD)
3534 ++ *result = __send_discard(ci, ti);
3535 ++ else if (bio_op(bio) == REQ_OP_WRITE_SAME)
3536 ++ *result = __send_write_same(ci, ti);
3537 ++ else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
3538 ++ *result = __send_write_zeroes(ci, ti);
3539 ++ else
3540 ++ return false;
3541 ++
3542 ++ return true;
3543 ++}
3544 ++
3545 + /*
3546 + * Select the correct strategy for processing a non-flush bio.
3547 + */
3548 +@@ -1491,12 +1508,8 @@ static int __split_and_process_non_flush(struct clone_info *ci)
3549 + if (!dm_target_is_valid(ti))
3550 + return -EIO;
3551 +
3552 +- if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
3553 +- return __send_discard(ci, ti);
3554 +- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
3555 +- return __send_write_same(ci, ti);
3556 +- else if (unlikely(bio_op(bio) == REQ_OP_WRITE_ZEROES))
3557 +- return __send_write_zeroes(ci, ti);
3558 ++ if (unlikely(__process_abnormal_io(ci, ti, &r)))
3559 ++ return r;
3560 +
3561 + if (bio_op(bio) == REQ_OP_ZONE_REPORT)
3562 + len = ci->sector_count;
3563 +@@ -1617,9 +1630,12 @@ static blk_qc_t __process_bio(struct mapped_device *md,
3564 + goto out;
3565 + }
3566 +
3567 +- tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
3568 + ci.bio = bio;
3569 + ci.sector_count = bio_sectors(bio);
3570 ++ if (unlikely(__process_abnormal_io(&ci, ti, &error)))
3571 ++ goto out;
3572 ++
3573 ++ tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
3574 + ret = __clone_and_map_simple_bio(&ci, tio, NULL);
3575 + }
3576 + out:
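
The dm.c refactor hoists the discard/write-same/write-zeroes dispatch
into __process_abnormal_io() so the regular clone path and the bio-based
fast path share one decision point. A minimal sketch of the
bool-plus-out-parameter shape (ops and result codes are stand-ins):

    #include <stdbool.h>
    #include <stdio.h>

    enum bio_op { OP_READ, OP_DISCARD, OP_WRITE_SAME, OP_WRITE_ZEROES };

    /* True when the bio was one of the special cases; *result then
     * carries its status and the caller returns early. */
    static bool process_abnormal_io(enum bio_op op, int *result)
    {
        if (op == OP_DISCARD)
            *result = 1;
        else if (op == OP_WRITE_SAME)
            *result = 2;
        else if (op == OP_WRITE_ZEROES)
            *result = 3;
        else
            return false;
        return true;
    }

    int main(void)
    {
        int r = 0;
        if (process_abnormal_io(OP_DISCARD, &r))
            printf("handled specially, r=%d\n", r);
        else
            puts("normal split-and-map path");
        return 0;
    }
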
3577 +diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
3578 +index debe35fc66b4..d3f7bb33a54d 100644
3579 +--- a/drivers/media/common/videobuf2/videobuf2-core.c
3580 ++++ b/drivers/media/common/videobuf2/videobuf2-core.c
3581 +@@ -1696,6 +1696,15 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
3582 + for (i = 0; i < q->num_buffers; ++i) {
3583 + struct vb2_buffer *vb = q->bufs[i];
3584 +
3585 ++ if (vb->state == VB2_BUF_STATE_PREPARED ||
3586 ++ vb->state == VB2_BUF_STATE_QUEUED) {
3587 ++ unsigned int plane;
3588 ++
3589 ++ for (plane = 0; plane < vb->num_planes; ++plane)
3590 ++ call_void_memop(vb, finish,
3591 ++ vb->planes[plane].mem_priv);
3592 ++ }
3593 ++
3594 + if (vb->state != VB2_BUF_STATE_DEQUEUED) {
3595 + vb->state = VB2_BUF_STATE_PREPARED;
3596 + call_void_vb_qop(vb, buf_finish, vb);
3597 +diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
3598 +index a651527d80db..23888fdb94fb 100644
3599 +--- a/drivers/media/platform/vivid/vivid-vid-common.c
3600 ++++ b/drivers/media/platform/vivid/vivid-vid-common.c
3601 +@@ -874,7 +874,8 @@ int vidioc_g_edid(struct file *file, void *_fh,
3602 + return -EINVAL;
3603 + if (edid->start_block + edid->blocks > dev->edid_blocks)
3604 + edid->blocks = dev->edid_blocks - edid->start_block;
3605 +- cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
3606 ++ if (adap)
3607 ++ cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
3608 + memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128);
3609 + return 0;
3610 + }
3611 +diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
3612 +index f7f3b4b2c2de..8bd6b2f1af15 100644
3613 +--- a/drivers/media/platform/vsp1/vsp1_wpf.c
3614 ++++ b/drivers/media/platform/vsp1/vsp1_wpf.c
3615 +@@ -452,7 +452,7 @@ static void wpf_configure(struct vsp1_entity *entity,
3616 + : VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
3617 + }
3618 +
3619 +- if (pipe->bru || pipe->num_inputs > 1)
3620 ++ if (pipe->bru)
3621 + srcrpf |= pipe->bru->type == VSP1_ENTITY_BRU
3622 + ? VI6_WPF_SRCRPF_VIRACT_MST
3623 + : VI6_WPF_SRCRPF_VIRACT2_MST;
3624 +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
3625 +index 1db8d38fed7c..9b78818c0282 100644
3626 +--- a/drivers/media/rc/rc-main.c
3627 ++++ b/drivers/media/rc/rc-main.c
3628 +@@ -1929,12 +1929,12 @@ void rc_unregister_device(struct rc_dev *dev)
3629 + if (!dev)
3630 + return;
3631 +
3632 +- del_timer_sync(&dev->timer_keyup);
3633 +- del_timer_sync(&dev->timer_repeat);
3634 +-
3635 + if (dev->driver_type == RC_DRIVER_IR_RAW)
3636 + ir_raw_event_unregister(dev);
3637 +
3638 ++ del_timer_sync(&dev->timer_keyup);
3639 ++ del_timer_sync(&dev->timer_repeat);
3640 ++
3641 + rc_free_rx_device(dev);
3642 +
3643 + mutex_lock(&dev->lock);
3644 +diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
3645 +index 30ccba436b3b..55cd35d1a9cc 100644
3646 +--- a/drivers/misc/cxl/cxllib.c
3647 ++++ b/drivers/misc/cxl/cxllib.c
3648 +@@ -208,49 +208,74 @@ int cxllib_get_PE_attributes(struct task_struct *task,
3649 + }
3650 + EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes);
3651 +
3652 +-int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
3653 ++static int get_vma_info(struct mm_struct *mm, u64 addr,
3654 ++ u64 *vma_start, u64 *vma_end,
3655 ++ unsigned long *page_size)
3656 + {
3657 +- int rc;
3658 +- u64 dar;
3659 + struct vm_area_struct *vma = NULL;
3660 +- unsigned long page_size;
3661 +-
3662 +- if (mm == NULL)
3663 +- return -EFAULT;
3664 ++ int rc = 0;
3665 +
3666 + down_read(&mm->mmap_sem);
3667 +
3668 + vma = find_vma(mm, addr);
3669 + if (!vma) {
3670 +- pr_err("Can't find vma for addr %016llx\n", addr);
3671 + rc = -EFAULT;
3672 + goto out;
3673 + }
3674 +- /* get the size of the pages allocated */
3675 +- page_size = vma_kernel_pagesize(vma);
3676 +-
3677 +- for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
3678 +- if (dar < vma->vm_start || dar >= vma->vm_end) {
3679 +- vma = find_vma(mm, addr);
3680 +- if (!vma) {
3681 +- pr_err("Can't find vma for addr %016llx\n", addr);
3682 +- rc = -EFAULT;
3683 +- goto out;
3684 +- }
3685 +- /* get the size of the pages allocated */
3686 +- page_size = vma_kernel_pagesize(vma);
3687 ++ *page_size = vma_kernel_pagesize(vma);
3688 ++ *vma_start = vma->vm_start;
3689 ++ *vma_end = vma->vm_end;
3690 ++out:
3691 ++ up_read(&mm->mmap_sem);
3692 ++ return rc;
3693 ++}
3694 ++
3695 ++int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
3696 ++{
3697 ++ int rc;
3698 ++ u64 dar, vma_start, vma_end;
3699 ++ unsigned long page_size;
3700 ++
3701 ++ if (mm == NULL)
3702 ++ return -EFAULT;
3703 ++
3704 ++ /*
3705 ++ * The buffer we have to process can extend over several pages
3706 ++ * and may also cover several VMAs.
3707 ++ * We iterate over all the pages. The page size could vary
3708 ++ * between VMAs.
3709 ++ */
3710 ++ rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size);
3711 ++ if (rc)
3712 ++ return rc;
3713 ++
3714 ++ for (dar = (addr & ~(page_size - 1)); dar < (addr + size);
3715 ++ dar += page_size) {
3716 ++ if (dar < vma_start || dar >= vma_end) {
3717 ++ /*
3718 ++ * We don't hold the mm->mmap_sem semaphore
3719 ++ * while iterating, since the semaphore is
3720 ++ * required by one of the lower-level page
3721 ++ * fault processing functions and it could
3722 ++ * create a deadlock.
3723 ++ *
3724 ++		 * It means the VMAs can be altered between two
3725 ++		 * loop iterations and we could theoretically
3726 ++ * miss a page (however unlikely). But that's
3727 ++ * not really a problem, as the driver will
3728 ++ * retry access, get another page fault on the
3729 ++ * missing page and call us again.
3730 ++ */
3731 ++ rc = get_vma_info(mm, dar, &vma_start, &vma_end,
3732 ++ &page_size);
3733 ++ if (rc)
3734 ++ return rc;
3735 + }
3736 +
3737 + rc = cxl_handle_mm_fault(mm, flags, dar);
3738 +- if (rc) {
3739 +- pr_err("cxl_handle_mm_fault failed %d", rc);
3740 +- rc = -EFAULT;
3741 +- goto out;
3742 +- }
3743 ++ if (rc)
3744 ++ return -EFAULT;
3745 + }
3746 +- rc = 0;
3747 +-out:
3748 +- up_read(&mm->mmap_sem);
3749 +- return rc;
3750 ++ return 0;
3751 + }
3752 + EXPORT_SYMBOL_GPL(cxllib_handle_fault);
3753 +diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
3754 +index 2cfb963d9f37..9c6f639d8a57 100644
3755 +--- a/drivers/mmc/core/block.c
3756 ++++ b/drivers/mmc/core/block.c
3757 +@@ -3087,6 +3087,7 @@ static void __exit mmc_blk_exit(void)
3758 + mmc_unregister_driver(&mmc_driver);
3759 + unregister_blkdev(MMC_BLOCK_MAJOR, "mmc");
3760 + unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES);
3761 ++ bus_unregister(&mmc_rpmb_bus_type);
3762 + }
3763 +
3764 + module_init(mmc_blk_init);
3765 +diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
3766 +index 712e08d9a45e..a0168e9e4fce 100644
3767 +--- a/drivers/mmc/host/jz4740_mmc.c
3768 ++++ b/drivers/mmc/host/jz4740_mmc.c
3769 +@@ -362,9 +362,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
3770 + host->irq_mask &= ~irq;
3771 + else
3772 + host->irq_mask |= irq;
3773 +- spin_unlock_irqrestore(&host->lock, flags);
3774 +
3775 + writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
3776 ++ spin_unlock_irqrestore(&host->lock, flags);
3777 + }
3778 +
3779 + static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
3780 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
3781 +index 82c4f05f91d8..0a0852524491 100644
3782 +--- a/drivers/mmc/host/sdhci-pci-core.c
3783 ++++ b/drivers/mmc/host/sdhci-pci-core.c
3784 +@@ -1318,7 +1318,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev)
3785 + pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
3786 + }
3787 +
3788 +-static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
3789 ++static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
3790 + {
3791 + struct sdhci_pci_slot *slot = sdhci_priv(host);
3792 + struct pci_dev *pdev = slot->chip->pdev;
3793 +@@ -1357,6 +1357,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
3794 + return 0;
3795 + }
3796 +
3797 ++static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
3798 ++{
3799 ++ struct sdhci_host *host = mmc_priv(mmc);
3800 ++
3801 ++ /* AMD requires custom HS200 tuning */
3802 ++ if (host->timing == MMC_TIMING_MMC_HS200)
3803 ++ return amd_execute_tuning_hs200(host, opcode);
3804 ++
3805 ++ /* Otherwise perform standard SDHCI tuning */
3806 ++ return sdhci_execute_tuning(mmc, opcode);
3807 ++}
3808 ++
3809 ++static int amd_probe_slot(struct sdhci_pci_slot *slot)
3810 ++{
3811 ++ struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
3812 ++
3813 ++ ops->execute_tuning = amd_execute_tuning;
3814 ++
3815 ++ return 0;
3816 ++}
3817 ++
3818 + static int amd_probe(struct sdhci_pci_chip *chip)
3819 + {
3820 + struct pci_dev *smbus_dev;
3821 +@@ -1391,12 +1412,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = {
3822 + .set_bus_width = sdhci_set_bus_width,
3823 + .reset = sdhci_reset,
3824 + .set_uhs_signaling = sdhci_set_uhs_signaling,
3825 +- .platform_execute_tuning = amd_execute_tuning,
3826 + };
3827 +
3828 + static const struct sdhci_pci_fixes sdhci_amd = {
3829 + .probe = amd_probe,
3830 + .ops = &amd_sdhci_pci_ops,
3831 ++ .probe_slot = amd_probe_slot,
3832 + };
3833 +
3834 + static const struct pci_device_id pci_ids[] = {
3835 +diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
3836 +index 33494241245a..8fce18253465 100644
3837 +--- a/drivers/mmc/host/tmio_mmc_core.c
3838 ++++ b/drivers/mmc/host/tmio_mmc_core.c
3839 +@@ -911,7 +911,7 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
3840 + host->check_scc_error(host);
3841 +
3842 + /* If SET_BLOCK_COUNT, continue with main command */
3843 +- if (host->mrq) {
3844 ++ if (host->mrq && !mrq->cmd->error) {
3845 + tmio_process_mrq(host, mrq);
3846 + return;
3847 + }
3848 +diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
3849 +index b1fc28f63882..d0b63bbf46a7 100644
3850 +--- a/drivers/mtd/ubi/block.c
3851 ++++ b/drivers/mtd/ubi/block.c
3852 +@@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
3853 + * in any case.
3854 + */
3855 + if (mode & FMODE_WRITE) {
3856 +- ret = -EPERM;
3857 ++ ret = -EROFS;
3858 + goto out_unlock;
3859 + }
3860 +
3861 +diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
3862 +index e941395de3ae..753494e042d5 100644
3863 +--- a/drivers/mtd/ubi/build.c
3864 ++++ b/drivers/mtd/ubi/build.c
3865 +@@ -854,6 +854,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
3866 + return -EINVAL;
3867 + }
3868 +
3869 ++ /*
3870 ++ * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
3871 ++ * MLC NAND is different and needs special care, otherwise UBI or UBIFS
3872 ++ * will die soon and you will lose all your data.
3873 ++ */
3874 ++ if (mtd->type == MTD_MLCNANDFLASH) {
3875 ++ pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
3876 ++ mtd->index);
3877 ++ return -EINVAL;
3878 ++ }
3879 ++
3880 + if (ubi_num == UBI_DEV_NUM_AUTO) {
3881 + /* Search for an empty slot in the @ubi_devices array */
3882 + for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
3883 +diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
3884 +index 590d967011bb..98f7d6be8d1f 100644
3885 +--- a/drivers/mtd/ubi/fastmap-wl.c
3886 ++++ b/drivers/mtd/ubi/fastmap-wl.c
3887 +@@ -362,7 +362,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
3888 + {
3889 + int i;
3890 +
3891 +- flush_work(&ubi->fm_work);
3892 + return_unused_pool_pebs(ubi, &ubi->fm_pool);
3893 + return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
3894 +
3895 +diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
3896 +index f8913b8124b6..233907889f96 100644
3897 +--- a/drivers/nvdimm/dimm.c
3898 ++++ b/drivers/nvdimm/dimm.c
3899 +@@ -67,9 +67,11 @@ static int nvdimm_probe(struct device *dev)
3900 + ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
3901 + nd_label_copy(ndd, to_next_namespace_index(ndd),
3902 + to_current_namespace_index(ndd));
3903 +- rc = nd_label_reserve_dpa(ndd);
3904 +- if (ndd->ns_current >= 0)
3905 +- nvdimm_set_aliasing(dev);
3906 ++ if (ndd->ns_current >= 0) {
3907 ++ rc = nd_label_reserve_dpa(ndd);
3908 ++ if (rc == 0)
3909 ++ nvdimm_set_aliasing(dev);
3910 ++ }
3911 + nvdimm_clear_locked(dev);
3912 + nvdimm_bus_unlock(dev);
3913 +
3914 +diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
3915 +index 097794d9f786..175e200939b0 100644
3916 +--- a/drivers/nvdimm/dimm_devs.c
3917 ++++ b/drivers/nvdimm/dimm_devs.c
3918 +@@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
3919 + int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
3920 + {
3921 + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
3922 ++ int rc = validate_dimm(ndd), cmd_rc = 0;
3923 + struct nd_cmd_get_config_data_hdr *cmd;
3924 + struct nvdimm_bus_descriptor *nd_desc;
3925 +- int rc = validate_dimm(ndd);
3926 + u32 max_cmd_size, config_size;
3927 + size_t offset;
3928 +
3929 +@@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
3930 + cmd->in_offset = offset;
3931 + rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
3932 + ND_CMD_GET_CONFIG_DATA, cmd,
3933 +- cmd->in_length + sizeof(*cmd), NULL);
3934 +- if (rc || cmd->status) {
3935 +- rc = -ENXIO;
3936 ++ cmd->in_length + sizeof(*cmd), &cmd_rc);
3937 ++ if (rc < 0)
3938 ++ break;
3939 ++ if (cmd_rc < 0) {
3940 ++ rc = cmd_rc;
3941 + break;
3942 + }
3943 + memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
3944 +@@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
3945 + int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
3946 + void *buf, size_t len)
3947 + {
3948 +- int rc = validate_dimm(ndd);
3949 + size_t max_cmd_size, buf_offset;
3950 + struct nd_cmd_set_config_hdr *cmd;
3951 ++ int rc = validate_dimm(ndd), cmd_rc = 0;
3952 + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
3953 + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
3954 +
3955 +@@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
3956 + for (buf_offset = 0; len; len -= cmd->in_length,
3957 + buf_offset += cmd->in_length) {
3958 + size_t cmd_size;
3959 +- u32 *status;
3960 +
3961 + cmd->in_offset = offset + buf_offset;
3962 + cmd->in_length = min(max_cmd_size, len);
3963 +@@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
3964 +
3965 + /* status is output in the last 4-bytes of the command buffer */
3966 + cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
3967 +- status = ((void *) cmd) + cmd_size - sizeof(u32);
3968 +
3969 + rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
3970 +- ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL);
3971 +- if (rc || *status) {
3972 +- rc = rc ? rc : -ENXIO;
3973 ++ ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
3974 ++ if (rc < 0)
3975 ++ break;
3976 ++ if (cmd_rc < 0) {
3977 ++ rc = cmd_rc;
3978 + break;
3979 + }
3980 + }
3981 +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
3982 +index 658ada497be0..6747d899f46e 100644
3983 +--- a/drivers/nvdimm/namespace_devs.c
3984 ++++ b/drivers/nvdimm/namespace_devs.c
3985 +@@ -1926,7 +1926,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
3986 + }
3987 +
3988 + if (i < nd_region->ndr_mappings) {
3989 +- struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
3990 ++ struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
3991 +
3992 + /*
3993 + * Give up if we don't find an instance of a uuid at each
3994 +@@ -1934,7 +1934,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
3995 + * find a dimm with two instances of the same uuid.
3996 + */
3997 + dev_err(&nd_region->dev, "%s missing label for %pUb\n",
3998 +- dev_name(ndd->dev), nd_label->uuid);
3999 ++ nvdimm_name(nvdimm), nd_label->uuid);
4000 + rc = -EINVAL;
4001 + goto err;
4002 + }
4003 +diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
4004 +index e2198a2feeca..b45b375c0e6c 100644
4005 +--- a/drivers/pci/hotplug/acpiphp_glue.c
4006 ++++ b/drivers/pci/hotplug/acpiphp_glue.c
4007 +@@ -541,6 +541,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
4008 + {
4009 + unsigned long long sta = 0;
4010 + struct acpiphp_func *func;
4011 ++ u32 dvid;
4012 +
4013 + list_for_each_entry(func, &slot->funcs, sibling) {
4014 + if (func->flags & FUNC_HAS_STA) {
4015 +@@ -551,19 +552,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
4016 + if (ACPI_SUCCESS(status) && sta)
4017 + break;
4018 + } else {
4019 +- u32 dvid;
4020 +-
4021 +- pci_bus_read_config_dword(slot->bus,
4022 +- PCI_DEVFN(slot->device,
4023 +- func->function),
4024 +- PCI_VENDOR_ID, &dvid);
4025 +- if (dvid != 0xffffffff) {
4026 ++ if (pci_bus_read_dev_vendor_id(slot->bus,
4027 ++ PCI_DEVFN(slot->device, func->function),
4028 ++ &dvid, 0)) {
4029 + sta = ACPI_STA_ALL;
4030 + break;
4031 + }
4032 + }
4033 + }
4034 +
4035 ++ if (!sta) {
4036 ++ /*
4037 ++		 * Check the slot itself: the ACPI slot may be a device
4038 ++		 * below a PCIe upstream port, in which case it may not
4039 ++		 * even be reachable yet.
4040 ++ */
4041 ++ if (pci_bus_read_dev_vendor_id(slot->bus,
4042 ++ PCI_DEVFN(slot->device, 0), &dvid, 0)) {
4043 ++ sta = ACPI_STA_ALL;
4044 ++ }
4045 ++ }
4046 ++
4047 + return (unsigned int)sta;
4048 + }
4049 +
4050 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
4051 +index 46d47bd6ca1f..81241f981ad7 100644
4052 +--- a/drivers/pci/quirks.c
4053 ++++ b/drivers/pci/quirks.c
4054 +@@ -4815,9 +4815,13 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
4055 +
4056 + pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
4057 + }
4058 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
4059 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
4060 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
4061 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
4062 + DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
4063 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
4064 ++DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
4065 +
4066 + #ifdef CONFIG_PCI_ATS
4067 + /*
4068 +diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
4069 +index aa857be692cf..d5ae307ef4e1 100644
4070 +--- a/drivers/phy/allwinner/phy-sun4i-usb.c
4071 ++++ b/drivers/phy/allwinner/phy-sun4i-usb.c
4072 +@@ -410,11 +410,13 @@ static bool sun4i_usb_phy0_poll(struct sun4i_usb_phy_data *data)
4073 + return true;
4074 +
4075 + /*
4076 +- * The A31 companion pmic (axp221) does not generate vbus change
4077 +- * interrupts when the board is driving vbus, so we must poll
4078 ++ * The A31/A23/A33 companion pmics (AXP221/AXP223) do not
4079 ++ * generate vbus change interrupts when the board is driving
4080 ++ * vbus using the N_VBUSEN pin on the pmic, so we must poll
4081 + * when using the pmic for vbus-det _and_ we're driving vbus.
4082 + */
4083 +- if (data->cfg->type == sun6i_a31_phy &&
4084 ++ if ((data->cfg->type == sun6i_a31_phy ||
4085 ++ data->cfg->type == sun8i_a33_phy) &&
4086 + data->vbus_power_supply && data->phys[0].regulator_on)
4087 + return true;
4088 +
4089 +@@ -885,7 +887,7 @@ static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
4090 +
4091 + static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
4092 + .num_phys = 2,
4093 +- .type = sun4i_a10_phy,
4094 ++ .type = sun6i_a31_phy,
4095 + .disc_thresh = 3,
4096 + .phyctl_offset = REG_PHYCTL_A10,
4097 + .dedicated_clocks = true,
4098 +diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
4099 +index f5d97e0ad52b..98b0a933a946 100644
4100 +--- a/drivers/pwm/pwm-mediatek.c
4101 ++++ b/drivers/pwm/pwm-mediatek.c
4102 +@@ -29,7 +29,9 @@
4103 + #define PWMGDUR 0x0c
4104 + #define PWMWAVENUM 0x28
4105 + #define PWMDWIDTH 0x2c
4106 ++#define PWM45DWIDTH_FIXUP 0x30
4107 + #define PWMTHRES 0x30
4108 ++#define PWM45THRES_FIXUP 0x34
4109 +
4110 + #define PWM_CLK_DIV_MAX 7
4111 +
4112 +@@ -54,6 +56,7 @@ static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = {
4113 +
4114 + struct mtk_pwm_platform_data {
4115 + unsigned int num_pwms;
4116 ++ bool pwm45_fixup;
4117 + };
4118 +
4119 + /**
4120 +@@ -66,6 +69,7 @@ struct mtk_pwm_chip {
4121 + struct pwm_chip chip;
4122 + void __iomem *regs;
4123 + struct clk *clks[MTK_CLK_MAX];
4124 ++ const struct mtk_pwm_platform_data *soc;
4125 + };
4126 +
4127 + static const unsigned int mtk_pwm_reg_offset[] = {
4128 +@@ -131,18 +135,25 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
4129 + {
4130 + struct mtk_pwm_chip *pc = to_mtk_pwm_chip(chip);
4131 + struct clk *clk = pc->clks[MTK_CLK_PWM1 + pwm->hwpwm];
4132 +- u32 resolution, clkdiv = 0;
4133 ++ u32 clkdiv = 0, cnt_period, cnt_duty, reg_width = PWMDWIDTH,
4134 ++ reg_thres = PWMTHRES;
4135 ++ u64 resolution;
4136 + int ret;
4137 +
4138 + ret = mtk_pwm_clk_enable(chip, pwm);
4139 + if (ret < 0)
4140 + return ret;
4141 +
4142 +- resolution = NSEC_PER_SEC / clk_get_rate(clk);
4143 ++	/* Use a resolution in picoseconds to get higher accuracy */
4144 ++ resolution = (u64)NSEC_PER_SEC * 1000;
4145 ++ do_div(resolution, clk_get_rate(clk));
4146 +
4147 +- while (period_ns / resolution > 8191) {
4148 ++ cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000, resolution);
4149 ++ while (cnt_period > 8191) {
4150 + resolution *= 2;
4151 + clkdiv++;
4152 ++ cnt_period = DIV_ROUND_CLOSEST_ULL((u64)period_ns * 1000,
4153 ++ resolution);
4154 + }
4155 +
4156 + if (clkdiv > PWM_CLK_DIV_MAX) {
4157 +@@ -151,9 +162,19 @@ static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
4158 + return -EINVAL;
4159 + }
4160 +
4161 ++ if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) {
4162 ++ /*
4163 ++		 * PWM[4,5] have distinct offsets for PWMDWIDTH and PWMTHRES
4164 ++ * from the other PWMs on MT7623.
4165 ++ */
4166 ++ reg_width = PWM45DWIDTH_FIXUP;
4167 ++ reg_thres = PWM45THRES_FIXUP;
4168 ++ }
4169 ++
4170 ++ cnt_duty = DIV_ROUND_CLOSEST_ULL((u64)duty_ns * 1000, resolution);
4171 + mtk_pwm_writel(pc, pwm->hwpwm, PWMCON, BIT(15) | clkdiv);
4172 +- mtk_pwm_writel(pc, pwm->hwpwm, PWMDWIDTH, period_ns / resolution);
4173 +- mtk_pwm_writel(pc, pwm->hwpwm, PWMTHRES, duty_ns / resolution);
4174 ++ mtk_pwm_writel(pc, pwm->hwpwm, reg_width, cnt_period);
4175 ++ mtk_pwm_writel(pc, pwm->hwpwm, reg_thres, cnt_duty);
4176 +
4177 + mtk_pwm_clk_disable(chip, pwm);
4178 +
4179 +@@ -211,6 +232,7 @@ static int mtk_pwm_probe(struct platform_device *pdev)
4180 + data = of_device_get_match_data(&pdev->dev);
4181 + if (data == NULL)
4182 + return -EINVAL;
4183 ++ pc->soc = data;
4184 +
4185 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4186 + pc->regs = devm_ioremap_resource(&pdev->dev, res);
4187 +@@ -251,14 +273,17 @@ static int mtk_pwm_remove(struct platform_device *pdev)
4188 +
4189 + static const struct mtk_pwm_platform_data mt2712_pwm_data = {
4190 + .num_pwms = 8,
4191 ++ .pwm45_fixup = false,
4192 + };
4193 +
4194 + static const struct mtk_pwm_platform_data mt7622_pwm_data = {
4195 + .num_pwms = 6,
4196 ++ .pwm45_fixup = false,
4197 + };
4198 +
4199 + static const struct mtk_pwm_platform_data mt7623_pwm_data = {
4200 + .num_pwms = 5,
4201 ++ .pwm45_fixup = true,
4202 + };
4203 +
4204 + static const struct of_device_id mtk_pwm_of_match[] = {
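
The pwm-mediatek rework carries the clock resolution in picoseconds so
the integer division keeps three extra decimal digits before rounding.
A compilable sketch of the arithmetic assuming a 26 MHz source clock
(the clock-divider walk from the hunk is omitted):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t clk_rate = 26000000;    /* assumed example rate, Hz */
        uint64_t period_ns = 1000000;    /* requested 1 ms period */

        /* resolution in ps instead of ns: ~38461 instead of a badly
         * truncated 38 */
        uint64_t res_ps = 1000000000000ull / clk_rate;

        /* DIV_ROUND_CLOSEST equivalent */
        uint64_t cnt_period = (period_ns * 1000 + res_ps / 2) / res_ps;

        printf("cnt_period=%llu\n", (unsigned long long)cnt_period);
        return 0;
    }

With nanosecond resolution the truncation to 38 ns would have skewed the
count by over one percent; the picosecond form lands on 26000 exactly.
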
4205 +diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
4206 +index 1c85ecc9e7ac..0fcf94ffad32 100644
4207 +--- a/drivers/pwm/pwm-rcar.c
4208 ++++ b/drivers/pwm/pwm-rcar.c
4209 +@@ -156,8 +156,12 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
4210 + if (div < 0)
4211 + return div;
4212 +
4213 +- /* Let the core driver set pwm->period if disabled and duty_ns == 0 */
4214 +- if (!pwm_is_enabled(pwm) && !duty_ns)
4215 ++ /*
4216 ++ * Let the core driver set pwm->period if disabled and duty_ns == 0.
4217 ++	 * But this driver should avoid setting a new duty_ns if the
4218 ++	 * current duty_cycle is not set
4219 ++ */
4220 ++ if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle)
4221 + return 0;
4222 +
4223 + rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
4224 +diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
4225 +index 435ce5ec648a..59bd749c2f25 100644
4226 +--- a/drivers/soc/mediatek/mtk-scpsys.c
4227 ++++ b/drivers/soc/mediatek/mtk-scpsys.c
4228 +@@ -992,7 +992,7 @@ static int scpsys_probe(struct platform_device *pdev)
4229 +
4230 + pd_data = &scp->pd_data;
4231 +
4232 +- for (i = 0, sd = soc->subdomains ; i < soc->num_subdomains ; i++) {
4233 ++ for (i = 0, sd = soc->subdomains; i < soc->num_subdomains; i++, sd++) {
4234 + ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin],
4235 + pd_data->domains[sd->subdomain]);
4236 + if (ret && IS_ENABLED(CONFIG_PM))
4237 +diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
4238 +index 4a11fc0d4136..b7936f815373 100644
4239 +--- a/drivers/spi/spi-atmel.c
4240 ++++ b/drivers/spi/spi-atmel.c
4241 +@@ -1512,6 +1512,11 @@ static void atmel_spi_init(struct atmel_spi *as)
4242 + {
4243 + spi_writel(as, CR, SPI_BIT(SWRST));
4244 + spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
4245 ++
4246 ++ /* It is recommended to enable FIFOs first thing after reset */
4247 ++ if (as->fifo_size)
4248 ++ spi_writel(as, CR, SPI_BIT(FIFOEN));
4249 ++
4250 + if (as->caps.has_wdrbt) {
4251 + spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
4252 + | SPI_BIT(MSTR));
4253 +@@ -1522,9 +1527,6 @@ static void atmel_spi_init(struct atmel_spi *as)
4254 + if (as->use_pdc)
4255 + spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
4256 + spi_writel(as, CR, SPI_BIT(SPIEN));
4257 +-
4258 +- if (as->fifo_size)
4259 +- spi_writel(as, CR, SPI_BIT(FIFOEN));
4260 + }
4261 +
4262 + static int atmel_spi_probe(struct platform_device *pdev)
4263 +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
4264 +index b33a727a0158..7b213faa0a2b 100644
4265 +--- a/drivers/spi/spi.c
4266 ++++ b/drivers/spi/spi.c
4267 +@@ -779,8 +779,14 @@ static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
4268 + for (i = 0; i < sgs; i++) {
4269 +
4270 + if (vmalloced_buf || kmap_buf) {
4271 +- min = min_t(size_t,
4272 +- len, desc_len - offset_in_page(buf));
4273 ++ /*
4274 ++ * Next scatterlist entry size is the minimum between
4275 ++ * the desc_len and the remaining buffer length that
4276 ++ * fits in a page.
4277 ++ */
4278 ++ min = min_t(size_t, desc_len,
4279 ++ min_t(size_t, len,
4280 ++ PAGE_SIZE - offset_in_page(buf)));
4281 + if (vmalloced_buf)
4282 + vm_page = vmalloc_to_page(buf);
4283 + else
4284 +@@ -2254,12 +2260,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
4285 + mutex_lock(&board_lock);
4286 + found = idr_find(&spi_master_idr, id);
4287 + mutex_unlock(&board_lock);
4288 +- if (found != ctlr) {
4289 +- dev_dbg(&ctlr->dev,
4290 +- "attempting to delete unregistered controller [%s]\n",
4291 +- dev_name(&ctlr->dev));
4292 +- return;
4293 +- }
4294 + if (ctlr->queued) {
4295 + if (spi_destroy_queue(ctlr))
4296 + dev_err(&ctlr->dev, "queue remove failed\n");
4297 +@@ -2272,7 +2272,8 @@ void spi_unregister_controller(struct spi_controller *ctlr)
4298 + device_unregister(&ctlr->dev);
4299 + /* free bus id */
4300 + mutex_lock(&board_lock);
4301 +- idr_remove(&spi_master_idr, id);
4302 ++ if (found == ctlr)
4303 ++ idr_remove(&spi_master_idr, id);
4304 + mutex_unlock(&board_lock);
4305 + }
4306 + EXPORT_SYMBOL_GPL(spi_unregister_controller);
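
The spi_map_buf() fix caps each scatterlist entry by three limits rather
than two: the descriptor length, the bytes remaining, and the bytes left
in the current page. A tiny sketch of the case the old formula got wrong
(addresses and sizes invented):

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ul

    static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

    int main(void)
    {
        size_t buf_addr = 0x10000ff0;   /* 16 bytes left in this page */
        size_t len = 65536, desc_len = 65535;

        size_t in_page = PAGE_SIZE - (buf_addr & (PAGE_SIZE - 1));
        size_t entry = min_sz(desc_len, min_sz(len, in_page));

        /* old formula: min(len, desc_len - offset) = 61455, which would
         * have spilled one sg entry across many page boundaries */
        printf("first sg entry: %zu bytes\n", entry);   /* 16 */
        return 0;
    }
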
4307 +diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
4308 +index 4f9f9dca5e6a..545ef024841d 100644
4309 +--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
4310 ++++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
4311 +@@ -1279,7 +1279,10 @@ const struct v4l2_file_operations atomisp_fops = {
4312 + .mmap = atomisp_mmap,
4313 + .unlocked_ioctl = video_ioctl2,
4314 + #ifdef CONFIG_COMPAT
4315 ++ /*
4316 ++ * There are problems with this code. Disable this for now.
4317 + .compat_ioctl32 = atomisp_compat_ioctl32,
4318 ++ */
4319 + #endif
4320 + .poll = atomisp_poll,
4321 + };
4322 +@@ -1291,7 +1294,10 @@ const struct v4l2_file_operations atomisp_file_fops = {
4323 + .mmap = atomisp_file_mmap,
4324 + .unlocked_ioctl = video_ioctl2,
4325 + #ifdef CONFIG_COMPAT
4326 ++ /*
4327 ++ * There are problems with this code. Disable this for now.
4328 + .compat_ioctl32 = atomisp_compat_ioctl32,
4329 ++ */
4330 + #endif
4331 + .poll = atomisp_poll,
4332 + };
4333 +diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
4334 +index a67781b7a0b2..ee3a215b333a 100644
4335 +--- a/drivers/thermal/imx_thermal.c
4336 ++++ b/drivers/thermal/imx_thermal.c
4337 +@@ -637,6 +637,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
4338 + regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
4339 + regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
4340 +
4341 ++ data->irq_enabled = true;
4342 ++ data->mode = THERMAL_DEVICE_ENABLED;
4343 ++
4344 + ret = devm_request_threaded_irq(&pdev->dev, data->irq,
4345 + imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
4346 + 0, "imx_thermal", data);
4347 +@@ -649,9 +652,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
4348 + return ret;
4349 + }
4350 +
4351 +- data->irq_enabled = true;
4352 +- data->mode = THERMAL_DEVICE_ENABLED;
4353 +-
4354 + return 0;
4355 + }
4356 +
4357 +diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
4358 +index ab02d13f40b7..3e12cb8a23cc 100644
4359 +--- a/drivers/thunderbolt/icm.c
4360 ++++ b/drivers/thunderbolt/icm.c
4361 +@@ -383,6 +383,15 @@ static void remove_switch(struct tb_switch *sw)
4362 + tb_switch_remove(sw);
4363 + }
4364 +
4365 ++static void remove_xdomain(struct tb_xdomain *xd)
4366 ++{
4367 ++ struct tb_switch *sw;
4368 ++
4369 ++ sw = tb_to_switch(xd->dev.parent);
4370 ++ tb_port_at(xd->route, sw)->xdomain = NULL;
4371 ++ tb_xdomain_remove(xd);
4372 ++}
4373 ++
4374 + static void
4375 + icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
4376 + {
4377 +@@ -391,6 +400,7 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
4378 + struct tb_switch *sw, *parent_sw;
4379 + struct icm *icm = tb_priv(tb);
4380 + bool authorized = false;
4381 ++ struct tb_xdomain *xd;
4382 + u8 link, depth;
4383 + u64 route;
4384 + int ret;
4385 +@@ -467,6 +477,13 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr)
4386 + tb_switch_put(sw);
4387 + }
4388 +
4389 ++ /* Remove existing XDomain connection if found */
4390 ++ xd = tb_xdomain_find_by_link_depth(tb, link, depth);
4391 ++ if (xd) {
4392 ++ remove_xdomain(xd);
4393 ++ tb_xdomain_put(xd);
4394 ++ }
4395 ++
4396 + parent_sw = tb_switch_find_by_link_depth(tb, link, depth - 1);
4397 + if (!parent_sw) {
4398 + tb_err(tb, "failed to find parent switch for %u.%u\n",
4399 +@@ -529,15 +546,6 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr)
4400 + tb_switch_put(sw);
4401 + }
4402 +
4403 +-static void remove_xdomain(struct tb_xdomain *xd)
4404 +-{
4405 +- struct tb_switch *sw;
4406 +-
4407 +- sw = tb_to_switch(xd->dev.parent);
4408 +- tb_port_at(xd->route, sw)->xdomain = NULL;
4409 +- tb_xdomain_remove(xd);
4410 +-}
4411 +-
4412 + static void
4413 + icm_fr_xdomain_connected(struct tb *tb, const struct icm_pkg_header *hdr)
4414 + {
4415 +@@ -728,14 +736,14 @@ static bool icm_ar_is_supported(struct tb *tb)
4416 + static int icm_ar_get_mode(struct tb *tb)
4417 + {
4418 + struct tb_nhi *nhi = tb->nhi;
4419 +- int retries = 5;
4420 ++ int retries = 60;
4421 + u32 val;
4422 +
4423 + do {
4424 + val = ioread32(nhi->iobase + REG_FW_STS);
4425 + if (val & REG_FW_STS_NVM_AUTH_DONE)
4426 + break;
4427 +- msleep(30);
4428 ++ msleep(50);
4429 + } while (--retries);
4430 +
4431 + if (!retries) {
4432 +@@ -915,6 +923,9 @@ static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
4433 + struct icm *icm = tb_priv(tb);
4434 + u32 val;
4435 +
4436 ++ if (!icm->upstream_port)
4437 ++ return -ENODEV;
4438 ++
4439 + /* Put ARC to wait for CIO reset event to happen */
4440 + val = ioread32(nhi->iobase + REG_FW_STS);
4441 + val |= REG_FW_STS_CIO_RESET_REQ;
4442 +@@ -1054,6 +1065,9 @@ static int icm_firmware_init(struct tb *tb)
4443 + break;
4444 +
4445 + default:
4446 ++ if (ret < 0)
4447 ++ return ret;
4448 ++
4449 + tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
4450 + return -ENODEV;
4451 + }
4452 +diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
4453 +index f45bcbc63738..80c33c7404f5 100644
4454 +--- a/drivers/thunderbolt/nhi.c
4455 ++++ b/drivers/thunderbolt/nhi.c
4456 +@@ -1064,6 +1064,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
4457 + * we just disable hotplug, the
4458 + * pci-tunnels stay alive.
4459 + */
4460 ++ .thaw_noirq = nhi_resume_noirq,
4461 + .restore_noirq = nhi_resume_noirq,
4462 + .suspend = nhi_suspend,
4463 + .freeze = nhi_suspend,
4464 +diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
4465 +index da54ace4dd2f..1cc79785ce42 100644
4466 +--- a/drivers/thunderbolt/switch.c
4467 ++++ b/drivers/thunderbolt/switch.c
4468 +@@ -716,6 +716,13 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
4469 + if (sw->authorized)
4470 + goto unlock;
4471 +
4472 ++ /*
4473 ++ * Make sure there is no PCIe rescan ongoing when a new PCIe
4474 ++ * tunnel is created. Otherwise the PCIe rescan code might find
4475 ++ * the new tunnel too early.
4476 ++ */
4477 ++ pci_lock_rescan_remove();
4478 ++
4479 + switch (val) {
4480 + /* Approve switch */
4481 + case 1:
4482 +@@ -735,6 +742,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
4483 + break;
4484 + }
4485 +
4486 ++ pci_unlock_rescan_remove();
4487 ++
4488 + if (!ret) {
4489 + sw->authorized = val;
4490 + /* Notify status change to the userspace */
4491 +diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
4492 +index 83c14dda6300..bc8242bc4564 100644
4493 +--- a/drivers/usb/core/generic.c
4494 ++++ b/drivers/usb/core/generic.c
4495 +@@ -210,8 +210,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
4496 + if (!udev->parent)
4497 + rc = hcd_bus_suspend(udev, msg);
4498 +
4499 +- /* Non-root devices don't need to do anything for FREEZE or PRETHAW */
4500 +- else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
4501 ++ /*
4502 ++ * Non-root USB2 devices don't need to do anything for FREEZE
4503 ++ * or PRETHAW. USB3 devices don't support global suspend and
4504 ++ * need to be selectively suspended.
4505 ++ */
4506 ++ else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
4507 ++ && (udev->speed < USB_SPEED_SUPER))
4508 + rc = 0;
4509 + else
4510 + rc = usb_port_suspend(udev, msg);
4511 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
4512 +index e94bf91cc58a..df4569df7eaf 100644
4513 +--- a/drivers/usb/dwc3/core.c
4514 ++++ b/drivers/usb/dwc3/core.c
4515 +@@ -119,6 +119,9 @@ static void __dwc3_set_mode(struct work_struct *work)
4516 + if (dwc->dr_mode != USB_DR_MODE_OTG)
4517 + return;
4518 +
4519 ++ if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG)
4520 ++ return;
4521 ++
4522 + switch (dwc->current_dr_role) {
4523 + case DWC3_GCTL_PRTCAP_HOST:
4524 + dwc3_host_exit(dwc);
4525 +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
4526 +index 3ba11136ebf0..c961a94d136b 100644
4527 +--- a/drivers/usb/dwc3/dwc3-pci.c
4528 ++++ b/drivers/usb/dwc3/dwc3-pci.c
4529 +@@ -222,7 +222,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
4530 + ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
4531 + if (ret) {
4532 + dev_err(dev, "couldn't add resources to dwc3 device\n");
4533 +- return ret;
4534 ++ goto err;
4535 + }
4536 +
4537 + dwc->pci = pci;
4538 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
4539 +index 2bda4eb1e9ac..100454c514d5 100644
4540 +--- a/drivers/usb/dwc3/gadget.c
4541 ++++ b/drivers/usb/dwc3/gadget.c
4542 +@@ -166,18 +166,8 @@ static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
4543 + dwc3_ep_inc_trb(&dep->trb_dequeue);
4544 + }
4545 +
4546 +-/**
4547 +- * dwc3_gadget_giveback - call struct usb_request's ->complete callback
4548 +- * @dep: The endpoint to whom the request belongs to
4549 +- * @req: The request we're giving back
4550 +- * @status: completion code for the request
4551 +- *
4552 +- * Must be called with controller's lock held and interrupts disabled. This
4553 +- * function will unmap @req and call its ->complete() callback to notify upper
4554 +- * layers that it has completed.
4555 +- */
4556 +-void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
4557 +- int status)
4558 ++void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
4559 ++ struct dwc3_request *req, int status)
4560 + {
4561 + struct dwc3 *dwc = dep->dwc;
4562 +
4563 +@@ -190,18 +180,35 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
4564 +
4565 + if (req->trb)
4566 + usb_gadget_unmap_request_by_dev(dwc->sysdev,
4567 +- &req->request, req->direction);
4568 ++ &req->request, req->direction);
4569 +
4570 + req->trb = NULL;
4571 +-
4572 + trace_dwc3_gadget_giveback(req);
4573 +
4574 ++ if (dep->number > 1)
4575 ++ pm_runtime_put(dwc->dev);
4576 ++}
4577 ++
4578 ++/**
4579 ++ * dwc3_gadget_giveback - call struct usb_request's ->complete callback
4580 ++ * @dep: The endpoint to whom the request belongs to
4581 ++ * @req: The request we're giving back
4582 ++ * @status: completion code for the request
4583 ++ *
4584 ++ * Must be called with controller's lock held and interrupts disabled. This
4585 ++ * function will unmap @req and call its ->complete() callback to notify upper
4586 ++ * layers that it has completed.
4587 ++ */
4588 ++void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
4589 ++ int status)
4590 ++{
4591 ++ struct dwc3 *dwc = dep->dwc;
4592 ++
4593 ++ dwc3_gadget_del_and_unmap_request(dep, req, status);
4594 ++
4595 + spin_unlock(&dwc->lock);
4596 + usb_gadget_giveback_request(&dep->endpoint, &req->request);
4597 + spin_lock(&dwc->lock);
4598 +-
4599 +- if (dep->number > 1)
4600 +- pm_runtime_put(dwc->dev);
4601 + }
4602 +
4603 + /**
4604 +@@ -1227,7 +1234,7 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
4605 + if (req->trb)
4606 + memset(req->trb, 0, sizeof(struct dwc3_trb));
4607 + dep->queued_requests--;
4608 +- dwc3_gadget_giveback(dep, req, ret);
4609 ++ dwc3_gadget_del_and_unmap_request(dep, req, ret);
4610 + return ret;
4611 + }
4612 +
4613 +diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
4614 +index 4eb96b91cc40..e8f35db42394 100644
4615 +--- a/drivers/usb/gadget/function/f_midi.c
4616 ++++ b/drivers/usb/gadget/function/f_midi.c
4617 +@@ -404,7 +404,8 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
4618 + if (err) {
4619 + ERROR(midi, "%s: couldn't enqueue request: %d\n",
4620 + midi->out_ep->name, err);
4621 +- free_ep_req(midi->out_ep, req);
4622 ++ if (req->buf != NULL)
4623 ++ free_ep_req(midi->out_ep, req);
4624 + return err;
4625 + }
4626 + }
4627 +diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
4628 +index c3fbef2bb5db..09f90447fed5 100644
4629 +--- a/drivers/usb/gadget/u_f.h
4630 ++++ b/drivers/usb/gadget/u_f.h
4631 +@@ -61,7 +61,9 @@ struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
4632 + /* Frees a usb_request previously allocated by alloc_ep_req() */
4633 + static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
4634 + {
4635 ++ WARN_ON(req->buf == NULL);
4636 + kfree(req->buf);
4637 ++ req->buf = NULL;
4638 + usb_ep_free_request(ep, req);
4639 + }
4640 +
4641 +diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
4642 +index 1f8b19d9cf97..6a266687ca99 100644
4643 +--- a/drivers/usb/gadget/udc/core.c
4644 ++++ b/drivers/usb/gadget/udc/core.c
4645 +@@ -238,6 +238,9 @@ EXPORT_SYMBOL_GPL(usb_ep_free_request);
4646 + * arranges to poll once per interval, and the gadget driver usually will
4647 + * have queued some data to transfer at that time.
4648 + *
4649 ++ * Note that @req's ->complete() callback must never be called from
4650 ++ * within usb_ep_queue() as that can create deadlock situations.
4651 ++ *
4652 + * Returns zero, or a negative error code. Endpoints that are not enabled
4653 + * report errors; errors will also be
4654 + * reported when the usb peripheral is disconnected.
4655 +diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
4656 +index 18da4873e52e..91a5027b5c1f 100644
4657 +--- a/drivers/usb/musb/musb_gadget_ep0.c
4658 ++++ b/drivers/usb/musb/musb_gadget_ep0.c
4659 +@@ -89,15 +89,19 @@ static int service_tx_status_request(
4660 + }
4661 +
4662 + is_in = epnum & USB_DIR_IN;
4663 +- if (is_in) {
4664 +- epnum &= 0x0f;
4665 ++ epnum &= 0x0f;
4666 ++ if (epnum >= MUSB_C_NUM_EPS) {
4667 ++ handled = -EINVAL;
4668 ++ break;
4669 ++ }
4670 ++
4671 ++ if (is_in)
4672 + ep = &musb->endpoints[epnum].ep_in;
4673 +- } else {
4674 ++ else
4675 + ep = &musb->endpoints[epnum].ep_out;
4676 +- }
4677 + regs = musb->endpoints[epnum].regs;
4678 +
4679 +- if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
4680 ++ if (!ep->desc) {
4681 + handled = -EINVAL;
4682 + break;
4683 + }
4684 +diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
4685 +index 3a33c5344bd5..9a1c761258ce 100644
4686 +--- a/drivers/watchdog/f71808e_wdt.c
4687 ++++ b/drivers/watchdog/f71808e_wdt.c
4688 +@@ -496,7 +496,7 @@ static bool watchdog_is_running(void)
4689 +
4690 + is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
4691 + && (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
4692 +- & F71808FG_FLAG_WD_EN);
4693 ++ & BIT(F71808FG_FLAG_WD_EN));
4694 +
4695 + superio_exit(watchdog.sioaddr);
4696 +
4697 +diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
4698 +index 82e8f6edfb48..b12e37f27530 100644
4699 +--- a/fs/autofs4/root.c
4700 ++++ b/fs/autofs4/root.c
4701 +@@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir,
4702 +
4703 + autofs4_del_active(dentry);
4704 +
4705 +- inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
4706 ++ inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
4707 + if (!inode)
4708 + return -ENOMEM;
4709 + d_add(dentry, inode);
4710 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
4711 +index c6ec5aa46100..236313efd347 100644
4712 +--- a/fs/ceph/inode.c
4713 ++++ b/fs/ceph/inode.c
4714 +@@ -660,13 +660,15 @@ void ceph_fill_file_time(struct inode *inode, int issued,
4715 + CEPH_CAP_FILE_BUFFER|
4716 + CEPH_CAP_AUTH_EXCL|
4717 + CEPH_CAP_XATTR_EXCL)) {
4718 +- if (timespec_compare(ctime, &inode->i_ctime) > 0) {
4719 ++ if (ci->i_version == 0 ||
4720 ++ timespec_compare(ctime, &inode->i_ctime) > 0) {
4721 + dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
4722 + inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
4723 + ctime->tv_sec, ctime->tv_nsec);
4724 + inode->i_ctime = *ctime;
4725 + }
4726 +- if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
4727 ++ if (ci->i_version == 0 ||
4728 ++ ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
4729 + /* the MDS did a utimes() */
4730 + dout("mtime %ld.%09ld -> %ld.%09ld "
4731 + "tw %d -> %d\n",
4732 +@@ -786,7 +788,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
4733 + new_issued = ~issued & le32_to_cpu(info->cap.caps);
4734 +
4735 + /* update inode */
4736 +- ci->i_version = le64_to_cpu(info->version);
4737 + inode->i_rdev = le32_to_cpu(info->rdev);
4738 + inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
4739 +
4740 +@@ -857,6 +858,9 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
4741 + xattr_blob = NULL;
4742 + }
4743 +
4744 ++ /* finally update i_version */
4745 ++ ci->i_version = le64_to_cpu(info->version);
4746 ++
4747 + inode->i_mapping->a_ops = &ceph_aops;
4748 +
4749 + switch (inode->i_mode & S_IFMT) {
4750 +diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
4751 +index 687da62daf4e..e901ef6a4813 100644
4752 +--- a/fs/cifs/Kconfig
4753 ++++ b/fs/cifs/Kconfig
4754 +@@ -189,6 +189,7 @@ config CIFS_NFSD_EXPORT
4755 + config CIFS_SMB311
4756 + bool "SMB3.1.1 network file system support (Experimental)"
4757 + depends on CIFS
4758 ++ select CRYPTO_SHA512
4759 +
4760 + help
4761 + This enables experimental support for the newest, SMB3.1.1, dialect.
4762 +diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
4763 +index f2b0a7f124da..a6ef088e057b 100644
4764 +--- a/fs/cifs/cifsencrypt.c
4765 ++++ b/fs/cifs/cifsencrypt.c
4766 +@@ -36,37 +36,6 @@
4767 + #include <crypto/skcipher.h>
4768 + #include <crypto/aead.h>
4769 +
4770 +-static int
4771 +-cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
4772 +-{
4773 +- int rc;
4774 +- unsigned int size;
4775 +-
4776 +- if (server->secmech.sdescmd5 != NULL)
4777 +- return 0; /* already allocated */
4778 +-
4779 +- server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
4780 +- if (IS_ERR(server->secmech.md5)) {
4781 +- cifs_dbg(VFS, "could not allocate crypto md5\n");
4782 +- rc = PTR_ERR(server->secmech.md5);
4783 +- server->secmech.md5 = NULL;
4784 +- return rc;
4785 +- }
4786 +-
4787 +- size = sizeof(struct shash_desc) +
4788 +- crypto_shash_descsize(server->secmech.md5);
4789 +- server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
4790 +- if (!server->secmech.sdescmd5) {
4791 +- crypto_free_shash(server->secmech.md5);
4792 +- server->secmech.md5 = NULL;
4793 +- return -ENOMEM;
4794 +- }
4795 +- server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
4796 +- server->secmech.sdescmd5->shash.flags = 0x0;
4797 +-
4798 +- return 0;
4799 +-}
4800 +-
4801 + int __cifs_calc_signature(struct smb_rqst *rqst,
4802 + struct TCP_Server_Info *server, char *signature,
4803 + struct shash_desc *shash)
4804 +@@ -132,13 +101,10 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
4805 + if (!rqst->rq_iov || !signature || !server)
4806 + return -EINVAL;
4807 +
4808 +- if (!server->secmech.sdescmd5) {
4809 +- rc = cifs_crypto_shash_md5_allocate(server);
4810 +- if (rc) {
4811 +- cifs_dbg(VFS, "%s: Can't alloc md5 crypto\n", __func__);
4812 +- return -1;
4813 +- }
4814 +- }
4815 ++ rc = cifs_alloc_hash("md5", &server->secmech.md5,
4816 ++ &server->secmech.sdescmd5);
4817 ++ if (rc)
4818 ++ return -1;
4819 +
4820 + rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
4821 + if (rc) {
4822 +@@ -663,37 +629,6 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
4823 + return rc;
4824 + }
4825 +
4826 +-static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
4827 +-{
4828 +- int rc;
4829 +- unsigned int size;
4830 +-
4831 +- /* check if already allocated */
4832 +- if (server->secmech.sdeschmacmd5)
4833 +- return 0;
4834 +-
4835 +- server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
4836 +- if (IS_ERR(server->secmech.hmacmd5)) {
4837 +- cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
4838 +- rc = PTR_ERR(server->secmech.hmacmd5);
4839 +- server->secmech.hmacmd5 = NULL;
4840 +- return rc;
4841 +- }
4842 +-
4843 +- size = sizeof(struct shash_desc) +
4844 +- crypto_shash_descsize(server->secmech.hmacmd5);
4845 +- server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
4846 +- if (!server->secmech.sdeschmacmd5) {
4847 +- crypto_free_shash(server->secmech.hmacmd5);
4848 +- server->secmech.hmacmd5 = NULL;
4849 +- return -ENOMEM;
4850 +- }
4851 +- server->secmech.sdeschmacmd5->shash.tfm = server->secmech.hmacmd5;
4852 +- server->secmech.sdeschmacmd5->shash.flags = 0x0;
4853 +-
4854 +- return 0;
4855 +-}
4856 +-
4857 + int
4858 + setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
4859 + {
4860 +@@ -757,9 +692,10 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
4861 +
4862 + mutex_lock(&ses->server->srv_mutex);
4863 +
4864 +- rc = crypto_hmacmd5_alloc(ses->server);
4865 ++ rc = cifs_alloc_hash("hmac(md5)",
4866 ++ &ses->server->secmech.hmacmd5,
4867 ++ &ses->server->secmech.sdeschmacmd5);
4868 + if (rc) {
4869 +- cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
4870 + goto unlock;
4871 + }
4872 +
4873 +@@ -893,6 +829,11 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
4874 + server->secmech.md5 = NULL;
4875 + }
4876 +
4877 ++ if (server->secmech.sha512) {
4878 ++ crypto_free_shash(server->secmech.sha512);
4879 ++ server->secmech.sha512 = NULL;
4880 ++ }
4881 ++
4882 + if (server->secmech.hmacmd5) {
4883 + crypto_free_shash(server->secmech.hmacmd5);
4884 + server->secmech.hmacmd5 = NULL;
4885 +@@ -916,4 +857,6 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
4886 + server->secmech.sdeschmacmd5 = NULL;
4887 + kfree(server->secmech.sdescmd5);
4888 + server->secmech.sdescmd5 = NULL;
4889 ++ kfree(server->secmech.sdescsha512);
4890 ++ server->secmech.sdescsha512 = NULL;
4891 + }
4892 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
4893 +index 32cdea67bbfd..f715609b13f3 100644
4894 +--- a/fs/cifs/cifsfs.c
4895 ++++ b/fs/cifs/cifsfs.c
4896 +@@ -1486,6 +1486,7 @@ MODULE_SOFTDEP("pre: nls");
4897 + MODULE_SOFTDEP("pre: aes");
4898 + MODULE_SOFTDEP("pre: cmac");
4899 + MODULE_SOFTDEP("pre: sha256");
4900 ++MODULE_SOFTDEP("pre: sha512");
4901 + MODULE_SOFTDEP("pre: aead2");
4902 + MODULE_SOFTDEP("pre: ccm");
4903 + module_init(init_cifs)
4904 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
4905 +index 48f7c197cd2d..edc640db0842 100644
4906 +--- a/fs/cifs/cifsglob.h
4907 ++++ b/fs/cifs/cifsglob.h
4908 +@@ -130,10 +130,12 @@ struct cifs_secmech {
4909 + struct crypto_shash *md5; /* md5 hash function */
4910 + struct crypto_shash *hmacsha256; /* hmac-sha256 hash function */
4911 + struct crypto_shash *cmacaes; /* block-cipher based MAC function */
4912 ++ struct crypto_shash *sha512; /* sha512 hash function */
4913 + struct sdesc *sdeschmacmd5; /* ctxt to generate ntlmv2 hash, CR1 */
4914 + struct sdesc *sdescmd5; /* ctxt to generate cifs/smb signature */
4915 + struct sdesc *sdeschmacsha256; /* ctxt to generate smb2 signature */
4916 + struct sdesc *sdesccmacaes; /* ctxt to generate smb3 signature */
4917 ++ struct sdesc *sdescsha512; /* ctxt to generate smb3.11 signing key */
4918 + struct crypto_aead *ccmaesencrypt; /* smb3 encryption aead */
4919 + struct crypto_aead *ccmaesdecrypt; /* smb3 decryption aead */
4920 + };
4921 +@@ -673,7 +675,8 @@ struct TCP_Server_Info {
4922 + unsigned int max_read;
4923 + unsigned int max_write;
4924 + #ifdef CONFIG_CIFS_SMB311
4925 +- __u8 preauth_sha_hash[64]; /* save initital negprot hash */
4926 ++ /* save initial negprot hash */
4927 ++ __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
4928 + #endif /* 3.1.1 */
4929 + struct delayed_work reconnect; /* reconnect workqueue job */
4930 + struct mutex reconnect_mutex; /* prevent simultaneous reconnects */
4931 +@@ -862,7 +865,7 @@ struct cifs_ses {
4932 + __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
4933 + __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
4934 + #ifdef CONFIG_CIFS_SMB311
4935 +- __u8 preauth_sha_hash[64];
4936 ++ __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
4937 + #endif /* 3.1.1 */
4938 + };
4939 +
4940 +@@ -1466,6 +1469,7 @@ struct dfs_info3_param {
4941 + #define CIFS_FATTR_NEED_REVAL 0x4
4942 + #define CIFS_FATTR_INO_COLLISION 0x8
4943 + #define CIFS_FATTR_UNKNOWN_NLINK 0x10
4944 ++#define CIFS_FATTR_FAKE_ROOT_INO 0x20
4945 +
4946 + struct cifs_fattr {
4947 + u32 cf_flags;
4948 +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
4949 +index 93d565186698..365a414a75e9 100644
4950 +--- a/fs/cifs/cifsproto.h
4951 ++++ b/fs/cifs/cifsproto.h
4952 +@@ -542,4 +542,9 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
4953 + struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
4954 + void cifs_aio_ctx_release(struct kref *refcount);
4955 + int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
4956 ++
4957 ++int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
4958 ++ struct sdesc **sdesc);
4959 ++void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
4960 ++
4961 + #endif /* _CIFSPROTO_H */
4962 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
4963 +index 8f9a8cc7cc62..ef8580139cef 100644
4964 +--- a/fs/cifs/inode.c
4965 ++++ b/fs/cifs/inode.c
4966 +@@ -707,6 +707,18 @@ cifs_get_file_info(struct file *filp)
4967 + return rc;
4968 + }
4969 +
4970 ++/* Simple function to return a 64-bit hash of a string. Rarely called */
4971 ++static __u64 simple_hashstr(const char *str)
4972 ++{
4973 ++ const __u64 hash_mult = 1125899906842597L; /* a big enough prime */
4974 ++ __u64 hash = 0;
4975 ++
4976 ++ while (*str)
4977 ++ hash = (hash + (__u64) *str++) * hash_mult;
4978 ++
4979 ++ return hash;
4980 ++}
4981 ++
4982 + int
4983 + cifs_get_inode_info(struct inode **inode, const char *full_path,
4984 + FILE_ALL_INFO *data, struct super_block *sb, int xid,
4985 +@@ -816,6 +828,14 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
4986 + tmprc);
4987 + fattr.cf_uniqueid = iunique(sb, ROOT_I);
4988 + cifs_autodisable_serverino(cifs_sb);
4989 ++ } else if ((fattr.cf_uniqueid == 0) &&
4990 ++ strlen(full_path) == 0) {
4991 ++ /* some servers return a bad root ino, i.e. 0 */
4992 ++ cifs_dbg(FYI, "Invalid (0) inodenum\n");
4993 ++ fattr.cf_flags |=
4994 ++ CIFS_FATTR_FAKE_ROOT_INO;
4995 ++ fattr.cf_uniqueid =
4996 ++ simple_hashstr(tcon->treeName);
4997 + }
4998 + }
4999 + } else
5000 +@@ -832,6 +852,16 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
5001 + &fattr.cf_uniqueid, data);
5002 + if (tmprc)
5003 + fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
5004 ++ else if ((fattr.cf_uniqueid == 0) &&
5005 ++ strlen(full_path) == 0) {
5006 ++ /*
5007 ++ * Reuse existing root inode num since
5008 ++ * an inum of zero for root causes . and ..
5009 ++ * to be omitted from ls output
5010 ++ */
5011 ++ cifs_dbg(FYI, "Srv ret 0 inode num for root\n");
5012 ++ fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
5013 ++ }
5014 + } else
5015 + fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
5016 + }
5017 +@@ -893,6 +923,9 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
5018 + }
5019 +
5020 + cgii_exit:
5021 ++ if ((*inode) && ((*inode)->i_ino == 0))
5022 ++ cifs_dbg(FYI, "inode number of zero returned\n");
5023 ++
5024 + kfree(buf);
5025 + cifs_put_tlink(tlink);
5026 + return rc;
5027 +diff --git a/fs/cifs/link.c b/fs/cifs/link.c
5028 +index 60b5a11ee11b..889a840172eb 100644
5029 +--- a/fs/cifs/link.c
5030 ++++ b/fs/cifs/link.c
5031 +@@ -50,25 +50,12 @@ static int
5032 + symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
5033 + {
5034 + int rc;
5035 +- unsigned int size;
5036 +- struct crypto_shash *md5;
5037 +- struct sdesc *sdescmd5;
5038 +-
5039 +- md5 = crypto_alloc_shash("md5", 0, 0);
5040 +- if (IS_ERR(md5)) {
5041 +- rc = PTR_ERR(md5);
5042 +- cifs_dbg(VFS, "%s: Crypto md5 allocation error %d\n",
5043 +- __func__, rc);
5044 +- return rc;
5045 +- }
5046 +- size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
5047 +- sdescmd5 = kmalloc(size, GFP_KERNEL);
5048 +- if (!sdescmd5) {
5049 +- rc = -ENOMEM;
5050 ++ struct crypto_shash *md5 = NULL;
5051 ++ struct sdesc *sdescmd5 = NULL;
5052 ++
5053 ++ rc = cifs_alloc_hash("md5", &md5, &sdescmd5);
5054 ++ if (rc)
5055 + goto symlink_hash_err;
5056 +- }
5057 +- sdescmd5->shash.tfm = md5;
5058 +- sdescmd5->shash.flags = 0x0;
5059 +
5060 + rc = crypto_shash_init(&sdescmd5->shash);
5061 + if (rc) {
5062 +@@ -85,9 +72,7 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
5063 + cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
5064 +
5065 + symlink_hash_err:
5066 +- crypto_free_shash(md5);
5067 +- kfree(sdescmd5);
5068 +-
5069 ++ cifs_free_hash(&md5, &sdescmd5);
5070 + return rc;
5071 + }
5072 +
5073 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
5074 +index a0dbced4a45c..460084a8eac5 100644
5075 +--- a/fs/cifs/misc.c
5076 ++++ b/fs/cifs/misc.c
5077 +@@ -848,3 +848,57 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
5078 + iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
5079 + return 0;
5080 + }
5081 ++
5082 ++/**
5083 ++ * cifs_alloc_hash - allocate hash and hash context together
5084 ++ *
5085 ++ * The caller has to make sure @sdesc is initialized to either NULL or
5086 ++ * a valid context. Both can be freed via cifs_free_hash().
5087 ++ */
5088 ++int
5089 ++cifs_alloc_hash(const char *name,
5090 ++ struct crypto_shash **shash, struct sdesc **sdesc)
5091 ++{
5092 ++ int rc = 0;
5093 ++ size_t size;
5094 ++
5095 ++ if (*sdesc != NULL)
5096 ++ return 0;
5097 ++
5098 ++ *shash = crypto_alloc_shash(name, 0, 0);
5099 ++ if (IS_ERR(*shash)) {
5100 ++ cifs_dbg(VFS, "could not allocate crypto %s\n", name);
5101 ++ rc = PTR_ERR(*shash);
5102 ++ *shash = NULL;
5103 ++ *sdesc = NULL;
5104 ++ return rc;
5105 ++ }
5106 ++
5107 ++ size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
5108 ++ *sdesc = kmalloc(size, GFP_KERNEL);
5109 ++ if (*sdesc == NULL) {
5110 ++ cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
5111 ++ crypto_free_shash(*shash);
5112 ++ *shash = NULL;
5113 ++ return -ENOMEM;
5114 ++ }
5115 ++
5116 ++ (*sdesc)->shash.tfm = *shash;
5117 ++ (*sdesc)->shash.flags = 0x0;
5118 ++ return 0;
5119 ++}
5120 ++
5121 ++/**
5122 ++ * cifs_free_hash - free hash and hash context together
5123 ++ *
5124 ++ * Freeing a NULL hash or context is safe.
5125 ++ */
5126 ++void
5127 ++cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
5128 ++{
5129 ++ kfree(*sdesc);
5130 ++ *sdesc = NULL;
5131 ++ if (*shash)
5132 ++ crypto_free_shash(*shash);
5133 ++ *shash = NULL;
5134 ++}
5135 +diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
5136 +index 76d03abaa38c..da012c3ab700 100644
5137 +--- a/fs/cifs/smb2misc.c
5138 ++++ b/fs/cifs/smb2misc.c
5139 +@@ -706,3 +706,67 @@ smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
5140 +
5141 + return 0;
5142 + }
5143 ++
5144 ++#ifdef CONFIG_CIFS_SMB311
5145 ++/**
5146 ++ * smb311_update_preauth_hash - update @ses hash with the packet data in @iov
5147 ++ *
5148 ++ * Assumes @iov does not contain the rfc1002 length and iov[0] has the
5149 ++ * SMB2 header.
5150 ++ */
5151 ++int
5152 ++smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec)
5153 ++{
5154 ++ int i, rc;
5155 ++ struct sdesc *d;
5156 ++ struct smb2_sync_hdr *hdr;
5157 ++
5158 ++ if (ses->server->tcpStatus == CifsGood) {
5159 ++ /* skip non smb311 connections */
5160 ++ if (ses->server->dialect != SMB311_PROT_ID)
5161 ++ return 0;
5162 ++
5163 ++ /* skip last sess setup response */
5164 ++ hdr = (struct smb2_sync_hdr *)iov[0].iov_base;
5165 ++ if (hdr->Flags & SMB2_FLAGS_SIGNED)
5166 ++ return 0;
5167 ++ }
5168 ++
5169 ++ rc = smb311_crypto_shash_allocate(ses->server);
5170 ++ if (rc)
5171 ++ return rc;
5172 ++
5173 ++ d = ses->server->secmech.sdescsha512;
5174 ++ rc = crypto_shash_init(&d->shash);
5175 ++ if (rc) {
5176 ++ cifs_dbg(VFS, "%s: could not init sha512 shash\n", __func__);
5177 ++ return rc;
5178 ++ }
5179 ++
5180 ++ rc = crypto_shash_update(&d->shash, ses->preauth_sha_hash,
5181 ++ SMB2_PREAUTH_HASH_SIZE);
5182 ++ if (rc) {
5183 ++ cifs_dbg(VFS, "%s: could not update sha512 shash\n", __func__);
5184 ++ return rc;
5185 ++ }
5186 ++
5187 ++ for (i = 0; i < nvec; i++) {
5188 ++ rc = crypto_shash_update(&d->shash,
5189 ++ iov[i].iov_base, iov[i].iov_len);
5190 ++ if (rc) {
5191 ++ cifs_dbg(VFS, "%s: could not update sha512 shash\n",
5192 ++ __func__);
5193 ++ return rc;
5194 ++ }
5195 ++ }
5196 ++
5197 ++ rc = crypto_shash_final(&d->shash, ses->preauth_sha_hash);
5198 ++ if (rc) {
5199 ++ cifs_dbg(VFS, "%s: could not finalize sha512 shash\n",
5200 ++ __func__);
5201 ++ return rc;
5202 ++ }
5203 ++
5204 ++ return 0;
5205 ++}
5206 ++#endif
5207 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
5208 +index eb68e2fcc500..dfd6fb02b7a3 100644
5209 +--- a/fs/cifs/smb2ops.c
5210 ++++ b/fs/cifs/smb2ops.c
5211 +@@ -2066,6 +2066,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, struct smb_rqst *old_rq)
5212 + inc_rfc1001_len(tr_hdr, orig_len);
5213 + }
5214 +
5215 ++/* We cannot use the normal sg_set_buf() as we will sometimes pass a
5216 ++ * stack object as buf.
5217 ++ */
5218 ++static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
5219 ++ unsigned int buflen)
5220 ++{
5221 ++ sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
5222 ++}
5223 ++
5224 + static struct scatterlist *
5225 + init_sg(struct smb_rqst *rqst, u8 *sign)
5226 + {
5227 +@@ -2080,16 +2089,16 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
5228 + return NULL;
5229 +
5230 + sg_init_table(sg, sg_len);
5231 +- sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 24, assoc_data_len);
5232 ++ smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 24, assoc_data_len);
5233 + for (i = 1; i < rqst->rq_nvec; i++)
5234 +- sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
5235 ++ smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
5236 + rqst->rq_iov[i].iov_len);
5237 + for (j = 0; i < sg_len - 1; i++, j++) {
5238 + unsigned int len = (j < rqst->rq_npages - 1) ? rqst->rq_pagesz
5239 + : rqst->rq_tailsz;
5240 + sg_set_page(&sg[i], rqst->rq_pages[j], len, 0);
5241 + }
5242 +- sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE);
5243 ++ smb2_sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE);
5244 + return sg;
5245 + }
5246 +
5247 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
5248 +index 63778ac22fd9..af62c75b17c4 100644
5249 +--- a/fs/cifs/smb2pdu.c
5250 ++++ b/fs/cifs/smb2pdu.c
5251 +@@ -453,6 +453,10 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
5252 + return rc;
5253 +
5254 + req->sync_hdr.SessionId = 0;
5255 ++#ifdef CONFIG_CIFS_SMB311
5256 ++ memset(server->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
5257 ++ memset(ses->preauth_sha_hash, 0, SMB2_PREAUTH_HASH_SIZE);
5258 ++#endif
5259 +
5260 + if (strcmp(ses->server->vals->version_string,
5261 + SMB3ANY_VERSION_STRING) == 0) {
5262 +@@ -564,6 +568,15 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
5263 +
5264 + /* BB: add check that dialect was valid given dialect(s) we asked for */
5265 +
5266 ++#ifdef CONFIG_CIFS_SMB311
5267 ++ /*
5268 ++ * Keep a copy of the hash after negprot. This hash will be
5269 ++ * the starting hash value for all sessions made from this
5270 ++ * server.
5271 ++ */
5272 ++ memcpy(server->preauth_sha_hash, ses->preauth_sha_hash,
5273 ++ SMB2_PREAUTH_HASH_SIZE);
5274 ++#endif
5275 + /* SMB2 only has an extended negflavor */
5276 + server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
5277 + /* set it to the maximum buffer size value we can send with 1 credit */
5278 +@@ -621,6 +634,10 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
5279 + return 0;
5280 + #endif
5281 +
5282 ++ /* In SMB3.11 preauth integrity supersedes validate negotiate */
5283 ++ if (tcon->ses->server->dialect == SMB311_PROT_ID)
5284 ++ return 0;
5285 ++
5286 + /*
5287 + * validation ioctl must be signed, so no point sending this if we
5288 + * can not sign it (ie are not known user). Even if signing is not
5289 +@@ -1148,6 +1165,14 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
5290 + sess_data->buf0_type = CIFS_NO_BUFFER;
5291 + sess_data->nls_cp = (struct nls_table *) nls_cp;
5292 +
5293 ++#ifdef CONFIG_CIFS_SMB311
5294 ++ /*
5295 ++ * Initialize the session hash with the server's hash.
5296 ++ */
5297 ++ memcpy(ses->preauth_sha_hash, ses->server->preauth_sha_hash,
5298 ++ SMB2_PREAUTH_HASH_SIZE);
5299 ++#endif
5300 ++
5301 + while (sess_data->func)
5302 + sess_data->func(sess_data);
5303 +
5304 +@@ -1280,6 +1305,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
5305 + iov[1].iov_base = unc_path;
5306 + iov[1].iov_len = unc_path_len;
5307 +
5308 ++ /* 3.11 tcon req must be signed if not encrypted. See MS-SMB2 3.2.4.1.1 */
5309 ++ if ((ses->server->dialect == SMB311_PROT_ID) &&
5310 ++ !encryption_required(tcon))
5311 ++ req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
5312 ++
5313 + rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
5314 + cifs_small_buf_release(req);
5315 + rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
5316 +@@ -1738,8 +1768,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
5317 + rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
5318 + &name_len,
5319 + tcon->treeName, path);
5320 +- if (rc)
5321 ++ if (rc) {
5322 ++ cifs_small_buf_release(req);
5323 + return rc;
5324 ++ }
5325 + req->NameLength = cpu_to_le16(name_len * 2);
5326 + uni_path_len = copy_size;
5327 + path = copy_path;
5328 +@@ -1750,8 +1782,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
5329 + if (uni_path_len % 8 != 0) {
5330 + copy_size = roundup(uni_path_len, 8);
5331 + copy_path = kzalloc(copy_size, GFP_KERNEL);
5332 +- if (!copy_path)
5333 ++ if (!copy_path) {
5334 ++ cifs_small_buf_release(req);
5335 + return -ENOMEM;
5336 ++ }
5337 + memcpy((char *)copy_path, (const char *)path,
5338 + uni_path_len);
5339 + uni_path_len = copy_size;
5340 +diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
5341 +index 2a2b34ccaf49..8b901c69a65a 100644
5342 +--- a/fs/cifs/smb2pdu.h
5343 ++++ b/fs/cifs/smb2pdu.h
5344 +@@ -264,6 +264,7 @@ struct smb2_negotiate_req {
5345 + #define SMB311_SALT_SIZE 32
5346 + /* Hash Algorithm Types */
5347 + #define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001)
5348 ++#define SMB2_PREAUTH_HASH_SIZE 64
5349 +
5350 + struct smb2_preauth_neg_context {
5351 + __le16 ContextType; /* 1 */
5352 +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
5353 +index 05287b01f596..cbcce3f7e86f 100644
5354 +--- a/fs/cifs/smb2proto.h
5355 ++++ b/fs/cifs/smb2proto.h
5356 +@@ -202,4 +202,9 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
5357 +
5358 + extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
5359 + enum securityEnum);
5360 ++#ifdef CONFIG_CIFS_SMB311
5361 ++extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
5362 ++extern int smb311_update_preauth_hash(struct cifs_ses *ses,
5363 ++ struct kvec *iov, int nvec);
5364 ++#endif
5365 + #endif /* _SMB2PROTO_H */
5366 +diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
5367 +index 99493946e2f9..bf49cb73b9e6 100644
5368 +--- a/fs/cifs/smb2transport.c
5369 ++++ b/fs/cifs/smb2transport.c
5370 +@@ -43,77 +43,62 @@
5371 + static int
5372 + smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
5373 + {
5374 +- int rc;
5375 +- unsigned int size;
5376 ++ return cifs_alloc_hash("hmac(sha256)",
5377 ++ &server->secmech.hmacsha256,
5378 ++ &server->secmech.sdeschmacsha256);
5379 ++}
5380 +
5381 +- if (server->secmech.sdeschmacsha256 != NULL)
5382 +- return 0; /* already allocated */
5383 ++static int
5384 ++smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
5385 ++{
5386 ++ struct cifs_secmech *p = &server->secmech;
5387 ++ int rc;
5388 +
5389 +- server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
5390 +- if (IS_ERR(server->secmech.hmacsha256)) {
5391 +- cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
5392 +- rc = PTR_ERR(server->secmech.hmacsha256);
5393 +- server->secmech.hmacsha256 = NULL;
5394 +- return rc;
5395 +- }
5396 ++ rc = cifs_alloc_hash("hmac(sha256)",
5397 ++ &p->hmacsha256,
5398 ++ &p->sdeschmacsha256);
5399 ++ if (rc)
5400 ++ goto err;
5401 +
5402 +- size = sizeof(struct shash_desc) +
5403 +- crypto_shash_descsize(server->secmech.hmacsha256);
5404 +- server->secmech.sdeschmacsha256 = kmalloc(size, GFP_KERNEL);
5405 +- if (!server->secmech.sdeschmacsha256) {
5406 +- crypto_free_shash(server->secmech.hmacsha256);
5407 +- server->secmech.hmacsha256 = NULL;
5408 +- return -ENOMEM;
5409 +- }
5410 +- server->secmech.sdeschmacsha256->shash.tfm = server->secmech.hmacsha256;
5411 +- server->secmech.sdeschmacsha256->shash.flags = 0x0;
5412 ++ rc = cifs_alloc_hash("cmac(aes)", &p->cmacaes, &p->sdesccmacaes);
5413 ++ if (rc)
5414 ++ goto err;
5415 +
5416 + return 0;
5417 ++err:
5418 ++ cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
5419 ++ return rc;
5420 + }
5421 +
5422 +-static int
5423 +-smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
5424 ++#ifdef CONFIG_CIFS_SMB311
5425 ++int
5426 ++smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
5427 + {
5428 +- unsigned int size;
5429 +- int rc;
5430 +-
5431 +- if (server->secmech.sdesccmacaes != NULL)
5432 +- return 0; /* already allocated */
5433 ++ struct cifs_secmech *p = &server->secmech;
5434 ++ int rc = 0;
5435 +
5436 +- rc = smb2_crypto_shash_allocate(server);
5437 ++ rc = cifs_alloc_hash("hmac(sha256)",
5438 ++ &p->hmacsha256,
5439 ++ &p->sdeschmacsha256);
5440 + if (rc)
5441 + return rc;
5442 +
5443 +- server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0);
5444 +- if (IS_ERR(server->secmech.cmacaes)) {
5445 +- cifs_dbg(VFS, "could not allocate crypto cmac-aes");
5446 +- kfree(server->secmech.sdeschmacsha256);
5447 +- server->secmech.sdeschmacsha256 = NULL;
5448 +- crypto_free_shash(server->secmech.hmacsha256);
5449 +- server->secmech.hmacsha256 = NULL;
5450 +- rc = PTR_ERR(server->secmech.cmacaes);
5451 +- server->secmech.cmacaes = NULL;
5452 +- return rc;
5453 +- }
5454 ++ rc = cifs_alloc_hash("cmac(aes)", &p->cmacaes, &p->sdesccmacaes);
5455 ++ if (rc)
5456 ++ goto err;
5457 +
5458 +- size = sizeof(struct shash_desc) +
5459 +- crypto_shash_descsize(server->secmech.cmacaes);
5460 +- server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL);
5461 +- if (!server->secmech.sdesccmacaes) {
5462 +- cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__);
5463 +- kfree(server->secmech.sdeschmacsha256);
5464 +- server->secmech.sdeschmacsha256 = NULL;
5465 +- crypto_free_shash(server->secmech.hmacsha256);
5466 +- crypto_free_shash(server->secmech.cmacaes);
5467 +- server->secmech.hmacsha256 = NULL;
5468 +- server->secmech.cmacaes = NULL;
5469 +- return -ENOMEM;
5470 +- }
5471 +- server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes;
5472 +- server->secmech.sdesccmacaes->shash.flags = 0x0;
5473 ++ rc = cifs_alloc_hash("sha512", &p->sha512, &p->sdescsha512);
5474 ++ if (rc)
5475 ++ goto err;
5476 +
5477 + return 0;
5478 ++
5479 ++err:
5480 ++ cifs_free_hash(&p->cmacaes, &p->sdesccmacaes);
5481 ++ cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
5482 ++ return rc;
5483 + }
5484 ++#endif
5485 +
5486 + static struct cifs_ses *
5487 + smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
5488 +@@ -457,7 +442,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
5489 + cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
5490 + return rc;
5491 + }
5492 +-
5493 ++
5494 + rc = __cifs_calc_signature(rqst, server, sigptr,
5495 + &server->secmech.sdesccmacaes->shash);
5496 +
5497 +diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
5498 +index 91710eb571fb..52cccdbb7e14 100644
5499 +--- a/fs/cifs/smbdirect.c
5500 ++++ b/fs/cifs/smbdirect.c
5501 +@@ -862,6 +862,8 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
5502 + ib_dma_unmap_single(info->id->device, request->sge[0].addr,
5503 + request->sge[0].length, DMA_TO_DEVICE);
5504 +
5505 ++ smbd_disconnect_rdma_connection(info);
5506 ++
5507 + dma_mapping_failed:
5508 + mempool_free(request, info->request_mempool);
5509 + return rc;
5510 +@@ -1061,6 +1063,7 @@ static int smbd_post_send(struct smbd_connection *info,
5511 + if (atomic_dec_and_test(&info->send_pending))
5512 + wake_up(&info->wait_send_pending);
5513 + }
5514 ++ smbd_disconnect_rdma_connection(info);
5515 + } else
5516 + /* Reset timer for idle connection after packet is sent */
5517 + mod_delayed_work(info->workqueue, &info->idle_timer_work,
5518 +@@ -1202,7 +1205,7 @@ static int smbd_post_recv(
5519 + if (rc) {
5520 + ib_dma_unmap_single(info->id->device, response->sge.addr,
5521 + response->sge.length, DMA_FROM_DEVICE);
5522 +-
5523 ++ smbd_disconnect_rdma_connection(info);
5524 + log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
5525 + }
5526 +
5527 +@@ -1498,8 +1501,8 @@ int smbd_reconnect(struct TCP_Server_Info *server)
5528 + log_rdma_event(INFO, "reconnecting rdma session\n");
5529 +
5530 + if (!server->smbd_conn) {
5531 +- log_rdma_event(ERR, "rdma session already destroyed\n");
5532 +- return -EINVAL;
5533 ++ log_rdma_event(INFO, "rdma session already destroyed\n");
5534 ++ goto create_conn;
5535 + }
5536 +
5537 + /*
5538 +@@ -1512,15 +1515,19 @@ int smbd_reconnect(struct TCP_Server_Info *server)
5539 + }
5540 +
5541 + /* wait until the transport is destroyed */
5542 +- wait_event(server->smbd_conn->wait_destroy,
5543 +- server->smbd_conn->transport_status == SMBD_DESTROYED);
5544 ++ if (!wait_event_timeout(server->smbd_conn->wait_destroy,
5545 ++ server->smbd_conn->transport_status == SMBD_DESTROYED, 5*HZ))
5546 ++ return -EAGAIN;
5547 +
5548 + destroy_workqueue(server->smbd_conn->workqueue);
5549 + kfree(server->smbd_conn);
5550 +
5551 ++create_conn:
5552 + log_rdma_event(INFO, "creating rdma session\n");
5553 + server->smbd_conn = smbd_get_connection(
5554 + server, (struct sockaddr *) &server->dstaddr);
5555 ++ log_rdma_event(INFO, "created rdma session info=%p\n",
5556 ++ server->smbd_conn);
5557 +
5558 + return server->smbd_conn ? 0 : -ENOENT;
5559 + }
5560 +@@ -2542,6 +2549,8 @@ struct smbd_mr *smbd_register_mr(
5561 + if (atomic_dec_and_test(&info->mr_used_count))
5562 + wake_up(&info->wait_for_mr_cleanup);
5563 +
5564 ++ smbd_disconnect_rdma_connection(info);
5565 ++
5566 + return NULL;
5567 + }
5568 +
5569 +diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
5570 +index c12bffefa3c9..a0b80ac651a6 100644
5571 +--- a/fs/cifs/smbencrypt.c
5572 ++++ b/fs/cifs/smbencrypt.c
5573 +@@ -121,25 +121,12 @@ int
5574 + mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
5575 + {
5576 + int rc;
5577 +- unsigned int size;
5578 +- struct crypto_shash *md4;
5579 +- struct sdesc *sdescmd4;
5580 +-
5581 +- md4 = crypto_alloc_shash("md4", 0, 0);
5582 +- if (IS_ERR(md4)) {
5583 +- rc = PTR_ERR(md4);
5584 +- cifs_dbg(VFS, "%s: Crypto md4 allocation error %d\n",
5585 +- __func__, rc);
5586 +- return rc;
5587 +- }
5588 +- size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
5589 +- sdescmd4 = kmalloc(size, GFP_KERNEL);
5590 +- if (!sdescmd4) {
5591 +- rc = -ENOMEM;
5592 ++ struct crypto_shash *md4 = NULL;
5593 ++ struct sdesc *sdescmd4 = NULL;
5594 ++
5595 ++ rc = cifs_alloc_hash("md4", &md4, &sdescmd4);
5596 ++ if (rc)
5597 + goto mdfour_err;
5598 +- }
5599 +- sdescmd4->shash.tfm = md4;
5600 +- sdescmd4->shash.flags = 0x0;
5601 +
5602 + rc = crypto_shash_init(&sdescmd4->shash);
5603 + if (rc) {
5604 +@@ -156,9 +143,7 @@ mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
5605 + cifs_dbg(VFS, "%s: Could not generate md4 hash\n", __func__);
5606 +
5607 + mdfour_err:
5608 +- crypto_free_shash(md4);
5609 +- kfree(sdescmd4);
5610 +-
5611 ++ cifs_free_hash(&md4, &sdescmd4);
5612 + return rc;
5613 + }
5614 +
5615 +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
5616 +index 9779b3292d8e..665661464067 100644
5617 +--- a/fs/cifs/transport.c
5618 ++++ b/fs/cifs/transport.c
5619 +@@ -37,6 +37,7 @@
5620 + #include "cifsglob.h"
5621 + #include "cifsproto.h"
5622 + #include "cifs_debug.h"
5623 ++#include "smb2proto.h"
5624 + #include "smbdirect.h"
5625 +
5626 + /* Max number of iovectors we can use off the stack when sending requests. */
5627 +@@ -751,6 +752,12 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
5628 + if (rc < 0)
5629 + goto out;
5630 +
5631 ++#ifdef CONFIG_CIFS_SMB311
5632 ++ if (ses->status == CifsNew)
5633 ++ smb311_update_preauth_hash(ses, rqst->rq_iov+1,
5634 ++ rqst->rq_nvec-1);
5635 ++#endif
5636 ++
5637 + if (timeout == CIFS_ASYNC_OP)
5638 + goto out;
5639 +
5640 +@@ -789,6 +796,16 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
5641 + else
5642 + *resp_buf_type = CIFS_SMALL_BUFFER;
5643 +
5644 ++#ifdef CONFIG_CIFS_SMB311
5645 ++ if (ses->status == CifsNew) {
5646 ++ struct kvec iov = {
5647 ++ .iov_base = buf + 4,
5648 ++ .iov_len = get_rfc1002_length(buf)
5649 ++ };
5650 ++ smb311_update_preauth_hash(ses, &iov, 1);
5651 ++ }
5652 ++#endif
5653 ++
5654 + credits = ses->server->ops->get_credits(midQ);
5655 +
5656 + rc = ses->server->ops->check_receive(midQ, ses->server,
5657 +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
5658 +index f9b3e0a83526..f82c4966f4ce 100644
5659 +--- a/fs/ext4/balloc.c
5660 ++++ b/fs/ext4/balloc.c
5661 +@@ -243,8 +243,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
5662 + */
5663 + ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
5664 + sb->s_blocksize * 8, bh->b_data);
5665 +- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
5666 +- ext4_group_desc_csum_set(sb, block_group, gdp);
5667 + return 0;
5668 + }
5669 +
5670 +@@ -448,6 +446,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
5671 + err = ext4_init_block_bitmap(sb, bh, block_group, desc);
5672 + set_bitmap_uptodate(bh);
5673 + set_buffer_uptodate(bh);
5674 ++ set_buffer_verified(bh);
5675 + ext4_unlock_group(sb, block_group);
5676 + unlock_buffer(bh);
5677 + if (err) {
5678 +diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
5679 +index da87cf757f7d..e2902d394f1b 100644
5680 +--- a/fs/ext4/dir.c
5681 ++++ b/fs/ext4/dir.c
5682 +@@ -365,13 +365,15 @@ static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
5683 + {
5684 + struct inode *inode = file->f_mapping->host;
5685 + int dx_dir = is_dx_dir(inode);
5686 +- loff_t htree_max = ext4_get_htree_eof(file);
5687 ++ loff_t ret, htree_max = ext4_get_htree_eof(file);
5688 +
5689 + if (likely(dx_dir))
5690 +- return generic_file_llseek_size(file, offset, whence,
5691 ++ ret = generic_file_llseek_size(file, offset, whence,
5692 + htree_max, htree_max);
5693 + else
5694 +- return ext4_llseek(file, offset, whence);
5695 ++ ret = ext4_llseek(file, offset, whence);
5696 ++ file->f_version = inode_peek_iversion(inode) - 1;
5697 ++ return ret;
5698 + }
5699 +
5700 + /*
5701 +diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
5702 +index 2d593201cf7a..7c70b08d104c 100644
5703 +--- a/fs/ext4/ext4_jbd2.c
5704 ++++ b/fs/ext4/ext4_jbd2.c
5705 +@@ -166,13 +166,6 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
5706 + might_sleep();
5707 +
5708 + if (ext4_handle_valid(handle)) {
5709 +- struct super_block *sb;
5710 +-
5711 +- sb = handle->h_transaction->t_journal->j_private;
5712 +- if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) {
5713 +- jbd2_journal_abort_handle(handle);
5714 +- return -EIO;
5715 +- }
5716 + err = jbd2_journal_get_write_access(handle, bh);
5717 + if (err)
5718 + ext4_journal_abort_handle(where, line, __func__, bh,
5719 +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
5720 +index 7830d28df331..3fa93665b4a3 100644
5721 +--- a/fs/ext4/ialloc.c
5722 ++++ b/fs/ext4/ialloc.c
5723 +@@ -66,44 +66,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
5724 + memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
5725 + }
5726 +
5727 +-/* Initializes an uninitialized inode bitmap */
5728 +-static int ext4_init_inode_bitmap(struct super_block *sb,
5729 +- struct buffer_head *bh,
5730 +- ext4_group_t block_group,
5731 +- struct ext4_group_desc *gdp)
5732 +-{
5733 +- struct ext4_group_info *grp;
5734 +- struct ext4_sb_info *sbi = EXT4_SB(sb);
5735 +- J_ASSERT_BH(bh, buffer_locked(bh));
5736 +-
5737 +- /* If checksum is bad mark all blocks and inodes use to prevent
5738 +- * allocation, essentially implementing a per-group read-only flag. */
5739 +- if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
5740 +- grp = ext4_get_group_info(sb, block_group);
5741 +- if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
5742 +- percpu_counter_sub(&sbi->s_freeclusters_counter,
5743 +- grp->bb_free);
5744 +- set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
5745 +- if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
5746 +- int count;
5747 +- count = ext4_free_inodes_count(sb, gdp);
5748 +- percpu_counter_sub(&sbi->s_freeinodes_counter,
5749 +- count);
5750 +- }
5751 +- set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
5752 +- return -EFSBADCRC;
5753 +- }
5754 +-
5755 +- memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
5756 +- ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
5757 +- bh->b_data);
5758 +- ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
5759 +- EXT4_INODES_PER_GROUP(sb) / 8);
5760 +- ext4_group_desc_csum_set(sb, block_group, gdp);
5761 +-
5762 +- return 0;
5763 +-}
5764 +-
5765 + void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
5766 + {
5767 + if (uptodate) {
5768 +@@ -187,17 +149,14 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
5769 +
5770 + ext4_lock_group(sb, block_group);
5771 + if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
5772 +- err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
5773 ++ memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
5774 ++ ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
5775 ++ sb->s_blocksize * 8, bh->b_data);
5776 + set_bitmap_uptodate(bh);
5777 + set_buffer_uptodate(bh);
5778 + set_buffer_verified(bh);
5779 + ext4_unlock_group(sb, block_group);
5780 + unlock_buffer(bh);
5781 +- if (err) {
5782 +- ext4_error(sb, "Failed to init inode bitmap for group "
5783 +- "%u: %d", block_group, err);
5784 +- goto out;
5785 +- }
5786 + return bh;
5787 + }
5788 + ext4_unlock_group(sb, block_group);
5789 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
5790 +index c94780075b04..3350454fc5a7 100644
5791 +--- a/fs/ext4/inode.c
5792 ++++ b/fs/ext4/inode.c
5793 +@@ -3524,7 +3524,7 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
5794 + iomap->flags |= IOMAP_F_DIRTY;
5795 + iomap->bdev = inode->i_sb->s_bdev;
5796 + iomap->dax_dev = sbi->s_daxdev;
5797 +- iomap->offset = first_block << blkbits;
5798 ++ iomap->offset = (u64)first_block << blkbits;
5799 + iomap->length = (u64)map.m_len << blkbits;
5800 +
5801 + if (ret == 0) {
5802 +@@ -3658,7 +3658,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
5803 + {
5804 + struct file *file = iocb->ki_filp;
5805 + struct inode *inode = file->f_mapping->host;
5806 +- struct ext4_inode_info *ei = EXT4_I(inode);
5807 + ssize_t ret;
5808 + loff_t offset = iocb->ki_pos;
5809 + size_t count = iov_iter_count(iter);
5810 +@@ -3682,7 +3681,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
5811 + goto out;
5812 + }
5813 + orphan = 1;
5814 +- ei->i_disksize = inode->i_size;
5815 ++ ext4_update_i_disksize(inode, inode->i_size);
5816 + ext4_journal_stop(handle);
5817 + }
5818 +
5819 +@@ -3790,7 +3789,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
5820 + if (ret > 0) {
5821 + loff_t end = offset + ret;
5822 + if (end > inode->i_size) {
5823 +- ei->i_disksize = end;
5824 ++ ext4_update_i_disksize(inode, end);
5825 + i_size_write(inode, end);
5826 + /*
5827 + * We're going to return a positive `ret'
5828 +@@ -4746,6 +4745,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
5829 + goto bad_inode;
5830 + raw_inode = ext4_raw_inode(&iloc);
5831 +
5832 ++ if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
5833 ++ EXT4_ERROR_INODE(inode, "root inode unallocated");
5834 ++ ret = -EFSCORRUPTED;
5835 ++ goto bad_inode;
5836 ++ }
5837 ++
5838 + if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
5839 + ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
5840 + if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
5841 +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
5842 +index 7e99ad02f1ba..be8d78472ef8 100644
5843 +--- a/fs/ext4/ioctl.c
5844 ++++ b/fs/ext4/ioctl.c
5845 +@@ -492,15 +492,13 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
5846 + set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
5847 + if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) {
5848 + (void) ext4_force_commit(sb);
5849 +- jbd2_journal_abort(sbi->s_journal, 0);
5850 ++ jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
5851 + }
5852 + break;
5853 + case EXT4_GOING_FLAGS_NOLOGFLUSH:
5854 + set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
5855 +- if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) {
5856 +- msleep(100);
5857 +- jbd2_journal_abort(sbi->s_journal, 0);
5858 +- }
5859 ++ if (sbi->s_journal && !is_journal_aborted(sbi->s_journal))
5860 ++ jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
5861 + break;
5862 + default:
5863 + return -EINVAL;
5864 +diff --git a/fs/ext4/super.c b/fs/ext4/super.c
5865 +index 39bf464c35f1..192c5ad09d71 100644
5866 +--- a/fs/ext4/super.c
5867 ++++ b/fs/ext4/super.c
5868 +@@ -2333,6 +2333,8 @@ static int ext4_check_descriptors(struct super_block *sb,
5869 + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
5870 + "Block bitmap for group %u overlaps "
5871 + "superblock", i);
5872 ++ if (!sb_rdonly(sb))
5873 ++ return 0;
5874 + }
5875 + if (block_bitmap < first_block || block_bitmap > last_block) {
5876 + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
5877 +@@ -2345,6 +2347,8 @@ static int ext4_check_descriptors(struct super_block *sb,
5878 + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
5879 + "Inode bitmap for group %u overlaps "
5880 + "superblock", i);
5881 ++ if (!sb_rdonly(sb))
5882 ++ return 0;
5883 + }
5884 + if (inode_bitmap < first_block || inode_bitmap > last_block) {
5885 + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
5886 +@@ -2357,6 +2361,8 @@ static int ext4_check_descriptors(struct super_block *sb,
5887 + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
5888 + "Inode table for group %u overlaps "
5889 + "superblock", i);
5890 ++ if (!sb_rdonly(sb))
5891 ++ return 0;
5892 + }
5893 + if (inode_table < first_block ||
5894 + inode_table + sbi->s_itb_per_group - 1 > last_block) {
5895 +@@ -3490,15 +3496,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
5896 + }
5897 +
5898 + /* Load the checksum driver */
5899 +- if (ext4_has_feature_metadata_csum(sb) ||
5900 +- ext4_has_feature_ea_inode(sb)) {
5901 +- sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
5902 +- if (IS_ERR(sbi->s_chksum_driver)) {
5903 +- ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
5904 +- ret = PTR_ERR(sbi->s_chksum_driver);
5905 +- sbi->s_chksum_driver = NULL;
5906 +- goto failed_mount;
5907 +- }
5908 ++ sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
5909 ++ if (IS_ERR(sbi->s_chksum_driver)) {
5910 ++ ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
5911 ++ ret = PTR_ERR(sbi->s_chksum_driver);
5912 ++ sbi->s_chksum_driver = NULL;
5913 ++ goto failed_mount;
5914 + }
5915 +
5916 + /* Check superblock checksum */
5917 +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
5918 +index 63656dbafdc4..499cb4b1fbd2 100644
5919 +--- a/fs/ext4/xattr.c
5920 ++++ b/fs/ext4/xattr.c
5921 +@@ -195,10 +195,13 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
5922 +
5923 + /* Check the values */
5924 + while (!IS_LAST_ENTRY(entry)) {
5925 +- if (entry->e_value_size != 0 &&
5926 +- entry->e_value_inum == 0) {
5927 ++ u32 size = le32_to_cpu(entry->e_value_size);
5928 ++
5929 ++ if (size > EXT4_XATTR_SIZE_MAX)
5930 ++ return -EFSCORRUPTED;
5931 ++
5932 ++ if (size != 0 && entry->e_value_inum == 0) {
5933 + u16 offs = le16_to_cpu(entry->e_value_offs);
5934 +- u32 size = le32_to_cpu(entry->e_value_size);
5935 + void *value;
5936 +
5937 + /*
5938 +@@ -222,25 +225,36 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
5939 + }
5940 +
5941 + static inline int
5942 +-ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
5943 ++__ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
5944 ++ const char *function, unsigned int line)
5945 + {
5946 +- int error;
5947 ++ int error = -EFSCORRUPTED;
5948 +
5949 + if (buffer_verified(bh))
5950 + return 0;
5951 +
5952 + if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
5953 + BHDR(bh)->h_blocks != cpu_to_le32(1))
5954 +- return -EFSCORRUPTED;
5955 ++ goto errout;
5956 ++ error = -EFSBADCRC;
5957 + if (!ext4_xattr_block_csum_verify(inode, bh))
5958 +- return -EFSBADCRC;
5959 ++ goto errout;
5960 + error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
5961 + bh->b_data);
5962 +- if (!error)
5963 ++errout:
5964 ++ if (error)
5965 ++ __ext4_error_inode(inode, function, line, 0,
5966 ++ "corrupted xattr block %llu",
5967 ++ (unsigned long long) bh->b_blocknr);
5968 ++ else
5969 + set_buffer_verified(bh);
5970 + return error;
5971 + }
5972 +
5973 ++#define ext4_xattr_check_block(inode, bh) \
5974 ++ __ext4_xattr_check_block((inode), (bh), __func__, __LINE__)
5975 ++
5976 ++
5977 + static int
5978 + __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
5979 + void *end, const char *function, unsigned int line)
5980 +@@ -262,18 +276,22 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
5981 + __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
5982 +
5983 + static int
5984 +-ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
5985 +- const char *name, int sorted)
5986 ++xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
5987 ++ void *end, int name_index, const char *name, int sorted)
5988 + {
5989 +- struct ext4_xattr_entry *entry;
5990 ++ struct ext4_xattr_entry *entry, *next;
5991 + size_t name_len;
5992 + int cmp = 1;
5993 +
5994 + if (name == NULL)
5995 + return -EINVAL;
5996 + name_len = strlen(name);
5997 +- entry = *pentry;
5998 +- for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
5999 ++ for (entry = *pentry; !IS_LAST_ENTRY(entry); entry = next) {
6000 ++ next = EXT4_XATTR_NEXT(entry);
6001 ++ if ((void *) next >= end) {
6002 ++ EXT4_ERROR_INODE(inode, "corrupted xattr entries");
6003 ++ return -EFSCORRUPTED;
6004 ++ }
6005 + cmp = name_index - entry->e_name_index;
6006 + if (!cmp)
6007 + cmp = name_len - entry->e_name_len;
6008 +@@ -495,6 +513,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
6009 + struct buffer_head *bh = NULL;
6010 + struct ext4_xattr_entry *entry;
6011 + size_t size;
6012 ++ void *end;
6013 + int error;
6014 + struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
6015 +
6016 +@@ -511,20 +530,20 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
6017 + goto cleanup;
6018 + ea_bdebug(bh, "b_count=%d, refcount=%d",
6019 + atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
6020 +- if (ext4_xattr_check_block(inode, bh)) {
6021 +- EXT4_ERROR_INODE(inode, "bad block %llu",
6022 +- EXT4_I(inode)->i_file_acl);
6023 +- error = -EFSCORRUPTED;
6024 ++ error = ext4_xattr_check_block(inode, bh);
6025 ++ if (error)
6026 + goto cleanup;
6027 +- }
6028 + ext4_xattr_block_cache_insert(ea_block_cache, bh);
6029 + entry = BFIRST(bh);
6030 +- error = ext4_xattr_find_entry(&entry, name_index, name, 1);
6031 ++ end = bh->b_data + bh->b_size;
6032 ++ error = xattr_find_entry(inode, &entry, end, name_index, name, 1);
6033 + if (error)
6034 + goto cleanup;
6035 + size = le32_to_cpu(entry->e_value_size);
6036 ++ error = -ERANGE;
6037 ++ if (unlikely(size > EXT4_XATTR_SIZE_MAX))
6038 ++ goto cleanup;
6039 + if (buffer) {
6040 +- error = -ERANGE;
6041 + if (size > buffer_size)
6042 + goto cleanup;
6043 + if (entry->e_value_inum) {
6044 +@@ -533,8 +552,12 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
6045 + if (error)
6046 + goto cleanup;
6047 + } else {
6048 +- memcpy(buffer, bh->b_data +
6049 +- le16_to_cpu(entry->e_value_offs), size);
6050 ++ u16 offset = le16_to_cpu(entry->e_value_offs);
6051 ++ void *p = bh->b_data + offset;
6052 ++
6053 ++ if (unlikely(p + size > end))
6054 ++ goto cleanup;
6055 ++ memcpy(buffer, p, size);
6056 + }
6057 + }
6058 + error = size;
6059 +@@ -568,12 +591,14 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
6060 + if (error)
6061 + goto cleanup;
6062 + entry = IFIRST(header);
6063 +- error = ext4_xattr_find_entry(&entry, name_index, name, 0);
6064 ++ error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
6065 + if (error)
6066 + goto cleanup;
6067 + size = le32_to_cpu(entry->e_value_size);
6068 ++ error = -ERANGE;
6069 ++ if (unlikely(size > EXT4_XATTR_SIZE_MAX))
6070 ++ goto cleanup;
6071 + if (buffer) {
6072 +- error = -ERANGE;
6073 + if (size > buffer_size)
6074 + goto cleanup;
6075 + if (entry->e_value_inum) {
6076 +@@ -582,8 +607,12 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
6077 + if (error)
6078 + goto cleanup;
6079 + } else {
6080 +- memcpy(buffer, (void *)IFIRST(header) +
6081 +- le16_to_cpu(entry->e_value_offs), size);
6082 ++ u16 offset = le16_to_cpu(entry->e_value_offs);
6083 ++ void *p = (void *)IFIRST(header) + offset;
6084 ++
6085 ++ if (unlikely(p + size > end))
6086 ++ goto cleanup;
6087 ++ memcpy(buffer, p, size);
6088 + }
6089 + }
6090 + error = size;
6091 +@@ -676,12 +705,9 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
6092 + goto cleanup;
6093 + ea_bdebug(bh, "b_count=%d, refcount=%d",
6094 + atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
6095 +- if (ext4_xattr_check_block(inode, bh)) {
6096 +- EXT4_ERROR_INODE(inode, "bad block %llu",
6097 +- EXT4_I(inode)->i_file_acl);
6098 +- error = -EFSCORRUPTED;
6099 ++ error = ext4_xattr_check_block(inode, bh);
6100 ++ if (error)
6101 + goto cleanup;
6102 +- }
6103 + ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
6104 + error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
6105 +
6106 +@@ -808,10 +834,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
6107 + goto out;
6108 + }
6109 +
6110 +- if (ext4_xattr_check_block(inode, bh)) {
6111 +- ret = -EFSCORRUPTED;
6112 ++ ret = ext4_xattr_check_block(inode, bh);
6113 ++ if (ret)
6114 + goto out;
6115 +- }
6116 +
6117 + for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
6118 + entry = EXT4_XATTR_NEXT(entry))
6119 +@@ -1793,19 +1818,16 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
6120 + ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
6121 + atomic_read(&(bs->bh->b_count)),
6122 + le32_to_cpu(BHDR(bs->bh)->h_refcount));
6123 +- if (ext4_xattr_check_block(inode, bs->bh)) {
6124 +- EXT4_ERROR_INODE(inode, "bad block %llu",
6125 +- EXT4_I(inode)->i_file_acl);
6126 +- error = -EFSCORRUPTED;
6127 ++ error = ext4_xattr_check_block(inode, bs->bh);
6128 ++ if (error)
6129 + goto cleanup;
6130 +- }
6131 + /* Find the named attribute. */
6132 + bs->s.base = BHDR(bs->bh);
6133 + bs->s.first = BFIRST(bs->bh);
6134 + bs->s.end = bs->bh->b_data + bs->bh->b_size;
6135 + bs->s.here = bs->s.first;
6136 +- error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
6137 +- i->name, 1);
6138 ++ error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
6139 ++ i->name_index, i->name, 1);
6140 + if (error && error != -ENODATA)
6141 + goto cleanup;
6142 + bs->s.not_found = error;
6143 +@@ -2164,8 +2186,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
6144 + if (error)
6145 + return error;
6146 + /* Find the named attribute. */
6147 +- error = ext4_xattr_find_entry(&is->s.here, i->name_index,
6148 +- i->name, 0);
6149 ++ error = xattr_find_entry(inode, &is->s.here, is->s.end,
6150 ++ i->name_index, i->name, 0);
6151 + if (error && error != -ENODATA)
6152 + return error;
6153 + is->s.not_found = error;
6154 +@@ -2721,13 +2743,9 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
6155 + error = -EIO;
6156 + if (!bh)
6157 + goto cleanup;
6158 +- if (ext4_xattr_check_block(inode, bh)) {
6159 +- EXT4_ERROR_INODE(inode, "bad block %llu",
6160 +- EXT4_I(inode)->i_file_acl);
6161 +- error = -EFSCORRUPTED;
6162 +- brelse(bh);
6163 ++ error = ext4_xattr_check_block(inode, bh);
6164 ++ if (error)
6165 + goto cleanup;
6166 +- }
6167 + base = BHDR(bh);
6168 + end = bh->b_data + bh->b_size;
6169 + min_offs = end - base;
6170 +@@ -2884,11 +2902,8 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
6171 + goto cleanup;
6172 + }
6173 + error = ext4_xattr_check_block(inode, bh);
6174 +- if (error) {
6175 +- EXT4_ERROR_INODE(inode, "bad block %llu (error %d)",
6176 +- EXT4_I(inode)->i_file_acl, error);
6177 ++ if (error)
6178 + goto cleanup;
6179 +- }
6180 +
6181 + if (ext4_has_feature_ea_inode(inode->i_sb)) {
6182 + for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
6183 +diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
6184 +index dd54c4f995c8..f39cad2abe2a 100644
6185 +--- a/fs/ext4/xattr.h
6186 ++++ b/fs/ext4/xattr.h
6187 +@@ -70,6 +70,17 @@ struct ext4_xattr_entry {
6188 + EXT4_I(inode)->i_extra_isize))
6189 + #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
6190 +
6191 ++/*
6192 ++ * XATTR_SIZE_MAX is currently 64k, but for the purposes of checking
6193 ++ * for file system consistency errors, we use a somewhat bigger value.
6194 ++ * This allows XATTR_SIZE_MAX to grow in the future, but by using this
6195 ++ * instead of INT_MAX for certain consistency checks, we don't need to
6196 ++ * worry about arithmetic overflows. (Actually XATTR_SIZE_MAX is
6197 ++ * defined in include/uapi/linux/limits.h, so changing it is
6198 ++ * not going to be trivial....)
6199 ++ */
6200 ++#define EXT4_XATTR_SIZE_MAX (1 << 24)
6201 ++
6202 + /*
6203 + * The minimum size of EA value when you start storing it in an external inode
6204 + * size of block - size of header - size of 1 entry - 4 null bytes
6205 +diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
6206 +index d4d04fee568a..40c34a0ef58a 100644
6207 +--- a/fs/fs-writeback.c
6208 ++++ b/fs/fs-writeback.c
6209 +@@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits)
6210 + */
6211 + if (inode && inode_to_wb_is_valid(inode)) {
6212 + struct bdi_writeback *wb;
6213 +- bool locked, congested;
6214 ++ struct wb_lock_cookie lock_cookie = {};
6215 ++ bool congested;
6216 +
6217 +- wb = unlocked_inode_to_wb_begin(inode, &locked);
6218 ++ wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
6219 + congested = wb_congested(wb, cong_bits);
6220 +- unlocked_inode_to_wb_end(inode, locked);
6221 ++ unlocked_inode_to_wb_end(inode, &lock_cookie);
6222 + return congested;
6223 + }
6224 +
6225 +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
6226 +index 3fbf48ec2188..dfb057900e79 100644
6227 +--- a/fs/jbd2/journal.c
6228 ++++ b/fs/jbd2/journal.c
6229 +@@ -974,7 +974,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
6230 + }
6231 +
6232 + /*
6233 +- * This is a variaon of __jbd2_update_log_tail which checks for validity of
6234 ++ * This is a variation of __jbd2_update_log_tail which checks for validity of
6235 + * provided log tail and locks j_checkpoint_mutex. So it is safe against races
6236 + * with other threads updating log tail.
6237 + */
6238 +@@ -1417,6 +1417,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
6239 + journal_superblock_t *sb = journal->j_superblock;
6240 + int ret;
6241 +
6242 ++ if (is_journal_aborted(journal))
6243 ++ return -EIO;
6244 ++
6245 + BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
6246 + jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
6247 + tail_block, tail_tid);
6248 +@@ -1483,12 +1486,15 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
6249 + void jbd2_journal_update_sb_errno(journal_t *journal)
6250 + {
6251 + journal_superblock_t *sb = journal->j_superblock;
6252 ++ int errcode;
6253 +
6254 + read_lock(&journal->j_state_lock);
6255 +- jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
6256 +- journal->j_errno);
6257 +- sb->s_errno = cpu_to_be32(journal->j_errno);
6258 ++ errcode = journal->j_errno;
6259 + read_unlock(&journal->j_state_lock);
6260 ++ if (errcode == -ESHUTDOWN)
6261 ++ errcode = 0;
6262 ++ jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
6263 ++ sb->s_errno = cpu_to_be32(errcode);
6264 +
6265 + jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
6266 + }
6267 +@@ -2105,12 +2111,22 @@ void __jbd2_journal_abort_hard(journal_t *journal)
6268 + * but don't do any other IO. */
6269 + static void __journal_abort_soft (journal_t *journal, int errno)
6270 + {
6271 +- if (journal->j_flags & JBD2_ABORT)
6272 +- return;
6273 ++ int old_errno;
6274 +
6275 +- if (!journal->j_errno)
6276 ++ write_lock(&journal->j_state_lock);
6277 ++ old_errno = journal->j_errno;
6278 ++ if (!journal->j_errno || errno == -ESHUTDOWN)
6279 + journal->j_errno = errno;
6280 +
6281 ++ if (journal->j_flags & JBD2_ABORT) {
6282 ++ write_unlock(&journal->j_state_lock);
6283 ++ if (!old_errno && old_errno != -ESHUTDOWN &&
6284 ++ errno == -ESHUTDOWN)
6285 ++ jbd2_journal_update_sb_errno(journal);
6286 ++ return;
6287 ++ }
6288 ++ write_unlock(&journal->j_state_lock);
6289 ++
6290 + __jbd2_journal_abort_hard(journal);
6291 +
6292 + if (errno) {
6293 +diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
6294 +index f60dee7faf03..87bdf0f4cba1 100644
6295 +--- a/fs/jffs2/super.c
6296 ++++ b/fs/jffs2/super.c
6297 +@@ -342,7 +342,7 @@ static void jffs2_put_super (struct super_block *sb)
6298 + static void jffs2_kill_sb(struct super_block *sb)
6299 + {
6300 + struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
6301 +- if (!sb_rdonly(sb))
6302 ++ if (c && !sb_rdonly(sb))
6303 + jffs2_stop_garbage_collect_thread(c);
6304 + kill_mtd_super(sb);
6305 + kfree(c);
6306 +diff --git a/fs/namespace.c b/fs/namespace.c
6307 +index 9d1374ab6e06..c3ed9dc78655 100644
6308 +--- a/fs/namespace.c
6309 ++++ b/fs/namespace.c
6310 +@@ -1089,7 +1089,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
6311 + goto out_free;
6312 + }
6313 +
6314 +- mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
6315 ++ mnt->mnt.mnt_flags = old->mnt.mnt_flags;
6316 ++ mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
6317 + /* Don't allow unprivileged users to change mount flags */
6318 + if (flag & CL_UNPRIVILEGED) {
6319 + mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
6320 +diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
6321 +index 6702a6a0bbb5..e0e6a9d627df 100644
6322 +--- a/fs/notify/fanotify/fanotify.c
6323 ++++ b/fs/notify/fanotify/fanotify.c
6324 +@@ -92,7 +92,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
6325 + u32 event_mask,
6326 + const void *data, int data_type)
6327 + {
6328 +- __u32 marks_mask, marks_ignored_mask;
6329 ++ __u32 marks_mask = 0, marks_ignored_mask = 0;
6330 + const struct path *path = data;
6331 +
6332 + pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
6333 +@@ -108,24 +108,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
6334 + !d_can_lookup(path->dentry))
6335 + return false;
6336 +
6337 +- if (inode_mark && vfsmnt_mark) {
6338 +- marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
6339 +- marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
6340 +- } else if (inode_mark) {
6341 +- /*
6342 +- * if the event is for a child and this inode doesn't care about
6343 +- * events on the child, don't send it!
6344 +- */
6345 +- if ((event_mask & FS_EVENT_ON_CHILD) &&
6346 +- !(inode_mark->mask & FS_EVENT_ON_CHILD))
6347 +- return false;
6348 +- marks_mask = inode_mark->mask;
6349 +- marks_ignored_mask = inode_mark->ignored_mask;
6350 +- } else if (vfsmnt_mark) {
6351 +- marks_mask = vfsmnt_mark->mask;
6352 +- marks_ignored_mask = vfsmnt_mark->ignored_mask;
6353 +- } else {
6354 +- BUG();
6355 ++ /*
6356 ++ * if the event is for a child and this inode doesn't care about
6357 ++ * events on the child, don't send it!
6358 ++ */
6359 ++ if (inode_mark &&
6360 ++ (!(event_mask & FS_EVENT_ON_CHILD) ||
6361 ++ (inode_mark->mask & FS_EVENT_ON_CHILD))) {
6362 ++ marks_mask |= inode_mark->mask;
6363 ++ marks_ignored_mask |= inode_mark->ignored_mask;
6364 ++ }
6365 ++
6366 ++ if (vfsmnt_mark) {
6367 ++ marks_mask |= vfsmnt_mark->mask;
6368 ++ marks_ignored_mask |= vfsmnt_mark->ignored_mask;
6369 + }
6370 +
6371 + if (d_is_dir(path->dentry) &&
6372 +diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
6373 +index 3ae5fdba0225..10796d3fe27d 100644
6374 +--- a/fs/orangefs/super.c
6375 ++++ b/fs/orangefs/super.c
6376 +@@ -579,6 +579,11 @@ void orangefs_kill_sb(struct super_block *sb)
6377 + /* provided sb cleanup */
6378 + kill_anon_super(sb);
6379 +
6380 ++ if (!ORANGEFS_SB(sb)) {
6381 ++ mutex_lock(&orangefs_request_mutex);
6382 ++ mutex_unlock(&orangefs_request_mutex);
6383 ++ return;
6384 ++ }
6385 + /*
6386 + * issue the unmount to userspace to tell it to remove the
6387 + * dynamic mount info it has for this superblock
6388 +diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
6389 +index 70057359fbaf..23148c3ed675 100644
6390 +--- a/fs/reiserfs/journal.c
6391 ++++ b/fs/reiserfs/journal.c
6392 +@@ -2643,7 +2643,7 @@ static int journal_init_dev(struct super_block *super,
6393 + if (IS_ERR(journal->j_dev_bd)) {
6394 + result = PTR_ERR(journal->j_dev_bd);
6395 + journal->j_dev_bd = NULL;
6396 +- reiserfs_warning(super,
6397 ++ reiserfs_warning(super, "sh-457",
6398 + "journal_init_dev: Cannot open '%s': %i",
6399 + jdev_name, result);
6400 + return result;
6401 +diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
6402 +index b16ef162344a..6c397a389105 100644
6403 +--- a/fs/ubifs/super.c
6404 ++++ b/fs/ubifs/super.c
6405 +@@ -1737,8 +1737,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
6406 +
6407 + dbg_save_space_info(c);
6408 +
6409 +- for (i = 0; i < c->jhead_cnt; i++)
6410 +- ubifs_wbuf_sync(&c->jheads[i].wbuf);
6411 ++ for (i = 0; i < c->jhead_cnt; i++) {
6412 ++ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
6413 ++ if (err)
6414 ++ ubifs_ro_mode(c, err);
6415 ++ }
6416 +
6417 + c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
6418 + c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
6419 +@@ -1804,8 +1807,11 @@ static void ubifs_put_super(struct super_block *sb)
6420 + int err;
6421 +
6422 + /* Synchronize write-buffers */
6423 +- for (i = 0; i < c->jhead_cnt; i++)
6424 +- ubifs_wbuf_sync(&c->jheads[i].wbuf);
6425 ++ for (i = 0; i < c->jhead_cnt; i++) {
6426 ++ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
6427 ++ if (err)
6428 ++ ubifs_ro_mode(c, err);
6429 ++ }
6430 +
6431 + /*
6432 + * We are being cleanly unmounted which means the
6433 +diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
6434 +index f897e55f2cd0..16a8ad21b77e 100644
6435 +--- a/fs/udf/unicode.c
6436 ++++ b/fs/udf/unicode.c
6437 +@@ -28,6 +28,9 @@
6438 +
6439 + #include "udf_sb.h"
6440 +
6441 ++#define SURROGATE_MASK 0xfffff800
6442 ++#define SURROGATE_PAIR 0x0000d800
6443 ++
6444 + static int udf_uni2char_utf8(wchar_t uni,
6445 + unsigned char *out,
6446 + int boundlen)
6447 +@@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni,
6448 + if (boundlen <= 0)
6449 + return -ENAMETOOLONG;
6450 +
6451 ++ if ((uni & SURROGATE_MASK) == SURROGATE_PAIR)
6452 ++ return -EINVAL;
6453 ++
6454 + if (uni < 0x80) {
6455 + out[u_len++] = (unsigned char)uni;
6456 + } else if (uni < 0x800) {
6457 +diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
6458 +index 551f7600ab58..24e93dfcee9f 100644
6459 +--- a/include/dt-bindings/clock/mt2701-clk.h
6460 ++++ b/include/dt-bindings/clock/mt2701-clk.h
6461 +@@ -176,7 +176,8 @@
6462 + #define CLK_TOP_AUD_EXT1 156
6463 + #define CLK_TOP_AUD_EXT2 157
6464 + #define CLK_TOP_NFI1X_PAD 158
6465 +-#define CLK_TOP_NR 159
6466 ++#define CLK_TOP_AXISEL_D4 159
6467 ++#define CLK_TOP_NR 160
6468 +
6469 + /* APMIXEDSYS */
6470 +
6471 +diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
6472 +index bfe86b54f6c1..0bd432a4d7bd 100644
6473 +--- a/include/linux/backing-dev-defs.h
6474 ++++ b/include/linux/backing-dev-defs.h
6475 +@@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
6476 + set_wb_congested(bdi->wb.congested, sync);
6477 + }
6478 +
6479 ++struct wb_lock_cookie {
6480 ++ bool locked;
6481 ++ unsigned long flags;
6482 ++};
6483 ++
6484 + #ifdef CONFIG_CGROUP_WRITEBACK
6485 +
6486 + /**
6487 +diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
6488 +index 3e4ce54d84ab..82e8b73117d1 100644
6489 +--- a/include/linux/backing-dev.h
6490 ++++ b/include/linux/backing-dev.h
6491 +@@ -346,7 +346,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
6492 + /**
6493 + * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
6494 + * @inode: target inode
6495 +- * @lockedp: temp bool output param, to be passed to the end function
6496 ++ * @cookie: output param, to be passed to the end function
6497 + *
6498 + * The caller wants to access the wb associated with @inode but isn't
6499 + * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
6500 +@@ -354,12 +354,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
6501 + * association doesn't change until the transaction is finished with
6502 + * unlocked_inode_to_wb_end().
6503 + *
6504 +- * The caller must call unlocked_inode_to_wb_end() with *@lockdep
6505 +- * afterwards and can't sleep during transaction. IRQ may or may not be
6506 +- * disabled on return.
6507 ++ * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
6508 ++ * can't sleep during the transaction. IRQs may or may not be disabled on
6509 ++ * return.
6510 + */
6511 + static inline struct bdi_writeback *
6512 +-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
6513 ++unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
6514 + {
6515 + rcu_read_lock();
6516 +
6517 +@@ -367,10 +367,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
6518 + * Paired with store_release in inode_switch_wb_work_fn() and
6519 + * ensures that we see the new wb if we see cleared I_WB_SWITCH.
6520 + */
6521 +- *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
6522 ++ cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
6523 +
6524 +- if (unlikely(*lockedp))
6525 +- spin_lock_irq(&inode->i_mapping->tree_lock);
6526 ++ if (unlikely(cookie->locked))
6527 ++ spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
6528 +
6529 + /*
6530 + * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
6531 +@@ -382,12 +382,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
6532 + /**
6533 + * unlocked_inode_to_wb_end - end inode wb access transaction
6534 + * @inode: target inode
6535 +- * @locked: *@lockedp from unlocked_inode_to_wb_begin()
6536 ++ * @cookie: @cookie from unlocked_inode_to_wb_begin()
6537 + */
6538 +-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
6539 ++static inline void unlocked_inode_to_wb_end(struct inode *inode,
6540 ++ struct wb_lock_cookie *cookie)
6541 + {
6542 +- if (unlikely(locked))
6543 +- spin_unlock_irq(&inode->i_mapping->tree_lock);
6544 ++ if (unlikely(cookie->locked))
6545 ++ spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags);
6546 +
6547 + rcu_read_unlock();
6548 + }
6549 +@@ -434,12 +435,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
6550 + }
6551 +
6552 + static inline struct bdi_writeback *
6553 +-unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
6554 ++unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
6555 + {
6556 + return inode_to_wb(inode);
6557 + }
6558 +
6559 +-static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
6560 ++static inline void unlocked_inode_to_wb_end(struct inode *inode,
6561 ++ struct wb_lock_cookie *cookie)
6562 + {
6563 + }
6564 +
6565 +diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
6566 +index bf18b95ed92d..17b18b91ebac 100644
6567 +--- a/include/linux/blk_types.h
6568 ++++ b/include/linux/blk_types.h
6569 +@@ -20,8 +20,13 @@ typedef void (bio_end_io_t) (struct bio *);
6570 +
6571 + /*
6572 + * Block error status values. See block/blk-core:blk_errors for the details.
6573 ++ * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
6574 + */
6575 ++#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
6576 ++typedef u32 __bitwise blk_status_t;
6577 ++#else
6578 + typedef u8 __bitwise blk_status_t;
6579 ++#endif
6580 + #define BLK_STS_OK 0
6581 + #define BLK_STS_NOTSUPP ((__force blk_status_t)1)
6582 + #define BLK_STS_TIMEOUT ((__force blk_status_t)2)
6583 +diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
6584 +index d3f264a5b04d..ceb96ecab96e 100644
6585 +--- a/include/linux/compiler-clang.h
6586 ++++ b/include/linux/compiler-clang.h
6587 +@@ -17,9 +17,6 @@
6588 + */
6589 + #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
6590 +
6591 +-#define randomized_struct_fields_start struct {
6592 +-#define randomized_struct_fields_end };
6593 +-
6594 + /* all clang versions usable with the kernel support KASAN ABI version 5 */
6595 + #define KASAN_ABI_VERSION 5
6596 +
6597 +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
6598 +index e2c7f4369eff..b4bf73f5e38f 100644
6599 +--- a/include/linux/compiler-gcc.h
6600 ++++ b/include/linux/compiler-gcc.h
6601 +@@ -242,6 +242,9 @@
6602 + #if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__)
6603 + #define __randomize_layout __attribute__((randomize_layout))
6604 + #define __no_randomize_layout __attribute__((no_randomize_layout))
6605 ++/* This anon struct can add padding, so only enable it under randstruct. */
6606 ++#define randomized_struct_fields_start struct {
6607 ++#define randomized_struct_fields_end } __randomize_layout;
6608 + #endif
6609 +
6610 + #endif /* GCC_VERSION >= 40500 */
6611 +@@ -256,15 +259,6 @@
6612 + */
6613 + #define __visible __attribute__((externally_visible))
6614 +
6615 +-/*
6616 +- * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but it is only
6617 +- * possible since GCC 4.6. To provide as much build testing coverage
6618 +- * as possible, this is used for all GCC 4.6+ builds, and not just on
6619 +- * RANDSTRUCT_PLUGIN builds.
6620 +- */
6621 +-#define randomized_struct_fields_start struct {
6622 +-#define randomized_struct_fields_end } __randomize_layout;
6623 +-
6624 + #endif /* GCC_VERSION >= 40600 */
6625 +
6626 +
6627 +diff --git a/include/linux/hid.h b/include/linux/hid.h
6628 +index 091a81cf330f..29b981b1694d 100644
6629 +--- a/include/linux/hid.h
6630 ++++ b/include/linux/hid.h
6631 +@@ -515,6 +515,12 @@ enum hid_type {
6632 + HID_TYPE_USBNONE
6633 + };
6634 +
6635 ++enum hid_battery_status {
6636 ++ HID_BATTERY_UNKNOWN = 0,
6637 ++ HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */
6638 ++ HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */
6639 ++};
6640 ++
6641 + struct hid_driver;
6642 + struct hid_ll_driver;
6643 +
6644 +@@ -557,7 +563,8 @@ struct hid_device { /* device report descriptor */
6645 + __s32 battery_max;
6646 + __s32 battery_report_type;
6647 + __s32 battery_report_id;
6648 +- bool battery_reported;
6649 ++ enum hid_battery_status battery_status;
6650 ++ bool battery_avoid_query;
6651 + #endif
6652 +
6653 + unsigned int status; /* see STAT flags above */
6654 +@@ -851,7 +858,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
6655 + extern void hidinput_disconnect(struct hid_device *);
6656 +
6657 + int hid_set_field(struct hid_field *, unsigned, __s32);
6658 +-int hid_input_report(struct hid_device *, int type, u8 *, int, int);
6659 ++int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
6660 + int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
6661 + struct hid_field *hidinput_get_led_field(struct hid_device *hid);
6662 + unsigned int hidinput_count_leds(struct hid_device *hid);
6663 +@@ -1102,13 +1109,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
6664 + *
6665 + * @report: the report we want to know the length
6666 + */
6667 +-static inline int hid_report_len(struct hid_report *report)
6668 ++static inline u32 hid_report_len(struct hid_report *report)
6669 + {
6670 + /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
6671 + return ((report->size - 1) >> 3) + 1 + (report->id > 0);
6672 + }
6673 +
6674 +-int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
6675 ++int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
6676 + int interrupt);
6677 +
6678 + /* HID quirks API */
6679 +diff --git a/include/linux/hmm.h b/include/linux/hmm.h
6680 +index 325017ad9311..36dd21fe5caf 100644
6681 +--- a/include/linux/hmm.h
6682 ++++ b/include/linux/hmm.h
6683 +@@ -498,23 +498,16 @@ struct hmm_device {
6684 + struct hmm_device *hmm_device_new(void *drvdata);
6685 + void hmm_device_put(struct hmm_device *hmm_device);
6686 + #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
6687 +-#endif /* IS_ENABLED(CONFIG_HMM) */
6688 +
6689 + /* Below are for HMM internal use only! Not to be used by device driver! */
6690 +-#if IS_ENABLED(CONFIG_HMM_MIRROR)
6691 + void hmm_mm_destroy(struct mm_struct *mm);
6692 +
6693 + static inline void hmm_mm_init(struct mm_struct *mm)
6694 + {
6695 + mm->hmm = NULL;
6696 + }
6697 +-#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
6698 +-static inline void hmm_mm_destroy(struct mm_struct *mm) {}
6699 +-static inline void hmm_mm_init(struct mm_struct *mm) {}
6700 +-#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
6701 +-
6702 +-
6703 + #else /* IS_ENABLED(CONFIG_HMM) */
6704 + static inline void hmm_mm_destroy(struct mm_struct *mm) {}
6705 + static inline void hmm_mm_init(struct mm_struct *mm) {}
6706 ++#endif /* IS_ENABLED(CONFIG_HMM) */
6707 + #endif /* LINUX_HMM_H */
6708 +diff --git a/include/linux/mm.h b/include/linux/mm.h
6709 +index ad06d42adb1a..95a2d748e978 100644
6710 +--- a/include/linux/mm.h
6711 ++++ b/include/linux/mm.h
6712 +@@ -2604,6 +2604,7 @@ enum mf_action_page_type {
6713 + MF_MSG_POISONED_HUGE,
6714 + MF_MSG_HUGE,
6715 + MF_MSG_FREE_HUGE,
6716 ++ MF_MSG_NON_PMD_HUGE,
6717 + MF_MSG_UNMAP_FAILED,
6718 + MF_MSG_DIRTY_SWAPCACHE,
6719 + MF_MSG_CLEAN_SWAPCACHE,
6720 +diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
6721 +index 760c969d885d..12bbf8c81112 100644
6722 +--- a/include/sound/pcm_oss.h
6723 ++++ b/include/sound/pcm_oss.h
6724 +@@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
6725 + char *buffer; /* vmallocated period */
6726 + size_t buffer_used; /* used length from period buffer */
6727 + struct mutex params_lock;
6728 ++ atomic_t rw_ref; /* concurrent read/write accesses */
6729 + #ifdef CONFIG_SND_PCM_OSS_PLUGINS
6730 + struct snd_pcm_plugin *plugin_first;
6731 + struct snd_pcm_plugin *plugin_last;
6732 +diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
6733 +index c34f4490d025..26ee91300e3e 100644
6734 +--- a/include/uapi/linux/random.h
6735 ++++ b/include/uapi/linux/random.h
6736 +@@ -35,6 +35,9 @@
6737 + /* Clear the entropy pool and associated counters. (Superuser only.) */
6738 + #define RNDCLEARPOOL _IO( 'R', 0x06 )
6739 +
6740 ++/* Reseed CRNG. (Superuser only.) */
6741 ++#define RNDRESEEDCRNG _IO( 'R', 0x07 )
6742 ++
6743 + struct rand_pool_info {
6744 + int entropy_count;
6745 + int buf_size;
6746 +diff --git a/ipc/shm.c b/ipc/shm.c
6747 +index 93e0e3a4d009..f68420b1ad93 100644
6748 +--- a/ipc/shm.c
6749 ++++ b/ipc/shm.c
6750 +@@ -203,6 +203,12 @@ static int __shm_open(struct vm_area_struct *vma)
6751 + if (IS_ERR(shp))
6752 + return PTR_ERR(shp);
6753 +
6754 ++ if (shp->shm_file != sfd->file) {
6755 ++ /* ID was reused */
6756 ++ shm_unlock(shp);
6757 ++ return -EINVAL;
6758 ++ }
6759 ++
6760 + shp->shm_atim = ktime_get_real_seconds();
6761 + shp->shm_lprid = task_tgid_vnr(current);
6762 + shp->shm_nattch++;
6763 +@@ -431,8 +437,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
6764 + int ret;
6765 +
6766 + /*
6767 +- * In case of remap_file_pages() emulation, the file can represent
6768 +- * removed IPC ID: propogate shm_lock() error to caller.
6769 ++ * In case of remap_file_pages() emulation, the file can represent an
6770 ++ * IPC ID that was removed, and possibly even reused by another shm
6771 ++ * segment already. Propagate this case as an error to caller.
6772 + */
6773 + ret = __shm_open(vma);
6774 + if (ret)
6775 +@@ -456,6 +463,7 @@ static int shm_release(struct inode *ino, struct file *file)
6776 + struct shm_file_data *sfd = shm_file_data(file);
6777 +
6778 + put_ipc_ns(sfd->ns);
6779 ++ fput(sfd->file);
6780 + shm_file_data(file) = NULL;
6781 + kfree(sfd);
6782 + return 0;
6783 +@@ -1402,7 +1410,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
6784 + file->f_mapping = shp->shm_file->f_mapping;
6785 + sfd->id = shp->shm_perm.id;
6786 + sfd->ns = get_ipc_ns(ns);
6787 +- sfd->file = shp->shm_file;
6788 ++ /*
6789 ++ * We need to take a reference to the real shm file to prevent the
6790 ++ * pointer from becoming stale in cases where the lifetime of the outer
6791 ++ * file extends beyond that of the shm segment. It's not usually
6792 ++ * possible, but it can happen during remap_file_pages() emulation as
6793 ++ * that unmaps the memory, then does ->mmap() via file reference only.
6794 ++ * We'll deny the ->mmap() if the shm segment was since removed, but to
6795 ++ * detect shm ID reuse we need to compare the file pointers.
6796 ++ */
6797 ++ sfd->file = get_file(shp->shm_file);
6798 + sfd->vm_ops = NULL;
6799 +
6800 + err = security_mmap_file(file, prot, flags);
6801 +diff --git a/kernel/resource.c b/kernel/resource.c
6802 +index e270b5048988..2af6c03858b9 100644
6803 +--- a/kernel/resource.c
6804 ++++ b/kernel/resource.c
6805 +@@ -651,7 +651,8 @@ static int __find_resource(struct resource *root, struct resource *old,
6806 + alloc.start = constraint->alignf(constraint->alignf_data, &avail,
6807 + size, constraint->align);
6808 + alloc.end = alloc.start + size - 1;
6809 +- if (resource_contains(&avail, &alloc)) {
6810 ++ if (alloc.start <= alloc.end &&
6811 ++ resource_contains(&avail, &alloc)) {
6812 + new->start = alloc.start;
6813 + new->end = alloc.end;
6814 + return 0;
6815 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
6816 +index dcf1c4dd3efe..7ac7b08b563a 100644
6817 +--- a/kernel/trace/ring_buffer.c
6818 ++++ b/kernel/trace/ring_buffer.c
6819 +@@ -1136,6 +1136,11 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
6820 + struct buffer_page *bpage, *tmp;
6821 + long i;
6822 +
6823 ++ /* Check first that the required memory is available */
6824 ++ i = si_mem_available();
6825 ++ if (i < nr_pages)
6826 ++ return -ENOMEM;
6827 ++
6828 + for (i = 0; i < nr_pages; i++) {
6829 + struct page *page;
6830 + /*
6831 +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
6832 +index 268029ae1be6..df08863e6d53 100644
6833 +--- a/kernel/trace/trace_uprobe.c
6834 ++++ b/kernel/trace/trace_uprobe.c
6835 +@@ -608,7 +608,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
6836 +
6837 + /* Don't print "0x (null)" when offset is 0 */
6838 + if (tu->offset) {
6839 +- seq_printf(m, "0x%px", (void *)tu->offset);
6840 ++ seq_printf(m, "0x%0*lx", (int)(sizeof(void *) * 2), tu->offset);
6841 + } else {
6842 + switch (sizeof(void *)) {
6843 + case 4:
6844 +diff --git a/lib/swiotlb.c b/lib/swiotlb.c
6845 +index c43ec2271469..44f7eb408fdb 100644
6846 +--- a/lib/swiotlb.c
6847 ++++ b/lib/swiotlb.c
6848 +@@ -732,7 +732,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
6849 + goto out_warn;
6850 +
6851 + *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
6852 +- if (dma_coherent_ok(dev, *dma_handle, size))
6853 ++ if (!dma_coherent_ok(dev, *dma_handle, size))
6854 + goto out_unmap;
6855 +
6856 + memset(phys_to_virt(phys_addr), 0, size);
6857 +diff --git a/lib/vsprintf.c b/lib/vsprintf.c
6858 +index d7a708f82559..89f8a4a4b770 100644
6859 +--- a/lib/vsprintf.c
6860 ++++ b/lib/vsprintf.c
6861 +@@ -2591,6 +2591,8 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
6862 + case 's':
6863 + case 'F':
6864 + case 'f':
6865 ++ case 'x':
6866 ++ case 'K':
6867 + save_arg(void *);
6868 + break;
6869 + default:
6870 +@@ -2765,6 +2767,8 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
6871 + case 's':
6872 + case 'F':
6873 + case 'f':
6874 ++ case 'x':
6875 ++ case 'K':
6876 + process = true;
6877 + break;
6878 + default:
6879 +diff --git a/mm/filemap.c b/mm/filemap.c
6880 +index 693f62212a59..787ff18663bf 100644
6881 +--- a/mm/filemap.c
6882 ++++ b/mm/filemap.c
6883 +@@ -785,7 +785,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
6884 + VM_BUG_ON_PAGE(!PageLocked(new), new);
6885 + VM_BUG_ON_PAGE(new->mapping, new);
6886 +
6887 +- error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
6888 ++ error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
6889 + if (!error) {
6890 + struct address_space *mapping = old->mapping;
6891 + void (*freepage)(struct page *);
6892 +@@ -841,7 +841,7 @@ static int __add_to_page_cache_locked(struct page *page,
6893 + return error;
6894 + }
6895 +
6896 +- error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
6897 ++ error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
6898 + if (error) {
6899 + if (!huge)
6900 + mem_cgroup_cancel_charge(page, memcg, false);
6901 +@@ -1584,8 +1584,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
6902 + if (fgp_flags & FGP_ACCESSED)
6903 + __SetPageReferenced(page);
6904 +
6905 +- err = add_to_page_cache_lru(page, mapping, offset,
6906 +- gfp_mask & GFP_RECLAIM_MASK);
6907 ++ err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
6908 + if (unlikely(err)) {
6909 + put_page(page);
6910 + page = NULL;
6911 +@@ -2388,7 +2387,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
6912 + if (!page)
6913 + return -ENOMEM;
6914 +
6915 +- ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
6916 ++ ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
6917 + if (ret == 0)
6918 + ret = mapping->a_ops->readpage(file, page);
6919 + else if (ret == -EEXIST)
6920 +diff --git a/mm/hmm.c b/mm/hmm.c
6921 +index 320545b98ff5..91d3f062dd28 100644
6922 +--- a/mm/hmm.c
6923 ++++ b/mm/hmm.c
6924 +@@ -277,7 +277,8 @@ static int hmm_pfns_bad(unsigned long addr,
6925 + unsigned long end,
6926 + struct mm_walk *walk)
6927 + {
6928 +- struct hmm_range *range = walk->private;
6929 ++ struct hmm_vma_walk *hmm_vma_walk = walk->private;
6930 ++ struct hmm_range *range = hmm_vma_walk->range;
6931 + hmm_pfn_t *pfns = range->pfns;
6932 + unsigned long i;
6933 +
6934 +diff --git a/mm/ksm.c b/mm/ksm.c
6935 +index 293721f5da70..2d6b35234926 100644
6936 +--- a/mm/ksm.c
6937 ++++ b/mm/ksm.c
6938 +@@ -1131,6 +1131,13 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
6939 + } else {
6940 + newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
6941 + vma->vm_page_prot));
6942 ++ /*
6943 ++ * We're replacing an anonymous page with a zero page, which is
6944 ++ * not anonymous. We need to do proper accounting, otherwise we
6945 ++ * will get wrong values in /proc, and a BUG message in dmesg
6946 ++ * when tearing down the mm.
6947 ++ */
6948 ++ dec_mm_counter(mm, MM_ANONPAGES);
6949 + }
6950 +
6951 + flush_cache_page(vma, addr, pte_pfn(*ptep));
6952 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
6953 +index 8291b75f42c8..2d4bf647cf01 100644
6954 +--- a/mm/memory-failure.c
6955 ++++ b/mm/memory-failure.c
6956 +@@ -502,6 +502,7 @@ static const char * const action_page_types[] = {
6957 + [MF_MSG_POISONED_HUGE] = "huge page already hardware poisoned",
6958 + [MF_MSG_HUGE] = "huge page",
6959 + [MF_MSG_FREE_HUGE] = "free huge page",
6960 ++ [MF_MSG_NON_PMD_HUGE] = "non-pmd-sized huge page",
6961 + [MF_MSG_UNMAP_FAILED] = "unmapping failed page",
6962 + [MF_MSG_DIRTY_SWAPCACHE] = "dirty swapcache page",
6963 + [MF_MSG_CLEAN_SWAPCACHE] = "clean swapcache page",
6964 +@@ -1084,6 +1085,21 @@ static int memory_failure_hugetlb(unsigned long pfn, int flags)
6965 + return 0;
6966 + }
6967 +
6968 ++ /*
6969 ++ * TODO: hwpoison for pud-sized hugetlb doesn't work right now, so
6970 ++ * simply disable it. In order to make it work properly, we need to
6971 ++ * make sure that:
6972 ++ * - conversion of a pud that maps an error hugetlb into hwpoison
6973 ++ * entry properly works, and
6974 ++ * - other mm code walking over page table is aware of pud-aligned
6975 ++ * hwpoison entries.
6976 ++ */
6977 ++ if (huge_page_size(page_hstate(head)) > PMD_SIZE) {
6978 ++ action_result(pfn, MF_MSG_NON_PMD_HUGE, MF_IGNORED);
6979 ++ res = -EBUSY;
6980 ++ goto out;
6981 ++ }
6982 ++
6983 + if (!hwpoison_user_mappings(p, pfn, flags, &head)) {
6984 + action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
6985 + res = -EBUSY;
6986 +diff --git a/mm/page-writeback.c b/mm/page-writeback.c
6987 +index 586f31261c83..8369572e1f7d 100644
6988 +--- a/mm/page-writeback.c
6989 ++++ b/mm/page-writeback.c
6990 +@@ -2501,13 +2501,13 @@ void account_page_redirty(struct page *page)
6991 + if (mapping && mapping_cap_account_dirty(mapping)) {
6992 + struct inode *inode = mapping->host;
6993 + struct bdi_writeback *wb;
6994 +- bool locked;
6995 ++ struct wb_lock_cookie cookie = {};
6996 +
6997 +- wb = unlocked_inode_to_wb_begin(inode, &locked);
6998 ++ wb = unlocked_inode_to_wb_begin(inode, &cookie);
6999 + current->nr_dirtied--;
7000 + dec_node_page_state(page, NR_DIRTIED);
7001 + dec_wb_stat(wb, WB_DIRTIED);
7002 +- unlocked_inode_to_wb_end(inode, locked);
7003 ++ unlocked_inode_to_wb_end(inode, &cookie);
7004 + }
7005 + }
7006 + EXPORT_SYMBOL(account_page_redirty);
7007 +@@ -2613,15 +2613,15 @@ void __cancel_dirty_page(struct page *page)
7008 + if (mapping_cap_account_dirty(mapping)) {
7009 + struct inode *inode = mapping->host;
7010 + struct bdi_writeback *wb;
7011 +- bool locked;
7012 ++ struct wb_lock_cookie cookie = {};
7013 +
7014 + lock_page_memcg(page);
7015 +- wb = unlocked_inode_to_wb_begin(inode, &locked);
7016 ++ wb = unlocked_inode_to_wb_begin(inode, &cookie);
7017 +
7018 + if (TestClearPageDirty(page))
7019 + account_page_cleaned(page, mapping, wb);
7020 +
7021 +- unlocked_inode_to_wb_end(inode, locked);
7022 ++ unlocked_inode_to_wb_end(inode, &cookie);
7023 + unlock_page_memcg(page);
7024 + } else {
7025 + ClearPageDirty(page);
7026 +@@ -2653,7 +2653,7 @@ int clear_page_dirty_for_io(struct page *page)
7027 + if (mapping && mapping_cap_account_dirty(mapping)) {
7028 + struct inode *inode = mapping->host;
7029 + struct bdi_writeback *wb;
7030 +- bool locked;
7031 ++ struct wb_lock_cookie cookie = {};
7032 +
7033 + /*
7034 + * Yes, Virginia, this is indeed insane.
7035 +@@ -2690,14 +2690,14 @@ int clear_page_dirty_for_io(struct page *page)
7036 + * always locked coming in here, so we get the desired
7037 + * exclusion.
7038 + */
7039 +- wb = unlocked_inode_to_wb_begin(inode, &locked);
7040 ++ wb = unlocked_inode_to_wb_begin(inode, &cookie);
7041 + if (TestClearPageDirty(page)) {
7042 + dec_lruvec_page_state(page, NR_FILE_DIRTY);
7043 + dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
7044 + dec_wb_stat(wb, WB_RECLAIMABLE);
7045 + ret = 1;
7046 + }
7047 +- unlocked_inode_to_wb_end(inode, locked);
7048 ++ unlocked_inode_to_wb_end(inode, &cookie);
7049 + return ret;
7050 + }
7051 + return TestClearPageDirty(page);
7052 +diff --git a/mm/slab.c b/mm/slab.c
7053 +index 9095c3945425..a76006aae857 100644
7054 +--- a/mm/slab.c
7055 ++++ b/mm/slab.c
7056 +@@ -4074,7 +4074,8 @@ static void cache_reap(struct work_struct *w)
7057 + next_reap_node();
7058 + out:
7059 + /* Set up the next iteration */
7060 +- schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
7061 ++ schedule_delayed_work_on(smp_processor_id(), work,
7062 ++ round_jiffies_relative(REAPTIMEOUT_AC));
7063 + }
7064 +
7065 + void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
7066 +diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
7067 +index fc97fc3ed637..0178ea878b75 100644
7068 +--- a/net/sunrpc/rpc_pipe.c
7069 ++++ b/net/sunrpc/rpc_pipe.c
7070 +@@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
7071 + struct dentry *clnt_dir = pipe_dentry->d_parent;
7072 + struct dentry *gssd_dir = clnt_dir->d_parent;
7073 +
7074 ++ dget(pipe_dentry);
7075 + __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
7076 + __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
7077 + __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
7078 +diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
7079 +index f0855a959a27..4bc0f4d94a01 100644
7080 +--- a/net/sunrpc/xprtrdma/rpc_rdma.c
7081 ++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
7082 +@@ -1366,7 +1366,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
7083 +
7084 + trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
7085 +
7086 +- queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work);
7087 ++ queue_work(rpcrdma_receive_wq, &rep->rr_work);
7088 + return;
7089 +
7090 + out_badstatus:
7091 +diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
7092 +index 4b1ecfe979cf..f86021e3b853 100644
7093 +--- a/net/sunrpc/xprtrdma/transport.c
7094 ++++ b/net/sunrpc/xprtrdma/transport.c
7095 +@@ -52,7 +52,6 @@
7096 + #include <linux/slab.h>
7097 + #include <linux/seq_file.h>
7098 + #include <linux/sunrpc/addr.h>
7099 +-#include <linux/smp.h>
7100 +
7101 + #include "xprt_rdma.h"
7102 +
7103 +@@ -651,7 +650,6 @@ xprt_rdma_allocate(struct rpc_task *task)
7104 + if (!rpcrdma_get_recvbuf(r_xprt, req, rqst->rq_rcvsize, flags))
7105 + goto out_fail;
7106 +
7107 +- req->rl_cpu = smp_processor_id();
7108 + req->rl_connect_cookie = 0; /* our reserved value */
7109 + rpcrdma_set_xprtdata(rqst, req);
7110 + rqst->rq_buffer = req->rl_sendbuf->rg_base;
7111 +diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
7112 +index e6f84a6434a0..25b0ecbd37e2 100644
7113 +--- a/net/sunrpc/xprtrdma/verbs.c
7114 ++++ b/net/sunrpc/xprtrdma/verbs.c
7115 +@@ -250,7 +250,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
7116 + wait_for_completion(&ia->ri_remove_done);
7117 +
7118 + ia->ri_id = NULL;
7119 +- ia->ri_pd = NULL;
7120 + ia->ri_device = NULL;
7121 + /* Return 1 to ensure the core destroys the id. */
7122 + return 1;
7123 +@@ -445,7 +444,9 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
7124 + ia->ri_id->qp = NULL;
7125 + }
7126 + ib_free_cq(ep->rep_attr.recv_cq);
7127 ++ ep->rep_attr.recv_cq = NULL;
7128 + ib_free_cq(ep->rep_attr.send_cq);
7129 ++ ep->rep_attr.send_cq = NULL;
7130 +
7131 + /* The ULP is responsible for ensuring all DMA
7132 + * mappings and MRs are gone.
7133 +@@ -458,6 +459,8 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
7134 + rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
7135 + }
7136 + rpcrdma_mrs_destroy(buf);
7137 ++ ib_dealloc_pd(ia->ri_pd);
7138 ++ ia->ri_pd = NULL;
7139 +
7140 + /* Allow waiters to continue */
7141 + complete(&ia->ri_remove_done);
7142 +@@ -628,14 +631,16 @@ rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
7143 + {
7144 + cancel_delayed_work_sync(&ep->rep_connect_worker);
7145 +
7146 +- if (ia->ri_id->qp) {
7147 ++ if (ia->ri_id && ia->ri_id->qp) {
7148 + rpcrdma_ep_disconnect(ep, ia);
7149 + rdma_destroy_qp(ia->ri_id);
7150 + ia->ri_id->qp = NULL;
7151 + }
7152 +
7153 +- ib_free_cq(ep->rep_attr.recv_cq);
7154 +- ib_free_cq(ep->rep_attr.send_cq);
7155 ++ if (ep->rep_attr.recv_cq)
7156 ++ ib_free_cq(ep->rep_attr.recv_cq);
7157 ++ if (ep->rep_attr.send_cq)
7158 ++ ib_free_cq(ep->rep_attr.send_cq);
7159 + }
7160 +
7161 + /* Re-establish a connection after a device removal event.
7162 +diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
7163 +index 69883a960a3f..430a6de8300e 100644
7164 +--- a/net/sunrpc/xprtrdma/xprt_rdma.h
7165 ++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
7166 +@@ -334,7 +334,6 @@ enum {
7167 + struct rpcrdma_buffer;
7168 + struct rpcrdma_req {
7169 + struct list_head rl_list;
7170 +- int rl_cpu;
7171 + unsigned int rl_connect_cookie;
7172 + struct rpcrdma_buffer *rl_buffer;
7173 + struct rpcrdma_rep *rl_reply;
7174 +diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
7175 +index 441405081195..1980f68246cb 100644
7176 +--- a/sound/core/oss/pcm_oss.c
7177 ++++ b/sound/core/oss/pcm_oss.c
7178 +@@ -823,8 +823,25 @@ static int choose_rate(struct snd_pcm_substream *substream,
7179 + return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
7180 + }
7181 +
7182 +-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
7183 +- bool trylock)
7184 ++/* parameter locking: returns immediately if tried during streaming */
7185 ++static int lock_params(struct snd_pcm_runtime *runtime)
7186 ++{
7187 ++ if (mutex_lock_interruptible(&runtime->oss.params_lock))
7188 ++ return -ERESTARTSYS;
7189 ++ if (atomic_read(&runtime->oss.rw_ref)) {
7190 ++ mutex_unlock(&runtime->oss.params_lock);
7191 ++ return -EBUSY;
7192 ++ }
7193 ++ return 0;
7194 ++}
7195 ++
7196 ++static void unlock_params(struct snd_pcm_runtime *runtime)
7197 ++{
7198 ++ mutex_unlock(&runtime->oss.params_lock);
7199 ++}
7200 ++
7201 ++/* call with params_lock held */
7202 ++static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
7203 + {
7204 + struct snd_pcm_runtime *runtime = substream->runtime;
7205 + struct snd_pcm_hw_params *params, *sparams;
7206 +@@ -838,11 +855,8 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
7207 + const struct snd_mask *sformat_mask;
7208 + struct snd_mask mask;
7209 +
7210 +- if (trylock) {
7211 +- if (!(mutex_trylock(&runtime->oss.params_lock)))
7212 +- return -EAGAIN;
7213 +- } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
7214 +- return -ERESTARTSYS;
7215 ++ if (!runtime->oss.params)
7216 ++ return 0;
7217 + sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
7218 + params = kmalloc(sizeof(*params), GFP_KERNEL);
7219 + sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
7220 +@@ -1068,6 +1082,23 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
7221 + kfree(sw_params);
7222 + kfree(params);
7223 + kfree(sparams);
7224 ++ return err;
7225 ++}
7226 ++
7227 ++/* this one takes the lock by itself */
7228 ++static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
7229 ++ bool trylock)
7230 ++{
7231 ++ struct snd_pcm_runtime *runtime = substream->runtime;
7232 ++ int err;
7233 ++
7234 ++ if (trylock) {
7235 ++ if (!(mutex_trylock(&runtime->oss.params_lock)))
7236 ++ return -EAGAIN;
7237 ++ } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
7238 ++ return -ERESTARTSYS;
7239 ++
7240 ++ err = snd_pcm_oss_change_params_locked(substream);
7241 + mutex_unlock(&runtime->oss.params_lock);
7242 + return err;
7243 + }
7244 +@@ -1096,6 +1127,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
7245 + return 0;
7246 + }
7247 +
7248 ++/* call with params_lock held */
7249 ++/* NOTE: this always calls PREPARE unconditionally no matter whether
7250 ++ * runtime->oss.prepare is set or not
7251 ++ */
7252 + static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
7253 + {
7254 + int err;
7255 +@@ -1120,14 +1155,35 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
7256 + struct snd_pcm_runtime *runtime;
7257 + int err;
7258 +
7259 +- if (substream == NULL)
7260 +- return 0;
7261 + runtime = substream->runtime;
7262 + if (runtime->oss.params) {
7263 + err = snd_pcm_oss_change_params(substream, false);
7264 + if (err < 0)
7265 + return err;
7266 + }
7267 ++ if (runtime->oss.prepare) {
7268 ++ if (mutex_lock_interruptible(&runtime->oss.params_lock))
7269 ++ return -ERESTARTSYS;
7270 ++ err = snd_pcm_oss_prepare(substream);
7271 ++ mutex_unlock(&runtime->oss.params_lock);
7272 ++ if (err < 0)
7273 ++ return err;
7274 ++ }
7275 ++ return 0;
7276 ++}
7277 ++
7278 ++/* call with params_lock held */
7279 ++static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
7280 ++{
7281 ++ struct snd_pcm_runtime *runtime;
7282 ++ int err;
7283 ++
7284 ++ runtime = substream->runtime;
7285 ++ if (runtime->oss.params) {
7286 ++ err = snd_pcm_oss_change_params_locked(substream);
7287 ++ if (err < 0)
7288 ++ return err;
7289 ++ }
7290 + if (runtime->oss.prepare) {
7291 + err = snd_pcm_oss_prepare(substream);
7292 + if (err < 0)
7293 +@@ -1332,13 +1388,15 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
7294 + if (atomic_read(&substream->mmap_count))
7295 + return -ENXIO;
7296 +
7297 +- if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
7298 +- return tmp;
7299 ++ atomic_inc(&runtime->oss.rw_ref);
7300 + while (bytes > 0) {
7301 + if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
7302 + tmp = -ERESTARTSYS;
7303 + break;
7304 + }
7305 ++ tmp = snd_pcm_oss_make_ready_locked(substream);
7306 ++ if (tmp < 0)
7307 ++ goto err;
7308 + if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
7309 + tmp = bytes;
7310 + if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
7311 +@@ -1394,6 +1452,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
7312 + }
7313 + tmp = 0;
7314 + }
7315 ++ atomic_dec(&runtime->oss.rw_ref);
7316 + return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
7317 + }
7318 +
7319 +@@ -1439,13 +1498,15 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
7320 + if (atomic_read(&substream->mmap_count))
7321 + return -ENXIO;
7322 +
7323 +- if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
7324 +- return tmp;
7325 ++ atomic_inc(&runtime->oss.rw_ref);
7326 + while (bytes > 0) {
7327 + if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
7328 + tmp = -ERESTARTSYS;
7329 + break;
7330 + }
7331 ++ tmp = snd_pcm_oss_make_ready_locked(substream);
7332 ++ if (tmp < 0)
7333 ++ goto err;
7334 + if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
7335 + if (runtime->oss.buffer_used == 0) {
7336 + tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
7337 +@@ -1486,6 +1547,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
7338 + }
7339 + tmp = 0;
7340 + }
7341 ++ atomic_dec(&runtime->oss.rw_ref);
7342 + return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
7343 + }
7344 +
7345 +@@ -1501,10 +1563,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
7346 + continue;
7347 + runtime = substream->runtime;
7348 + snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
7349 ++ mutex_lock(&runtime->oss.params_lock);
7350 + runtime->oss.prepare = 1;
7351 + runtime->oss.buffer_used = 0;
7352 + runtime->oss.prev_hw_ptr_period = 0;
7353 + runtime->oss.period_ptr = 0;
7354 ++ mutex_unlock(&runtime->oss.params_lock);
7355 + }
7356 + return 0;
7357 + }
7358 +@@ -1590,9 +1654,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
7359 + goto __direct;
7360 + if ((err = snd_pcm_oss_make_ready(substream)) < 0)
7361 + return err;
7362 ++ atomic_inc(&runtime->oss.rw_ref);
7363 ++ if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
7364 ++ atomic_dec(&runtime->oss.rw_ref);
7365 ++ return -ERESTARTSYS;
7366 ++ }
7367 + format = snd_pcm_oss_format_from(runtime->oss.format);
7368 + width = snd_pcm_format_physical_width(format);
7369 +- mutex_lock(&runtime->oss.params_lock);
7370 + if (runtime->oss.buffer_used > 0) {
7371 + #ifdef OSS_DEBUG
7372 + pcm_dbg(substream->pcm, "sync: buffer_used\n");
7373 +@@ -1602,10 +1670,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
7374 + runtime->oss.buffer + runtime->oss.buffer_used,
7375 + size);
7376 + err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
7377 +- if (err < 0) {
7378 +- mutex_unlock(&runtime->oss.params_lock);
7379 +- return err;
7380 +- }
7381 ++ if (err < 0)
7382 ++ goto unlock;
7383 + } else if (runtime->oss.period_ptr > 0) {
7384 + #ifdef OSS_DEBUG
7385 + pcm_dbg(substream->pcm, "sync: period_ptr\n");
7386 +@@ -1615,10 +1681,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
7387 + runtime->oss.buffer,
7388 + size * 8 / width);
7389 + err = snd_pcm_oss_sync1(substream, size);
7390 +- if (err < 0) {
7391 +- mutex_unlock(&runtime->oss.params_lock);
7392 +- return err;
7393 +- }
7394 ++ if (err < 0)
7395 ++ goto unlock;
7396 + }
7397 + /*
7398 + * The ALSA period might be a bit larger than the OSS one.
7399 +@@ -1632,7 +1696,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
7400 + else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
7401 + snd_pcm_lib_writev(substream, NULL, size);
7402 + }
7403 ++unlock:
7404 + mutex_unlock(&runtime->oss.params_lock);
7405 ++ atomic_dec(&runtime->oss.rw_ref);
7406 ++ if (err < 0)
7407 ++ return err;
7408 + /*
7409 + * finish sync: drain the buffer
7410 + */
7411 +@@ -1643,7 +1711,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
7412 + substream->f_flags = saved_f_flags;
7413 + if (err < 0)
7414 + return err;
7415 ++ mutex_lock(&runtime->oss.params_lock);
7416 + runtime->oss.prepare = 1;
7417 ++ mutex_unlock(&runtime->oss.params_lock);
7418 + }
7419 +
7420 + substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
7421 +@@ -1654,8 +1724,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
7422 + err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
7423 + if (err < 0)
7424 + return err;
7425 ++ mutex_lock(&runtime->oss.params_lock);
7426 + runtime->oss.buffer_used = 0;
7427 + runtime->oss.prepare = 1;
7428 ++ mutex_unlock(&runtime->oss.params_lock);
7429 + }
7430 + return 0;
7431 + }
7432 +@@ -1667,6 +1739,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
7433 + for (idx = 1; idx >= 0; --idx) {
7434 + struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
7435 + struct snd_pcm_runtime *runtime;
7436 ++ int err;
7437 ++
7438 + if (substream == NULL)
7439 + continue;
7440 + runtime = substream->runtime;
7441 +@@ -1674,10 +1748,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
7442 + rate = 1000;
7443 + else if (rate > 192000)
7444 + rate = 192000;
7445 ++ err = lock_params(runtime);
7446 ++ if (err < 0)
7447 ++ return err;
7448 + if (runtime->oss.rate != rate) {
7449 + runtime->oss.params = 1;
7450 + runtime->oss.rate = rate;
7451 + }
7452 ++ unlock_params(runtime);
7453 + }
7454 + return snd_pcm_oss_get_rate(pcm_oss_file);
7455 + }
7456 +@@ -1702,13 +1780,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig
7457 + for (idx = 1; idx >= 0; --idx) {
7458 + struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
7459 + struct snd_pcm_runtime *runtime;
7460 ++ int err;
7461 ++
7462 + if (substream == NULL)
7463 + continue;
7464 + runtime = substream->runtime;
7465 ++ err = lock_params(runtime);
7466 ++ if (err < 0)
7467 ++ return err;
7468 + if (runtime->oss.channels != channels) {
7469 + runtime->oss.params = 1;
7470 + runtime->oss.channels = channels;
7471 + }
7472 ++ unlock_params(runtime);
7473 + }
7474 + return snd_pcm_oss_get_channels(pcm_oss_file);
7475 + }
7476 +@@ -1781,6 +1865,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
7477 + static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
7478 + {
7479 + int formats, idx;
7480 ++ int err;
7481 +
7482 + if (format != AFMT_QUERY) {
7483 + formats = snd_pcm_oss_get_formats(pcm_oss_file);
7484 +@@ -1794,10 +1879,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for
7485 + if (substream == NULL)
7486 + continue;
7487 + runtime = substream->runtime;
7488 ++ err = lock_params(runtime);
7489 ++ if (err < 0)
7490 ++ return err;
7491 + if (runtime->oss.format != format) {
7492 + runtime->oss.params = 1;
7493 + runtime->oss.format = format;
7494 + }
7495 ++ unlock_params(runtime);
7496 + }
7497 + }
7498 + return snd_pcm_oss_get_format(pcm_oss_file);
7499 +@@ -1817,8 +1906,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s
7500 + {
7501 + struct snd_pcm_runtime *runtime;
7502 +
7503 +- if (substream == NULL)
7504 +- return 0;
7505 + runtime = substream->runtime;
7506 + if (subdivide == 0) {
7507 + subdivide = runtime->oss.subdivision;
7508 +@@ -1842,9 +1929,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
7509 +
7510 + for (idx = 1; idx >= 0; --idx) {
7511 + struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
7512 ++ struct snd_pcm_runtime *runtime;
7513 ++
7514 + if (substream == NULL)
7515 + continue;
7516 +- if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
7517 ++ runtime = substream->runtime;
7518 ++ err = lock_params(runtime);
7519 ++ if (err < 0)
7520 ++ return err;
7521 ++ err = snd_pcm_oss_set_subdivide1(substream, subdivide);
7522 ++ unlock_params(runtime);
7523 ++ if (err < 0)
7524 + return err;
7525 + }
7526 + return err;
7527 +@@ -1854,8 +1949,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
7528 + {
7529 + struct snd_pcm_runtime *runtime;
7530 +
7531 +- if (substream == NULL)
7532 +- return 0;
7533 + runtime = substream->runtime;
7534 + if (runtime->oss.subdivision || runtime->oss.fragshift)
7535 + return -EINVAL;
7536 +@@ -1875,9 +1968,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig
7537 +
7538 + for (idx = 1; idx >= 0; --idx) {
7539 + struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
7540 ++ struct snd_pcm_runtime *runtime;
7541 ++
7542 + if (substream == NULL)
7543 + continue;
7544 +- if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
7545 ++ runtime = substream->runtime;
7546 ++ err = lock_params(runtime);
7547 ++ if (err < 0)
7548 ++ return err;
7549 ++ err = snd_pcm_oss_set_fragment1(substream, val);
7550 ++ unlock_params(runtime);
7551 ++ if (err < 0)
7552 + return err;
7553 + }
7554 + return err;
7555 +@@ -1961,6 +2062,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
7556 + }
7557 + if (psubstream) {
7558 + runtime = psubstream->runtime;
7559 ++ cmd = 0;
7560 ++ if (mutex_lock_interruptible(&runtime->oss.params_lock))
7561 ++ return -ERESTARTSYS;
7562 + if (trigger & PCM_ENABLE_OUTPUT) {
7563 + if (runtime->oss.trigger)
7564 + goto _skip1;
7565 +@@ -1978,13 +2082,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
7566 + cmd = SNDRV_PCM_IOCTL_DROP;
7567 + runtime->oss.prepare = 1;
7568 + }
7569 +- err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
7570 +- if (err < 0)
7571 +- return err;
7572 +- }
7573 + _skip1:
7574 ++ mutex_unlock(&runtime->oss.params_lock);
7575 ++ if (cmd) {
7576 ++ err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
7577 ++ if (err < 0)
7578 ++ return err;
7579 ++ }
7580 ++ }
7581 + if (csubstream) {
7582 + runtime = csubstream->runtime;
7583 ++ cmd = 0;
7584 ++ if (mutex_lock_interruptible(&runtime->oss.params_lock))
7585 ++ return -ERESTARTSYS;
7586 + if (trigger & PCM_ENABLE_INPUT) {
7587 + if (runtime->oss.trigger)
7588 + goto _skip2;
7589 +@@ -1999,11 +2109,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
7590 + cmd = SNDRV_PCM_IOCTL_DROP;
7591 + runtime->oss.prepare = 1;
7592 + }
7593 +- err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
7594 +- if (err < 0)
7595 +- return err;
7596 +- }
7597 + _skip2:
7598 ++ mutex_unlock(&runtime->oss.params_lock);
7599 ++ if (cmd) {
7600 ++ err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
7601 ++ if (err < 0)
7602 ++ return err;
7603 ++ }
7604 ++ }
7605 + return 0;
7606 + }
7607 +
7608 +@@ -2255,6 +2368,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
7609 + runtime->oss.maxfrags = 0;
7610 + runtime->oss.subdivision = 0;
7611 + substream->pcm_release = snd_pcm_oss_release_substream;
7612 ++ atomic_set(&runtime->oss.rw_ref, 0);
7613 + }
7614 +
7615 + static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
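The pcm_oss.c hunks above all serve one locking pattern: OSS parameter state (runtime->oss.params, prepare, rate, channels, format, fragments) is only touched under runtime->oss.params_lock, the readiness check gains a _locked variant so the read/write loops can revalidate per period while already holding the lock, and a new rw_ref counter marks a transfer in flight. A minimal userspace sketch of that pattern, using pthreads and C11 atomics in place of the kernel primitives; the struct and function names here are illustrative, not the driver's:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct stream {
	pthread_mutex_t params_lock; /* guards the fields below */
	atomic_int rw_ref;           /* read/write calls in flight */
	int params_dirty;            /* analogue of runtime->oss.params */
	int need_prepare;            /* analogue of runtime->oss.prepare */
};

/* Caller holds params_lock -- the shape of snd_pcm_oss_make_ready_locked(). */
static int make_ready_locked(struct stream *s)
{
	if (s->params_dirty) {
		/* ... reapply hardware parameters here ... */
		s->params_dirty = 0;
	}
	if (s->need_prepare) {
		/* ... prepare the stream here ... */
		s->need_prepare = 0;
	}
	return 0;
}

/* Unlocked wrapper for callers that do not hold the lock yet. */
static int make_ready(struct stream *s)
{
	int err;

	if (pthread_mutex_lock(&s->params_lock))
		return -1;
	err = make_ready_locked(s);
	pthread_mutex_unlock(&s->params_lock);
	return err;
}

/*
 * I/O path: count the whole transfer in rw_ref and revalidate the
 * stream under the lock on every period, as the patched
 * snd_pcm_oss_write1()/snd_pcm_oss_read1() loops do.
 */
static int write_one_period(struct stream *s)
{
	int err;

	atomic_fetch_add(&s->rw_ref, 1);
	pthread_mutex_lock(&s->params_lock);
	err = make_ready_locked(s);
	/* ... copy one period of data while still holding the lock ... */
	pthread_mutex_unlock(&s->params_lock);
	atomic_fetch_sub(&s->rw_ref, 1);
	return err;
}

int main(void)
{
	struct stream s = {
		.params_lock = PTHREAD_MUTEX_INITIALIZER,
		.params_dirty = 1,
		.need_prepare = 1,
	};

	if (make_ready(&s) || write_one_period(&s))
		return 1;
	printf("stream ready, one period written\n");
	return 0;
}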
7616 +diff --git a/sound/core/pcm.c b/sound/core/pcm.c
7617 +index 09ee8c6b9f75..66ac89aad681 100644
7618 +--- a/sound/core/pcm.c
7619 ++++ b/sound/core/pcm.c
7620 +@@ -28,6 +28,7 @@
7621 + #include <sound/core.h>
7622 + #include <sound/minors.h>
7623 + #include <sound/pcm.h>
7624 ++#include <sound/timer.h>
7625 + #include <sound/control.h>
7626 + #include <sound/info.h>
7627 +
7628 +@@ -1054,8 +1055,13 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
7629 + snd_free_pages((void*)runtime->control,
7630 + PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
7631 + kfree(runtime->hw_constraints.rules);
7632 +- kfree(runtime);
7633 ++ /* Avoid concurrent access to runtime via PCM timer interface */
7634 ++ if (substream->timer)
7635 ++ spin_lock_irq(&substream->timer->lock);
7636 + substream->runtime = NULL;
7637 ++ if (substream->timer)
7638 ++ spin_unlock_irq(&substream->timer->lock);
7639 ++ kfree(runtime);
7640 + put_pid(substream->pid);
7641 + substream->pid = NULL;
7642 + substream->pstr->substream_opened--;
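The snd_pcm_detach_substream() hunk reorders teardown so substream->runtime is set to NULL under the PCM timer's lock before the memory is freed; a timer callback that reads the pointer under that same lock then sees either the old, still-valid runtime or NULL, never freed memory. The ordering in a small runnable userspace sketch (names hypothetical):

#include <pthread.h>
#include <stdlib.h>

struct shared {
	pthread_mutex_t lock;
	int *runtime; /* read by a concurrent callback under 'lock' */
};

/* Concurrent reader: checks the pointer under the lock before use. */
static int reader(struct shared *s)
{
	int val = -1;

	pthread_mutex_lock(&s->lock);
	if (s->runtime)          /* NULL after teardown, never dangling */
		val = *s->runtime;
	pthread_mutex_unlock(&s->lock);
	return val;
}

/* Teardown: publish NULL under the lock first, free afterwards. */
static void teardown(struct shared *s)
{
	int *old;

	pthread_mutex_lock(&s->lock);
	old = s->runtime;
	s->runtime = NULL;
	pthread_mutex_unlock(&s->lock);
	free(old);               /* no reader can still be using it */
}

int main(void)
{
	struct shared s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	s.runtime = malloc(sizeof(*s.runtime));
	if (!s.runtime)
		return 1;
	*s.runtime = 42;
	(void)reader(&s);
	teardown(&s);
	return reader(&s) == -1 ? 0 : 1;
}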
7643 +diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
7644 +index f69764d7cdd7..e30e30ba6e39 100644
7645 +--- a/sound/core/rawmidi_compat.c
7646 ++++ b/sound/core/rawmidi_compat.c
7647 +@@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
7648 + struct snd_rawmidi_params params;
7649 + unsigned int val;
7650 +
7651 +- if (rfile->output == NULL)
7652 +- return -EINVAL;
7653 + if (get_user(params.stream, &src->stream) ||
7654 + get_user(params.buffer_size, &src->buffer_size) ||
7655 + get_user(params.avail_min, &src->avail_min) ||
7656 +@@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
7657 + params.no_active_sensing = val;
7658 + switch (params.stream) {
7659 + case SNDRV_RAWMIDI_STREAM_OUTPUT:
7660 ++ if (!rfile->output)
7661 ++ return -EINVAL;
7662 + return snd_rawmidi_output_params(rfile->output, &params);
7663 + case SNDRV_RAWMIDI_STREAM_INPUT:
7664 ++ if (!rfile->input)
7665 ++ return -EINVAL;
7666 + return snd_rawmidi_input_params(rfile->input, &params);
7667 + }
7668 + return -EINVAL;
7669 +@@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
7670 + int err;
7671 + struct snd_rawmidi_status status;
7672 +
7673 +- if (rfile->output == NULL)
7674 +- return -EINVAL;
7675 + if (get_user(status.stream, &src->stream))
7676 + return -EFAULT;
7677 +
7678 + switch (status.stream) {
7679 + case SNDRV_RAWMIDI_STREAM_OUTPUT:
7680 ++ if (!rfile->output)
7681 ++ return -EINVAL;
7682 + err = snd_rawmidi_output_status(rfile->output, &status);
7683 + break;
7684 + case SNDRV_RAWMIDI_STREAM_INPUT:
7685 ++ if (!rfile->input)
7686 ++ return -EINVAL;
7687 + err = snd_rawmidi_input_status(rfile->input, &status);
7688 + break;
7689 + default:
7690 +@@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
7691 + int err;
7692 + struct snd_rawmidi_status status;
7693 +
7694 +- if (rfile->output == NULL)
7695 +- return -EINVAL;
7696 + if (get_user(status.stream, &src->stream))
7697 + return -EFAULT;
7698 +
7699 + switch (status.stream) {
7700 + case SNDRV_RAWMIDI_STREAM_OUTPUT:
7701 ++ if (!rfile->output)
7702 ++ return -EINVAL;
7703 + err = snd_rawmidi_output_status(rfile->output, &status);
7704 + break;
7705 + case SNDRV_RAWMIDI_STREAM_INPUT:
7706 ++ if (!rfile->input)
7707 ++ return -EINVAL;
7708 + err = snd_rawmidi_input_status(rfile->input, &status);
7709 + break;
7710 + default:
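All three rawmidi_compat.c hunks fix the same shape of bug: the compat handlers rejected a missing output stream up front, even when the ioctl argument selected the input stream, so capture-only opens always failed. The fix moves the NULL check into the switch arm for the direction actually requested. The same logic in sketch form (types simplified, names hypothetical):

#include <stddef.h>

enum stream_dir { STREAM_OUTPUT, STREAM_INPUT };

struct rmidi_file {
	void *output; /* NULL when opened for capture only */
	void *input;  /* NULL when opened for playback only */
};

/* Validate only the stream the request actually refers to. */
static int handle_params(struct rmidi_file *f, enum stream_dir dir)
{
	switch (dir) {
	case STREAM_OUTPUT:
		if (!f->output)
			return -1;   /* -EINVAL in the kernel */
		return 0;            /* ... apply output params ... */
	case STREAM_INPUT:
		if (!f->input)
			return -1;
		return 0;            /* ... apply input params ... */
	}
	return -1;
}

int main(void)
{
	int in_only = 1;
	struct rmidi_file f = { .input = &in_only };

	/* Capture-only file: input requests now succeed ... */
	if (handle_params(&f, STREAM_INPUT))
		return 1;
	/* ... while output requests still fail cleanly. */
	return handle_params(&f, STREAM_OUTPUT) ? 0 : 1;
}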
7711 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7712 +index c507c69029e3..738e1fe90312 100644
7713 +--- a/sound/pci/hda/hda_intel.c
7714 ++++ b/sound/pci/hda/hda_intel.c
7715 +@@ -1645,7 +1645,8 @@ static void azx_check_snoop_available(struct azx *chip)
7716 + */
7717 + u8 val;
7718 + pci_read_config_byte(chip->pci, 0x42, &val);
7719 +- if (!(val & 0x80) && chip->pci->revision == 0x30)
7720 ++ if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
7721 ++ chip->pci->revision == 0x20))
7722 + snoop = false;
7723 + }
7724 +
7725 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7726 +index aef1f52db7d9..fc77bf7a1544 100644
7727 +--- a/sound/pci/hda/patch_realtek.c
7728 ++++ b/sound/pci/hda/patch_realtek.c
7729 +@@ -6370,6 +6370,8 @@ static const struct hda_fixup alc269_fixups[] = {
7730 + { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
7731 + { }
7732 + },
7733 ++ .chained = true,
7734 ++ .chain_id = ALC269_FIXUP_HEADSET_MIC
7735 + },
7736 + };
7737 +
7738 +@@ -6573,6 +6575,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7739 + SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
7740 + SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
7741 + SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7742 ++ SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7743 + SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7744 + SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
7745 + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
7746 +diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
7747 +index 9b341c23f62b..5e80867d09ef 100644
7748 +--- a/sound/soc/codecs/ssm2602.c
7749 ++++ b/sound/soc/codecs/ssm2602.c
7750 +@@ -54,10 +54,17 @@ struct ssm2602_priv {
7751 + * using 2 wire for device control, so we cache them instead.
7752 + * There is no point in caching the reset register
7753 + */
7754 +-static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = {
7755 +- 0x0097, 0x0097, 0x0079, 0x0079,
7756 +- 0x000a, 0x0008, 0x009f, 0x000a,
7757 +- 0x0000, 0x0000
7758 ++static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
7759 ++ { .reg = 0x00, .def = 0x0097 },
7760 ++ { .reg = 0x01, .def = 0x0097 },
7761 ++ { .reg = 0x02, .def = 0x0079 },
7762 ++ { .reg = 0x03, .def = 0x0079 },
7763 ++ { .reg = 0x04, .def = 0x000a },
7764 ++ { .reg = 0x05, .def = 0x0008 },
7765 ++ { .reg = 0x06, .def = 0x009f },
7766 ++ { .reg = 0x07, .def = 0x000a },
7767 ++ { .reg = 0x08, .def = 0x0000 },
7768 ++ { .reg = 0x09, .def = 0x0000 }
7769 + };
7770 +
7771 +
7772 +@@ -620,8 +627,8 @@ const struct regmap_config ssm2602_regmap_config = {
7773 + .volatile_reg = ssm2602_register_volatile,
7774 +
7775 + .cache_type = REGCACHE_RBTREE,
7776 +- .reg_defaults_raw = ssm2602_reg,
7777 +- .num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg),
7778 ++ .reg_defaults = ssm2602_reg,
7779 ++ .num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
7780 + };
7781 + EXPORT_SYMBOL_GPL(ssm2602_regmap_config);
7782 +
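The ssm2602.c hunk swaps a bare u16 array (reg_defaults_raw) for explicit struct reg_default pairs (reg_defaults) in the regmap configuration, so each default carries its register address instead of depending on array position. A userspace sketch of why the keyed form is the safer representation; struct reg_default below is a local stand-in mirroring the kernel's { .reg, .def } layout, and the lookup helper is hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's struct reg_default. */
struct reg_default {
	uint32_t reg; /* register address */
	uint32_t def; /* power-on default value */
};

static const struct reg_default defaults[] = {
	{ .reg = 0x00, .def = 0x0097 },
	{ .reg = 0x02, .def = 0x0079 }, /* gaps are fine: keyed, not positional */
	{ .reg = 0x06, .def = 0x009f },
};

/* Look a default up by address; returns -1 when the register has none. */
static int lookup_default(uint32_t reg, uint32_t *def)
{
	for (size_t i = 0; i < sizeof(defaults) / sizeof(defaults[0]); i++) {
		if (defaults[i].reg == reg) {
			*def = defaults[i].def;
			return 0;
		}
	}
	return -1;
}

int main(void)
{
	uint32_t def;

	if (lookup_default(0x06, &def) == 0)
		printf("reg 0x06 defaults to 0x%04x\n", (unsigned)def);
	return lookup_default(0x42, &def) == 0; /* unmapped reg: no default */
}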
7783 +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
7784 +index 01a50413c66f..782c580b7aa3 100644
7785 +--- a/sound/soc/soc-topology.c
7786 ++++ b/sound/soc/soc-topology.c
7787 +@@ -523,6 +523,7 @@ static void remove_widget(struct snd_soc_component *comp,
7788 + kfree(se->dobj.control.dtexts[j]);
7789 +
7790 + kfree(se);
7791 ++ kfree(w->kcontrol_news[i].name);
7792 + }
7793 + kfree(w->kcontrol_news);
7794 + } else {
7795 +@@ -540,6 +541,7 @@ static void remove_widget(struct snd_soc_component *comp,
7796 + */
7797 + kfree((void *)kcontrol->private_value);
7798 + snd_ctl_remove(card, kcontrol);
7799 ++ kfree(w->kcontrol_news[i].name);
7800 + }
7801 + kfree(w->kcontrol_news);
7802 + }
7803 +@@ -1233,7 +1235,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
7804 + dev_dbg(tplg->dev, " adding DAPM widget mixer control %s at %d\n",
7805 + mc->hdr.name, i);
7806 +
7807 +- kc[i].name = mc->hdr.name;
7808 ++ kc[i].name = kstrdup(mc->hdr.name, GFP_KERNEL);
7809 ++ if (kc[i].name == NULL)
7810 ++ goto err_str;
7811 + kc[i].private_value = (long)sm;
7812 + kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
7813 + kc[i].access = mc->hdr.access;
7814 +@@ -1278,8 +1282,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
7815 + err_str:
7816 + kfree(sm);
7817 + err:
7818 +- for (--i; i >= 0; i--)
7819 ++ for (--i; i >= 0; i--) {
7820 + kfree((void *)kc[i].private_value);
7821 ++ kfree(kc[i].name);
7822 ++ }
7823 + kfree(kc);
7824 + return NULL;
7825 + }
7826 +@@ -1310,7 +1316,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
7827 + dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
7828 + ec->hdr.name);
7829 +
7830 +- kc[i].name = ec->hdr.name;
7831 ++ kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL);
7832 ++ if (kc[i].name == NULL)
7833 ++ goto err_se;
7834 + kc[i].private_value = (long)se;
7835 + kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
7836 + kc[i].access = ec->hdr.access;
7837 +@@ -1386,6 +1394,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
7838 + kfree(se->dobj.control.dtexts[j]);
7839 +
7840 + kfree(se);
7841 ++ kfree(kc[i].name);
7842 + }
7843 + err:
7844 + kfree(kc);
7845 +@@ -1424,7 +1433,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
7846 + "ASoC: adding bytes kcontrol %s with access 0x%x\n",
7847 + be->hdr.name, be->hdr.access);
7848 +
7849 +- kc[i].name = be->hdr.name;
7850 ++ kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL);
7851 ++ if (kc[i].name == NULL)
7852 ++ goto err;
7853 + kc[i].private_value = (long)sbe;
7854 + kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
7855 + kc[i].access = be->hdr.access;
7856 +@@ -1454,8 +1465,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
7857 + return kc;
7858 +
7859 + err:
7860 +- for (--i; i >= 0; i--)
7861 ++ for (--i; i >= 0; i--) {
7862 + kfree((void *)kc[i].private_value);
7863 ++ kfree(kc[i].name);
7864 ++ }
7865 +
7866 + kfree(kc);
7867 + return NULL;
7868 +diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
7869 +index 6d7cde56a355..e2cf55c53ea8 100644
7870 +--- a/sound/usb/line6/midi.c
7871 ++++ b/sound/usb/line6/midi.c
7872 +@@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
7873 + }
7874 +
7875 + usb_fill_int_urb(urb, line6->usbdev,
7876 +- usb_sndbulkpipe(line6->usbdev,
7877 ++ usb_sndintpipe(line6->usbdev,
7878 + line6->properties->ep_ctrl_w),
7879 + transfer_buffer, length, midi_sent, line6,
7880 + line6->interval);
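The one-line line6/midi.c fix pairs usb_fill_int_urb() with an interrupt pipe instead of a bulk pipe: the fill helper and the pipe type must agree, or the USB core's URB sanity checks reject the submission. The rule in isolation, as a kernel-style sketch (the wrapper and its arguments are placeholders, not the driver's real fields):

#include <linux/usb.h>

/*
 * Fill helper and pipe type must match: usb_fill_int_urb() takes an
 * interrupt pipe, usb_fill_bulk_urb() a bulk pipe.
 */
static void fill_midi_out_urb(struct urb *urb, struct usb_device *dev,
			      unsigned int ep, void *buf, int len,
			      usb_complete_t complete, void *context,
			      int interval)
{
	usb_fill_int_urb(urb, dev,
			 usb_sndintpipe(dev, ep), /* not usb_sndbulkpipe() */
			 buf, len, complete, context, interval);
}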
7881 +diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
7882 +index 465095355666..a8f07243aa9f 100644
7883 +--- a/virt/kvm/arm/vgic/vgic-its.c
7884 ++++ b/virt/kvm/arm/vgic/vgic-its.c
7885 +@@ -316,21 +316,24 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
7886 + struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
7887 + struct vgic_irq *irq;
7888 + u32 *intids;
7889 +- int irq_count = dist->lpi_list_count, i = 0;
7890 ++ int irq_count, i = 0;
7891 +
7892 + /*
7893 +- * We use the current value of the list length, which may change
7894 +- * after the kmalloc. We don't care, because the guest shouldn't
7895 +- * change anything while the command handling is still running,
7896 +- * and in the worst case we would miss a new IRQ, which one wouldn't
7897 +- * expect to be covered by this command anyway.
7898 ++ * There is an obvious race between allocating the array and LPIs
7899 ++ * being mapped/unmapped. If we ended up here as a result of a
7900 ++ * command, we're safe (locks are held, preventing another
7901 ++ * command). If coming from another path (such as enabling LPIs),
7902 ++ * we must be careful not to overrun the array.
7903 + */
7904 ++ irq_count = READ_ONCE(dist->lpi_list_count);
7905 + intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
7906 + if (!intids)
7907 + return -ENOMEM;
7908 +
7909 + spin_lock(&dist->lpi_list_lock);
7910 + list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
7911 ++ if (i == irq_count)
7912 ++ break;
7913 + /* We don't need to "get" the IRQ, as we hold the list lock. */
7914 + if (irq->target_vcpu != vcpu)
7915 + continue;