
From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Mon, 16 Sep 2019 12:26:14 +0000
Message-Id: 1568636755.145454b6a808a552cf3e80041ce442cbae29d912.mpagano@gentoo
1 commit: 145454b6a808a552cf3e80041ce442cbae29d912
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Sep 16 12:25:55 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Sep 16 12:25:55 2019 +0000
6 URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=145454b6a808a552cf3e80041ce442cbae29d912
7
8 Linux patch 4.19.73
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 8 +-
13 1072_linux-4.19.73.patch | 8877 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 8883 insertions(+), 2 deletions(-)
15
16 diff --git a/0000_README b/0000_README
17 index 5a202ee..d5d2e47 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -323,9 +323,13 @@ Patch: 1070_linux-4.19.70.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.70
23
24 -Patch: 1071_linux-4.19.71.patch
25 +Patch: 1071_linux-4.19.72.patch
26 From: https://www.kernel.org
27 -Desc: Linux 4.19.71
28 +Desc: Linux 4.19.72
29 +
30 +Patch: 1072_linux-4.19.73.patch
31 +From: https://www.kernel.org
32 +Desc: Linux 4.19.73
33
34 Patch: 1500_XATTR_USER_PREFIX.patch
35 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
36
37 diff --git a/1072_linux-4.19.73.patch b/1072_linux-4.19.73.patch
38 new file mode 100644
39 index 0000000..0364fc7
40 --- /dev/null
41 +++ b/1072_linux-4.19.73.patch
42 @@ -0,0 +1,8877 @@
43 +diff --git a/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
44 +new file mode 100644
45 +index 000000000000..a30d63db3c8f
46 +--- /dev/null
47 ++++ b/Documentation/devicetree/bindings/display/panel/armadeus,st0700-adapt.txt
48 +@@ -0,0 +1,9 @@
49 ++Armadeus ST0700 Adapt. A Santek ST0700I5Y-RBSLW 7.0" WVGA (800x480) TFT with
50 ++an adapter board.
51 ++
52 ++Required properties:
53 ++- compatible: "armadeus,st0700-adapt"
54 ++- power-supply: see panel-common.txt
55 ++
56 ++Optional properties:
57 ++- backlight: see panel-common.txt
58 +diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
59 +index 6c49db7f8ad2..e1fe02f3e3e9 100644
60 +--- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
61 ++++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.txt
62 +@@ -11,11 +11,13 @@ New driver handles the following
63 +
64 + Required properties:
65 + - compatible: Must be "samsung,exynos-adc-v1"
66 +- for exynos4412/5250 and s5pv210 controllers.
67 ++ for Exynos5250 controllers.
68 + Must be "samsung,exynos-adc-v2" for
69 + future controllers.
70 + Must be "samsung,exynos3250-adc" for
71 + controllers compatible with ADC of Exynos3250.
72 ++ Must be "samsung,exynos4212-adc" for
73 ++ controllers compatible with ADC of Exynos4212 and Exynos4412.
74 + Must be "samsung,exynos7-adc" for
75 + the ADC in Exynos7 and compatibles
76 + Must be "samsung,s3c2410-adc" for
77 +@@ -28,6 +30,8 @@ Required properties:
78 + the ADC in s3c2443 and compatibles
79 + Must be "samsung,s3c6410-adc" for
80 + the ADC in s3c6410 and compatibles
81 ++ Must be "samsung,s5pv210-adc" for
82 ++ the ADC in s5pv210 and compatibles
83 + - reg: List of ADC register address range
84 + - The base address and range of ADC register
85 + - The base address and range of ADC_PHY register (every
86 +diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
87 +index f5a0923b34ca..c269dbe384fe 100644
88 +--- a/Documentation/devicetree/bindings/mmc/mmc.txt
89 ++++ b/Documentation/devicetree/bindings/mmc/mmc.txt
90 +@@ -62,6 +62,10 @@ Optional properties:
91 + be referred to mmc-pwrseq-simple.txt. But now it's reused as a tunable delay
92 + waiting for I/O signalling and card power supply to be stable, regardless of
93 + whether pwrseq-simple is used. Default to 10ms if no available.
94 ++- supports-cqe : The presence of this property indicates that the corresponding
95 ++ MMC host controller supports HW command queue feature.
96 ++- disable-cqe-dcmd: This property indicates that the MMC controller's command
97 ++ queue engine (CQE) does not support direct commands (DCMDs).
98 +
99 + *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
100 + polarity properties, we have to fix the meaning of the "normal" and "inverted"
101 +diff --git a/Makefile b/Makefile
102 +index ef80b1dfb753..9748fa3704bc 100644
103 +--- a/Makefile
104 ++++ b/Makefile
105 +@@ -1,7 +1,7 @@
106 + # SPDX-License-Identifier: GPL-2.0
107 + VERSION = 4
108 + PATCHLEVEL = 19
109 +-SUBLEVEL = 72
110 ++SUBLEVEL = 73
111 + EXTRAVERSION =
112 + NAME = "People's Front"
113 +
114 +diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
115 +index 5c6663321e87..215f515442e0 100644
116 +--- a/arch/arc/kernel/troubleshoot.c
117 ++++ b/arch/arc/kernel/troubleshoot.c
118 +@@ -179,6 +179,12 @@ void show_regs(struct pt_regs *regs)
119 + struct task_struct *tsk = current;
120 + struct callee_regs *cregs;
121 +
122 ++ /*
123 ++ * generic code calls us with preemption disabled, but some calls
124 ++ * here could sleep, so re-enable to avoid lockdep splat
125 ++ */
126 ++ preempt_enable();
127 ++
128 + print_task_path_n_nm(tsk);
129 + show_regs_print_info(KERN_INFO);
130 +
131 +@@ -221,6 +227,8 @@ void show_regs(struct pt_regs *regs)
132 + cregs = (struct callee_regs *)current->thread.callee_reg;
133 + if (cregs)
134 + show_callee_regs(cregs);
135 ++
136 ++ preempt_disable();
137 + }
138 +
139 + void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
140 +diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
141 +index db6913094be3..4e8143de32e7 100644
142 +--- a/arch/arc/mm/fault.c
143 ++++ b/arch/arc/mm/fault.c
144 +@@ -66,14 +66,12 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
145 + struct vm_area_struct *vma = NULL;
146 + struct task_struct *tsk = current;
147 + struct mm_struct *mm = tsk->mm;
148 +- siginfo_t info;
149 ++ int si_code = SEGV_MAPERR;
150 + int ret;
151 + vm_fault_t fault;
152 + int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
153 + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
154 +
155 +- clear_siginfo(&info);
156 +-
157 + /*
158 + * We fault-in kernel-space virtual memory on-demand. The
159 + * 'reference' page table is init_mm.pgd.
160 +@@ -83,16 +81,14 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
161 + * only copy the information from the master page table,
162 + * nothing more.
163 + */
164 +- if (address >= VMALLOC_START) {
165 ++ if (address >= VMALLOC_START && !user_mode(regs)) {
166 + ret = handle_kernel_vaddr_fault(address);
167 + if (unlikely(ret))
168 +- goto bad_area_nosemaphore;
169 ++ goto no_context;
170 + else
171 + return;
172 + }
173 +
174 +- info.si_code = SEGV_MAPERR;
175 +-
176 + /*
177 + * If we're in an interrupt or have no user
178 + * context, we must not take the fault..
179 +@@ -119,7 +115,7 @@ retry:
180 + * we can handle it..
181 + */
182 + good_area:
183 +- info.si_code = SEGV_ACCERR;
184 ++ si_code = SEGV_ACCERR;
185 +
186 + /* Handle protection violation, execute on heap or stack */
187 +
188 +@@ -143,12 +139,17 @@ good_area:
189 + */
190 + fault = handle_mm_fault(vma, address, flags);
191 +
192 +- /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
193 + if (unlikely(fatal_signal_pending(current))) {
194 +- if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
195 +- up_read(&mm->mmap_sem);
196 +- if (user_mode(regs))
197 ++
198 ++ /*
199 ++ * if fault retry, mmap_sem already relinquished by core mm
200 ++ * so OK to return to user mode (with signal handled first)
201 ++ */
202 ++ if (fault & VM_FAULT_RETRY) {
203 ++ if (!user_mode(regs))
204 ++ goto no_context;
205 + return;
206 ++ }
207 + }
208 +
209 + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
210 +@@ -195,15 +196,10 @@ good_area:
211 + bad_area:
212 + up_read(&mm->mmap_sem);
213 +
214 +-bad_area_nosemaphore:
215 + /* User mode accesses just cause a SIGSEGV */
216 + if (user_mode(regs)) {
217 + tsk->thread.fault_address = address;
218 +- info.si_signo = SIGSEGV;
219 +- info.si_errno = 0;
220 +- /* info.si_code has been set above */
221 +- info.si_addr = (void __user *)address;
222 +- force_sig_info(SIGSEGV, &info, tsk);
223 ++ force_sig_fault(SIGSEGV, si_code, (void __user *)address, tsk);
224 + return;
225 + }
226 +
227 +@@ -238,9 +234,5 @@ do_sigbus:
228 + goto no_context;
229 +
230 + tsk->thread.fault_address = address;
231 +- info.si_signo = SIGBUS;
232 +- info.si_errno = 0;
233 +- info.si_code = BUS_ADRERR;
234 +- info.si_addr = (void __user *)address;
235 +- force_sig_info(SIGBUS, &info, tsk);
236 ++ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, tsk);
237 + }
238 +diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
239 +index 502a361d1fe9..15d6157b661d 100644
240 +--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
241 ++++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
242 +@@ -65,7 +65,7 @@
243 + gpio-miso = <&gpio1 8 GPIO_ACTIVE_HIGH>;
244 + gpio-mosi = <&gpio1 7 GPIO_ACTIVE_HIGH>;
245 + /* Collides with pflash CE1, not so cool */
246 +- cs-gpios = <&gpio0 20 GPIO_ACTIVE_HIGH>;
247 ++ cs-gpios = <&gpio0 20 GPIO_ACTIVE_LOW>;
248 + num-chipselects = <1>;
249 +
250 + panel: display@0 {
251 +diff --git a/arch/arm/boot/dts/qcom-ipq4019.dtsi b/arch/arm/boot/dts/qcom-ipq4019.dtsi
252 +index 78db67337ed4..54d056b01bb5 100644
253 +--- a/arch/arm/boot/dts/qcom-ipq4019.dtsi
254 ++++ b/arch/arm/boot/dts/qcom-ipq4019.dtsi
255 +@@ -386,10 +386,10 @@
256 + #address-cells = <3>;
257 + #size-cells = <2>;
258 +
259 +- ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000
260 +- 0x82000000 0 0x48000000 0x48000000 0 0x10000000>;
261 ++ ranges = <0x81000000 0 0x40200000 0x40200000 0 0x00100000>,
262 ++ <0x82000000 0 0x40300000 0x40300000 0 0x00d00000>;
263 +
264 +- interrupts = <GIC_SPI 141 IRQ_TYPE_EDGE_RISING>;
265 ++ interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
266 + interrupt-names = "msi";
267 + #interrupt-cells = <1>;
268 + interrupt-map-mask = <0 0 0 0x7>;
269 +diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
270 +index 3c42bf9fa061..708931b47090 100644
271 +--- a/arch/arm/mach-davinci/devices-da8xx.c
272 ++++ b/arch/arm/mach-davinci/devices-da8xx.c
273 +@@ -704,6 +704,46 @@ static struct resource da8xx_gpio_resources[] = {
274 + },
275 + { /* interrupt */
276 + .start = IRQ_DA8XX_GPIO0,
277 ++ .end = IRQ_DA8XX_GPIO0,
278 ++ .flags = IORESOURCE_IRQ,
279 ++ },
280 ++ {
281 ++ .start = IRQ_DA8XX_GPIO1,
282 ++ .end = IRQ_DA8XX_GPIO1,
283 ++ .flags = IORESOURCE_IRQ,
284 ++ },
285 ++ {
286 ++ .start = IRQ_DA8XX_GPIO2,
287 ++ .end = IRQ_DA8XX_GPIO2,
288 ++ .flags = IORESOURCE_IRQ,
289 ++ },
290 ++ {
291 ++ .start = IRQ_DA8XX_GPIO3,
292 ++ .end = IRQ_DA8XX_GPIO3,
293 ++ .flags = IORESOURCE_IRQ,
294 ++ },
295 ++ {
296 ++ .start = IRQ_DA8XX_GPIO4,
297 ++ .end = IRQ_DA8XX_GPIO4,
298 ++ .flags = IORESOURCE_IRQ,
299 ++ },
300 ++ {
301 ++ .start = IRQ_DA8XX_GPIO5,
302 ++ .end = IRQ_DA8XX_GPIO5,
303 ++ .flags = IORESOURCE_IRQ,
304 ++ },
305 ++ {
306 ++ .start = IRQ_DA8XX_GPIO6,
307 ++ .end = IRQ_DA8XX_GPIO6,
308 ++ .flags = IORESOURCE_IRQ,
309 ++ },
310 ++ {
311 ++ .start = IRQ_DA8XX_GPIO7,
312 ++ .end = IRQ_DA8XX_GPIO7,
313 ++ .flags = IORESOURCE_IRQ,
314 ++ },
315 ++ {
316 ++ .start = IRQ_DA8XX_GPIO8,
317 + .end = IRQ_DA8XX_GPIO8,
318 + .flags = IORESOURCE_IRQ,
319 + },
320 +diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
321 +index 9f7d38d12c88..2b0f5d97ab7c 100644
322 +--- a/arch/arm/mach-davinci/dm355.c
323 ++++ b/arch/arm/mach-davinci/dm355.c
324 +@@ -548,6 +548,36 @@ static struct resource dm355_gpio_resources[] = {
325 + },
326 + { /* interrupt */
327 + .start = IRQ_DM355_GPIOBNK0,
328 ++ .end = IRQ_DM355_GPIOBNK0,
329 ++ .flags = IORESOURCE_IRQ,
330 ++ },
331 ++ {
332 ++ .start = IRQ_DM355_GPIOBNK1,
333 ++ .end = IRQ_DM355_GPIOBNK1,
334 ++ .flags = IORESOURCE_IRQ,
335 ++ },
336 ++ {
337 ++ .start = IRQ_DM355_GPIOBNK2,
338 ++ .end = IRQ_DM355_GPIOBNK2,
339 ++ .flags = IORESOURCE_IRQ,
340 ++ },
341 ++ {
342 ++ .start = IRQ_DM355_GPIOBNK3,
343 ++ .end = IRQ_DM355_GPIOBNK3,
344 ++ .flags = IORESOURCE_IRQ,
345 ++ },
346 ++ {
347 ++ .start = IRQ_DM355_GPIOBNK4,
348 ++ .end = IRQ_DM355_GPIOBNK4,
349 ++ .flags = IORESOURCE_IRQ,
350 ++ },
351 ++ {
352 ++ .start = IRQ_DM355_GPIOBNK5,
353 ++ .end = IRQ_DM355_GPIOBNK5,
354 ++ .flags = IORESOURCE_IRQ,
355 ++ },
356 ++ {
357 ++ .start = IRQ_DM355_GPIOBNK6,
358 + .end = IRQ_DM355_GPIOBNK6,
359 + .flags = IORESOURCE_IRQ,
360 + },
361 +diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
362 +index abcf2a5ed89b..42665914166a 100644
363 +--- a/arch/arm/mach-davinci/dm365.c
364 ++++ b/arch/arm/mach-davinci/dm365.c
365 +@@ -267,6 +267,41 @@ static struct resource dm365_gpio_resources[] = {
366 + },
367 + { /* interrupt */
368 + .start = IRQ_DM365_GPIO0,
369 ++ .end = IRQ_DM365_GPIO0,
370 ++ .flags = IORESOURCE_IRQ,
371 ++ },
372 ++ {
373 ++ .start = IRQ_DM365_GPIO1,
374 ++ .end = IRQ_DM365_GPIO1,
375 ++ .flags = IORESOURCE_IRQ,
376 ++ },
377 ++ {
378 ++ .start = IRQ_DM365_GPIO2,
379 ++ .end = IRQ_DM365_GPIO2,
380 ++ .flags = IORESOURCE_IRQ,
381 ++ },
382 ++ {
383 ++ .start = IRQ_DM365_GPIO3,
384 ++ .end = IRQ_DM365_GPIO3,
385 ++ .flags = IORESOURCE_IRQ,
386 ++ },
387 ++ {
388 ++ .start = IRQ_DM365_GPIO4,
389 ++ .end = IRQ_DM365_GPIO4,
390 ++ .flags = IORESOURCE_IRQ,
391 ++ },
392 ++ {
393 ++ .start = IRQ_DM365_GPIO5,
394 ++ .end = IRQ_DM365_GPIO5,
395 ++ .flags = IORESOURCE_IRQ,
396 ++ },
397 ++ {
398 ++ .start = IRQ_DM365_GPIO6,
399 ++ .end = IRQ_DM365_GPIO6,
400 ++ .flags = IORESOURCE_IRQ,
401 ++ },
402 ++ {
403 ++ .start = IRQ_DM365_GPIO7,
404 + .end = IRQ_DM365_GPIO7,
405 + .flags = IORESOURCE_IRQ,
406 + },
407 +diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
408 +index 0720da7809a6..de1ec6dc01e9 100644
409 +--- a/arch/arm/mach-davinci/dm644x.c
410 ++++ b/arch/arm/mach-davinci/dm644x.c
411 +@@ -492,6 +492,26 @@ static struct resource dm644_gpio_resources[] = {
412 + },
413 + { /* interrupt */
414 + .start = IRQ_GPIOBNK0,
415 ++ .end = IRQ_GPIOBNK0,
416 ++ .flags = IORESOURCE_IRQ,
417 ++ },
418 ++ {
419 ++ .start = IRQ_GPIOBNK1,
420 ++ .end = IRQ_GPIOBNK1,
421 ++ .flags = IORESOURCE_IRQ,
422 ++ },
423 ++ {
424 ++ .start = IRQ_GPIOBNK2,
425 ++ .end = IRQ_GPIOBNK2,
426 ++ .flags = IORESOURCE_IRQ,
427 ++ },
428 ++ {
429 ++ .start = IRQ_GPIOBNK3,
430 ++ .end = IRQ_GPIOBNK3,
431 ++ .flags = IORESOURCE_IRQ,
432 ++ },
433 ++ {
434 ++ .start = IRQ_GPIOBNK4,
435 + .end = IRQ_GPIOBNK4,
436 + .flags = IORESOURCE_IRQ,
437 + },
438 +diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
439 +index 6bd2ed069d0d..d9b93e2806d2 100644
440 +--- a/arch/arm/mach-davinci/dm646x.c
441 ++++ b/arch/arm/mach-davinci/dm646x.c
442 +@@ -442,6 +442,16 @@ static struct resource dm646x_gpio_resources[] = {
443 + },
444 + { /* interrupt */
445 + .start = IRQ_DM646X_GPIOBNK0,
446 ++ .end = IRQ_DM646X_GPIOBNK0,
447 ++ .flags = IORESOURCE_IRQ,
448 ++ },
449 ++ {
450 ++ .start = IRQ_DM646X_GPIOBNK1,
451 ++ .end = IRQ_DM646X_GPIOBNK1,
452 ++ .flags = IORESOURCE_IRQ,
453 ++ },
454 ++ {
455 ++ .start = IRQ_DM646X_GPIOBNK2,
456 + .end = IRQ_DM646X_GPIOBNK2,
457 + .flags = IORESOURCE_IRQ,
458 + },
459 +diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
460 +index 5089aa64088f..9a1ea8a46405 100644
461 +--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
462 ++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
463 +@@ -140,6 +140,7 @@
464 + tx-fifo-depth = <16384>;
465 + rx-fifo-depth = <16384>;
466 + snps,multicast-filter-bins = <256>;
467 ++ altr,sysmgr-syscon = <&sysmgr 0x44 0>;
468 + status = "disabled";
469 + };
470 +
471 +@@ -156,6 +157,7 @@
472 + tx-fifo-depth = <16384>;
473 + rx-fifo-depth = <16384>;
474 + snps,multicast-filter-bins = <256>;
475 ++ altr,sysmgr-syscon = <&sysmgr 0x48 0>;
476 + status = "disabled";
477 + };
478 +
479 +@@ -172,6 +174,7 @@
480 + tx-fifo-depth = <16384>;
481 + rx-fifo-depth = <16384>;
482 + snps,multicast-filter-bins = <256>;
483 ++ altr,sysmgr-syscon = <&sysmgr 0x4c 0>;
484 + status = "disabled";
485 + };
486 +
487 +diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
488 +index c142169a58fc..e9147e35b739 100644
489 +--- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
490 ++++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts
491 +@@ -40,6 +40,7 @@
492 + pinctrl-0 = <&usb30_host_drv>;
493 + regulator-name = "vcc_host_5v";
494 + regulator-always-on;
495 ++ regulator-boot-on;
496 + vin-supply = <&vcc_sys>;
497 + };
498 +
499 +@@ -50,6 +51,7 @@
500 + pinctrl-0 = <&usb20_host_drv>;
501 + regulator-name = "vcc_host1_5v";
502 + regulator-always-on;
503 ++ regulator-boot-on;
504 + vin-supply = <&vcc_sys>;
505 + };
506 +
507 +diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
508 +index 83a9aa3cf689..dd18d8174504 100644
509 +--- a/arch/powerpc/include/asm/kvm_book3s.h
510 ++++ b/arch/powerpc/include/asm/kvm_book3s.h
511 +@@ -301,12 +301,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
512 +
513 + static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
514 + {
515 +- vcpu->arch.cr = val;
516 ++ vcpu->arch.regs.ccr = val;
517 + }
518 +
519 + static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
520 + {
521 +- return vcpu->arch.cr;
522 ++ return vcpu->arch.regs.ccr;
523 + }
524 +
525 + static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
526 +diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
527 +index dc435a5af7d6..14fa07c73f44 100644
528 +--- a/arch/powerpc/include/asm/kvm_book3s_64.h
529 ++++ b/arch/powerpc/include/asm/kvm_book3s_64.h
530 +@@ -482,7 +482,7 @@ static inline u64 sanitize_msr(u64 msr)
531 + #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
532 + static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
533 + {
534 +- vcpu->arch.cr = vcpu->arch.cr_tm;
535 ++ vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
536 + vcpu->arch.regs.xer = vcpu->arch.xer_tm;
537 + vcpu->arch.regs.link = vcpu->arch.lr_tm;
538 + vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
539 +@@ -499,7 +499,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
540 +
541 + static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
542 + {
543 +- vcpu->arch.cr_tm = vcpu->arch.cr;
544 ++ vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
545 + vcpu->arch.xer_tm = vcpu->arch.regs.xer;
546 + vcpu->arch.lr_tm = vcpu->arch.regs.link;
547 + vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
548 +diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
549 +index d513e3ed1c65..f0cef625f17c 100644
550 +--- a/arch/powerpc/include/asm/kvm_booke.h
551 ++++ b/arch/powerpc/include/asm/kvm_booke.h
552 +@@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
553 +
554 + static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
555 + {
556 +- vcpu->arch.cr = val;
557 ++ vcpu->arch.regs.ccr = val;
558 + }
559 +
560 + static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
561 + {
562 +- return vcpu->arch.cr;
563 ++ return vcpu->arch.regs.ccr;
564 + }
565 +
566 + static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
567 +diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
568 +index 2b6049e83970..2f95e38f0549 100644
569 +--- a/arch/powerpc/include/asm/kvm_host.h
570 ++++ b/arch/powerpc/include/asm/kvm_host.h
571 +@@ -538,8 +538,6 @@ struct kvm_vcpu_arch {
572 + ulong tar;
573 + #endif
574 +
575 +- u32 cr;
576 +-
577 + #ifdef CONFIG_PPC_BOOK3S
578 + ulong hflags;
579 + ulong guest_owned_ext;
580 +diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
581 +index b694d6af1150..ae953958c0f3 100644
582 +--- a/arch/powerpc/include/asm/mmu_context.h
583 ++++ b/arch/powerpc/include/asm/mmu_context.h
584 +@@ -217,12 +217,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
585 + #endif
586 + }
587 +
588 +-static inline int arch_dup_mmap(struct mm_struct *oldmm,
589 +- struct mm_struct *mm)
590 +-{
591 +- return 0;
592 +-}
593 +-
594 + #ifndef CONFIG_PPC_BOOK3S_64
595 + static inline void arch_exit_mmap(struct mm_struct *mm)
596 + {
597 +@@ -247,6 +241,7 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
598 + #ifdef CONFIG_PPC_MEM_KEYS
599 + bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
600 + bool execute, bool foreign);
601 ++void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm);
602 + #else /* CONFIG_PPC_MEM_KEYS */
603 + static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
604 + bool write, bool execute, bool foreign)
605 +@@ -259,6 +254,7 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
606 + #define thread_pkey_regs_save(thread)
607 + #define thread_pkey_regs_restore(new_thread, old_thread)
608 + #define thread_pkey_regs_init(thread)
609 ++#define arch_dup_pkeys(oldmm, mm)
610 +
611 + static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
612 + {
613 +@@ -267,5 +263,12 @@ static inline u64 pte_to_hpte_pkey_bits(u64 pteflags)
614 +
615 + #endif /* CONFIG_PPC_MEM_KEYS */
616 +
617 ++static inline int arch_dup_mmap(struct mm_struct *oldmm,
618 ++ struct mm_struct *mm)
619 ++{
620 ++ arch_dup_pkeys(oldmm, mm);
621 ++ return 0;
622 ++}
623 ++
624 + #endif /* __KERNEL__ */
625 + #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
626 +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
627 +index e5b314ed054e..640a4d818772 100644
628 +--- a/arch/powerpc/include/asm/reg.h
629 ++++ b/arch/powerpc/include/asm/reg.h
630 +@@ -118,11 +118,16 @@
631 + #define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
632 + #define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
633 + #define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
634 +-#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
635 + #define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
636 + #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
637 + #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
638 +
639 ++#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
640 ++#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
641 ++#else
642 ++#define MSR_TM_ACTIVE(x) 0
643 ++#endif
644 ++
645 + #if defined(CONFIG_PPC_BOOK3S_64)
646 + #define MSR_64BIT MSR_SF
647 +
648 +diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
649 +index 89cf15566c4e..7c3738d890e8 100644
650 +--- a/arch/powerpc/kernel/asm-offsets.c
651 ++++ b/arch/powerpc/kernel/asm-offsets.c
652 +@@ -438,7 +438,7 @@ int main(void)
653 + #ifdef CONFIG_PPC_BOOK3S
654 + OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
655 + #endif
656 +- OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
657 ++ OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
658 + OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
659 + #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
660 + OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
661 +@@ -695,7 +695,7 @@ int main(void)
662 + #endif /* CONFIG_PPC_BOOK3S_64 */
663 +
664 + #else /* CONFIG_PPC_BOOK3S */
665 +- OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
666 ++ OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
667 + OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
668 + OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
669 + OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
670 +diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
671 +index 9168a247e24f..3fb564f3e887 100644
672 +--- a/arch/powerpc/kernel/head_64.S
673 ++++ b/arch/powerpc/kernel/head_64.S
674 +@@ -906,6 +906,7 @@ p_toc: .8byte __toc_start + 0x8000 - 0b
675 + /*
676 + * This is where the main kernel code starts.
677 + */
678 ++__REF
679 + start_here_multiplatform:
680 + /* set up the TOC */
681 + bl relative_toc
682 +@@ -981,6 +982,7 @@ start_here_multiplatform:
683 + RFI
684 + b . /* prevent speculative execution */
685 +
686 ++ .previous
687 + /* This is where all platforms converge execution */
688 +
689 + start_here_common:
690 +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
691 +index d29f2dca725b..909c9407e392 100644
692 +--- a/arch/powerpc/kernel/process.c
693 ++++ b/arch/powerpc/kernel/process.c
694 +@@ -102,27 +102,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
695 + }
696 + }
697 +
698 +-static inline bool msr_tm_active(unsigned long msr)
699 +-{
700 +- return MSR_TM_ACTIVE(msr);
701 +-}
702 +-
703 +-static bool tm_active_with_fp(struct task_struct *tsk)
704 +-{
705 +- return msr_tm_active(tsk->thread.regs->msr) &&
706 +- (tsk->thread.ckpt_regs.msr & MSR_FP);
707 +-}
708 +-
709 +-static bool tm_active_with_altivec(struct task_struct *tsk)
710 +-{
711 +- return msr_tm_active(tsk->thread.regs->msr) &&
712 +- (tsk->thread.ckpt_regs.msr & MSR_VEC);
713 +-}
714 + #else
715 +-static inline bool msr_tm_active(unsigned long msr) { return false; }
716 + static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
717 +-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
718 +-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
719 + #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
720 +
721 + bool strict_msr_control;
722 +@@ -247,7 +228,8 @@ void enable_kernel_fp(void)
723 + * giveup as this would save to the 'live' structure not the
724 + * checkpointed structure.
725 + */
726 +- if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
727 ++ if (!MSR_TM_ACTIVE(cpumsr) &&
728 ++ MSR_TM_ACTIVE(current->thread.regs->msr))
729 + return;
730 + __giveup_fpu(current);
731 + }
732 +@@ -256,7 +238,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
733 +
734 + static int restore_fp(struct task_struct *tsk)
735 + {
736 +- if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
737 ++ if (tsk->thread.load_fp) {
738 + load_fp_state(&current->thread.fp_state);
739 + current->thread.load_fp++;
740 + return 1;
741 +@@ -311,7 +293,8 @@ void enable_kernel_altivec(void)
742 + * giveup as this would save to the 'live' structure not the
743 + * checkpointed structure.
744 + */
745 +- if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
746 ++ if (!MSR_TM_ACTIVE(cpumsr) &&
747 ++ MSR_TM_ACTIVE(current->thread.regs->msr))
748 + return;
749 + __giveup_altivec(current);
750 + }
751 +@@ -337,8 +320,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
752 +
753 + static int restore_altivec(struct task_struct *tsk)
754 + {
755 +- if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
756 +- (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
757 ++ if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
758 + load_vr_state(&tsk->thread.vr_state);
759 + tsk->thread.used_vr = 1;
760 + tsk->thread.load_vec++;
761 +@@ -397,7 +379,8 @@ void enable_kernel_vsx(void)
762 + * giveup as this would save to the 'live' structure not the
763 + * checkpointed structure.
764 + */
765 +- if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
766 ++ if (!MSR_TM_ACTIVE(cpumsr) &&
767 ++ MSR_TM_ACTIVE(current->thread.regs->msr))
768 + return;
769 + __giveup_vsx(current);
770 + }
771 +@@ -499,13 +482,14 @@ void giveup_all(struct task_struct *tsk)
772 + if (!tsk->thread.regs)
773 + return;
774 +
775 ++ check_if_tm_restore_required(tsk);
776 ++
777 + usermsr = tsk->thread.regs->msr;
778 +
779 + if ((usermsr & msr_all_available) == 0)
780 + return;
781 +
782 + msr_check_and_set(msr_all_available);
783 +- check_if_tm_restore_required(tsk);
784 +
785 + WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
786 +
787 +@@ -530,7 +514,7 @@ void restore_math(struct pt_regs *regs)
788 + {
789 + unsigned long msr;
790 +
791 +- if (!msr_tm_active(regs->msr) &&
792 ++ if (!MSR_TM_ACTIVE(regs->msr) &&
793 + !current->thread.load_fp && !loadvec(current->thread))
794 + return;
795 +
796 +diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
797 +index 68e14afecac8..a488c105b923 100644
798 +--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
799 ++++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
800 +@@ -744,12 +744,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
801 + srcu_idx = srcu_read_lock(&kvm->srcu);
802 + slots = kvm_memslots(kvm);
803 + kvm_for_each_memslot(memslot, slots) {
804 ++ /* Mutual exclusion with kvm_unmap_hva_range etc. */
805 ++ spin_lock(&kvm->mmu_lock);
806 + /*
807 + * This assumes it is acceptable to lose reference and
808 + * change bits across a reset.
809 + */
810 + memset(memslot->arch.rmap, 0,
811 + memslot->npages * sizeof(*memslot->arch.rmap));
812 ++ spin_unlock(&kvm->mmu_lock);
813 + }
814 + srcu_read_unlock(&kvm->srcu, srcu_idx);
815 + }
816 +diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
817 +index 36b11c5a0dbb..2654df220d05 100644
818 +--- a/arch/powerpc/kvm/book3s_emulate.c
819 ++++ b/arch/powerpc/kvm/book3s_emulate.c
820 +@@ -110,7 +110,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
821 + vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
822 + vcpu->arch.tar_tm = vcpu->arch.tar;
823 + vcpu->arch.lr_tm = vcpu->arch.regs.link;
824 +- vcpu->arch.cr_tm = vcpu->arch.cr;
825 ++ vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
826 + vcpu->arch.xer_tm = vcpu->arch.regs.xer;
827 + vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
828 + }
829 +@@ -129,7 +129,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
830 + vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
831 + vcpu->arch.tar = vcpu->arch.tar_tm;
832 + vcpu->arch.regs.link = vcpu->arch.lr_tm;
833 +- vcpu->arch.cr = vcpu->arch.cr_tm;
834 ++ vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
835 + vcpu->arch.regs.xer = vcpu->arch.xer_tm;
836 + vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
837 + }
838 +@@ -141,7 +141,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
839 + uint64_t texasr;
840 +
841 + /* CR0 = 0 | MSR[TS] | 0 */
842 +- vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
843 ++ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
844 + (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
845 + << CR0_SHIFT);
846 +
847 +@@ -220,7 +220,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
848 + tm_abort(ra_val);
849 +
850 + /* CR0 = 0 | MSR[TS] | 0 */
851 +- vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
852 ++ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
853 + (((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
854 + << CR0_SHIFT);
855 +
856 +@@ -494,8 +494,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
857 +
858 + if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
859 + preempt_disable();
860 +- vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
861 +- (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
862 ++ vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
863 ++ (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
864 +
865 + vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
866 + (((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
867 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
868 +index 083dcedba11c..05b32cc12e41 100644
869 +--- a/arch/powerpc/kvm/book3s_hv.c
870 ++++ b/arch/powerpc/kvm/book3s_hv.c
871 +@@ -410,8 +410,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
872 + vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
873 + pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
874 + vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
875 +- pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
876 +- vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
877 ++ pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n",
878 ++ vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
879 + pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
880 + pr_err("fault dar = %.16lx dsisr = %.8x\n",
881 + vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
882 +@@ -3813,12 +3813,15 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
883 + /* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
884 + int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
885 + {
886 ++ kvmppc_rmap_reset(kvm);
887 ++ kvm->arch.process_table = 0;
888 ++ /* Mutual exclusion with kvm_unmap_hva_range etc. */
889 ++ spin_lock(&kvm->mmu_lock);
890 ++ kvm->arch.radix = 0;
891 ++ spin_unlock(&kvm->mmu_lock);
892 + kvmppc_free_radix(kvm);
893 + kvmppc_update_lpcr(kvm, LPCR_VPM1,
894 + LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
895 +- kvmppc_rmap_reset(kvm);
896 +- kvm->arch.radix = 0;
897 +- kvm->arch.process_table = 0;
898 + return 0;
899 + }
900 +
901 +@@ -3831,10 +3834,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
902 + if (err)
903 + return err;
904 +
905 ++ kvmppc_rmap_reset(kvm);
906 ++ /* Mutual exclusion with kvm_unmap_hva_range etc. */
907 ++ spin_lock(&kvm->mmu_lock);
908 ++ kvm->arch.radix = 1;
909 ++ spin_unlock(&kvm->mmu_lock);
910 + kvmppc_free_hpt(&kvm->arch.hpt);
911 + kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
912 + LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
913 +- kvm->arch.radix = 1;
914 + return 0;
915 + }
916 +
917 +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
918 +index 1d14046124a0..68c7591f2b5f 100644
919 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
920 ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
921 +@@ -56,6 +56,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
922 + #define STACK_SLOT_DAWR (SFS-56)
923 + #define STACK_SLOT_DAWRX (SFS-64)
924 + #define STACK_SLOT_HFSCR (SFS-72)
925 ++#define STACK_SLOT_AMR (SFS-80)
926 ++#define STACK_SLOT_UAMOR (SFS-88)
927 +
928 + /*
929 + * Call kvmppc_hv_entry in real mode.
930 +@@ -760,11 +762,9 @@ BEGIN_FTR_SECTION
931 + mfspr r5, SPRN_TIDR
932 + mfspr r6, SPRN_PSSCR
933 + mfspr r7, SPRN_PID
934 +- mfspr r8, SPRN_IAMR
935 + std r5, STACK_SLOT_TID(r1)
936 + std r6, STACK_SLOT_PSSCR(r1)
937 + std r7, STACK_SLOT_PID(r1)
938 +- std r8, STACK_SLOT_IAMR(r1)
939 + mfspr r5, SPRN_HFSCR
940 + std r5, STACK_SLOT_HFSCR(r1)
941 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
942 +@@ -772,11 +772,18 @@ BEGIN_FTR_SECTION
943 + mfspr r5, SPRN_CIABR
944 + mfspr r6, SPRN_DAWR
945 + mfspr r7, SPRN_DAWRX
946 ++ mfspr r8, SPRN_IAMR
947 + std r5, STACK_SLOT_CIABR(r1)
948 + std r6, STACK_SLOT_DAWR(r1)
949 + std r7, STACK_SLOT_DAWRX(r1)
950 ++ std r8, STACK_SLOT_IAMR(r1)
951 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
952 +
953 ++ mfspr r5, SPRN_AMR
954 ++ std r5, STACK_SLOT_AMR(r1)
955 ++ mfspr r6, SPRN_UAMOR
956 ++ std r6, STACK_SLOT_UAMOR(r1)
957 ++
958 + BEGIN_FTR_SECTION
959 + /* Set partition DABR */
960 + /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
961 +@@ -1202,7 +1209,7 @@ BEGIN_FTR_SECTION
962 + END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
963 +
964 + ld r5, VCPU_LR(r4)
965 +- lwz r6, VCPU_CR(r4)
966 ++ ld r6, VCPU_CR(r4)
967 + mtlr r5
968 + mtcr r6
969 +
970 +@@ -1313,7 +1320,7 @@ kvmppc_interrupt_hv:
971 + std r3, VCPU_GPR(R12)(r9)
972 + /* CR is in the high half of r12 */
973 + srdi r4, r12, 32
974 +- stw r4, VCPU_CR(r9)
975 ++ std r4, VCPU_CR(r9)
976 + BEGIN_FTR_SECTION
977 + ld r3, HSTATE_CFAR(r13)
978 + std r3, VCPU_CFAR(r9)
979 +@@ -1713,22 +1720,25 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
980 + mtspr SPRN_PSPB, r0
981 + mtspr SPRN_WORT, r0
982 + BEGIN_FTR_SECTION
983 +- mtspr SPRN_IAMR, r0
984 + mtspr SPRN_TCSCR, r0
985 + /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
986 + li r0, 1
987 + sldi r0, r0, 31
988 + mtspr SPRN_MMCRS, r0
989 + END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
990 +-8:
991 +
992 +- /* Save and reset AMR and UAMOR before turning on the MMU */
993 ++ /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
994 ++ ld r8, STACK_SLOT_IAMR(r1)
995 ++ mtspr SPRN_IAMR, r8
996 ++
997 ++8: /* Power7 jumps back in here */
998 + mfspr r5,SPRN_AMR
999 + mfspr r6,SPRN_UAMOR
1000 + std r5,VCPU_AMR(r9)
1001 + std r6,VCPU_UAMOR(r9)
1002 +- li r6,0
1003 +- mtspr SPRN_AMR,r6
1004 ++ ld r5,STACK_SLOT_AMR(r1)
1005 ++ ld r6,STACK_SLOT_UAMOR(r1)
1006 ++ mtspr SPRN_AMR, r5
1007 + mtspr SPRN_UAMOR, r6
1008 +
1009 + /* Switch DSCR back to host value */
1010 +@@ -1897,11 +1907,9 @@ BEGIN_FTR_SECTION
1011 + ld r5, STACK_SLOT_TID(r1)
1012 + ld r6, STACK_SLOT_PSSCR(r1)
1013 + ld r7, STACK_SLOT_PID(r1)
1014 +- ld r8, STACK_SLOT_IAMR(r1)
1015 + mtspr SPRN_TIDR, r5
1016 + mtspr SPRN_PSSCR, r6
1017 + mtspr SPRN_PID, r7
1018 +- mtspr SPRN_IAMR, r8
1019 + END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1020 +
1021 + #ifdef CONFIG_PPC_RADIX_MMU
1022 +diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
1023 +index 008285058f9b..31cd0f327c8a 100644
1024 +--- a/arch/powerpc/kvm/book3s_hv_tm.c
1025 ++++ b/arch/powerpc/kvm/book3s_hv_tm.c
1026 +@@ -130,8 +130,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
1027 + return RESUME_GUEST;
1028 + }
1029 + /* Set CR0 to indicate previous transactional state */
1030 +- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
1031 +- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
1032 ++ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
1033 ++ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
1034 + /* L=1 => tresume, L=0 => tsuspend */
1035 + if (instr & (1 << 21)) {
1036 + if (MSR_TM_SUSPENDED(msr))
1037 +@@ -174,8 +174,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
1038 + copy_from_checkpoint(vcpu);
1039 +
1040 + /* Set CR0 to indicate previous transactional state */
1041 +- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
1042 +- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
1043 ++ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
1044 ++ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
1045 + vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
1046 + return RESUME_GUEST;
1047 +
1048 +@@ -204,8 +204,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
1049 + copy_to_checkpoint(vcpu);
1050 +
1051 + /* Set CR0 to indicate previous transactional state */
1052 +- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
1053 +- (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
1054 ++ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
1055 ++ (((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 29);
1056 + vcpu->arch.shregs.msr = msr | MSR_TS_S;
1057 + return RESUME_GUEST;
1058 + }
1059 +diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
1060 +index b2c7c6fca4f9..3cf5863bc06e 100644
1061 +--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
1062 ++++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
1063 +@@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
1064 + if (instr & (1 << 21))
1065 + vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
1066 + /* Set CR0 to 0b0010 */
1067 +- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
1068 ++ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
1069 ++ 0x20000000;
1070 + return 1;
1071 + }
1072 +
1073 +@@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
1074 + vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */
1075 + vcpu->arch.regs.nip = vcpu->arch.tfhar;
1076 + copy_from_checkpoint(vcpu);
1077 +- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
1078 ++ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
1079 + }
1080 +diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
1081 +index 614ebb4261f7..de9702219dee 100644
1082 +--- a/arch/powerpc/kvm/book3s_pr.c
1083 ++++ b/arch/powerpc/kvm/book3s_pr.c
1084 +@@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
1085 + svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
1086 + svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
1087 + svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
1088 +- svcpu->cr = vcpu->arch.cr;
1089 ++ svcpu->cr = vcpu->arch.regs.ccr;
1090 + svcpu->xer = vcpu->arch.regs.xer;
1091 + svcpu->ctr = vcpu->arch.regs.ctr;
1092 + svcpu->lr = vcpu->arch.regs.link;
1093 +@@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
1094 + vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
1095 + vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
1096 + vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
1097 +- vcpu->arch.cr = svcpu->cr;
1098 ++ vcpu->arch.regs.ccr = svcpu->cr;
1099 + vcpu->arch.regs.xer = svcpu->xer;
1100 + vcpu->arch.regs.ctr = svcpu->ctr;
1101 + vcpu->arch.regs.link = svcpu->lr;
1102 +diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
1103 +index 612b7f6a887f..4e5081e58409 100644
1104 +--- a/arch/powerpc/kvm/bookehv_interrupts.S
1105 ++++ b/arch/powerpc/kvm/bookehv_interrupts.S
1106 +@@ -186,7 +186,7 @@ END_BTB_FLUSH_SECTION
1107 + */
1108 + PPC_LL r4, PACACURRENT(r13)
1109 + PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4)
1110 +- stw r10, VCPU_CR(r4)
1111 ++ PPC_STL r10, VCPU_CR(r4)
1112 + PPC_STL r11, VCPU_GPR(R4)(r4)
1113 + PPC_STL r5, VCPU_GPR(R5)(r4)
1114 + PPC_STL r6, VCPU_GPR(R6)(r4)
1115 +@@ -296,7 +296,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
1116 + PPC_STL r4, VCPU_GPR(R4)(r11)
1117 + PPC_LL r4, THREAD_NORMSAVE(0)(r10)
1118 + PPC_STL r5, VCPU_GPR(R5)(r11)
1119 +- stw r13, VCPU_CR(r11)
1120 ++ PPC_STL r13, VCPU_CR(r11)
1121 + mfspr r5, \srr0
1122 + PPC_STL r3, VCPU_GPR(R10)(r11)
1123 + PPC_LL r3, THREAD_NORMSAVE(2)(r10)
1124 +@@ -323,7 +323,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
1125 + PPC_STL r4, VCPU_GPR(R4)(r11)
1126 + PPC_LL r4, GPR9(r8)
1127 + PPC_STL r5, VCPU_GPR(R5)(r11)
1128 +- stw r9, VCPU_CR(r11)
1129 ++ PPC_STL r9, VCPU_CR(r11)
1130 + mfspr r5, \srr0
1131 + PPC_STL r3, VCPU_GPR(R8)(r11)
1132 + PPC_LL r3, GPR10(r8)
1133 +@@ -647,7 +647,7 @@ lightweight_exit:
1134 + PPC_LL r3, VCPU_LR(r4)
1135 + PPC_LL r5, VCPU_XER(r4)
1136 + PPC_LL r6, VCPU_CTR(r4)
1137 +- lwz r7, VCPU_CR(r4)
1138 ++ PPC_LL r7, VCPU_CR(r4)
1139 + PPC_LL r8, VCPU_PC(r4)
1140 + PPC_LD(r9, VCPU_SHARED_MSR, r11)
1141 + PPC_LL r0, VCPU_GPR(R0)(r4)
1142 +diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
1143 +index 75dce1ef3bc8..f91b1309a0a8 100644
1144 +--- a/arch/powerpc/kvm/emulate_loadstore.c
1145 ++++ b/arch/powerpc/kvm/emulate_loadstore.c
1146 +@@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
1147 +
1148 + emulated = EMULATE_FAIL;
1149 + vcpu->arch.regs.msr = vcpu->arch.shared->msr;
1150 +- vcpu->arch.regs.ccr = vcpu->arch.cr;
1151 + if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
1152 + int type = op.type & INSTR_TYPE_MASK;
1153 + int size = GETSIZE(op.type);
1154 +diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
1155 +index f23a89d8e4ce..29fd8940867e 100644
1156 +--- a/arch/powerpc/mm/hash_utils_64.c
1157 ++++ b/arch/powerpc/mm/hash_utils_64.c
1158 +@@ -1859,11 +1859,20 @@ void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
1159 + *
1160 + * For guests on platforms before POWER9, we clamp the it limit to 1G
1161 + * to avoid some funky things such as RTAS bugs etc...
1162 ++ *
1163 ++ * On POWER9 we limit to 1TB in case the host erroneously told us that
1164 ++ * the RMA was >1TB. Effective address bits 0:23 are treated as zero
1165 ++ * (meaning the access is aliased to zero i.e. addr = addr % 1TB)
1166 ++ * for virtual real mode addressing and so it doesn't make sense to
1167 ++ * have an area larger than 1TB as it can't be addressed.
1168 + */
1169 + if (!early_cpu_has_feature(CPU_FTR_HVMODE)) {
1170 + ppc64_rma_size = first_memblock_size;
1171 + if (!early_cpu_has_feature(CPU_FTR_ARCH_300))
1172 + ppc64_rma_size = min_t(u64, ppc64_rma_size, 0x40000000);
1173 ++ else
1174 ++ ppc64_rma_size = min_t(u64, ppc64_rma_size,
1175 ++ 1UL << SID_SHIFT_1T);
1176 +
1177 + /* Finally limit subsequent allocations */
1178 + memblock_set_current_limit(ppc64_rma_size);
1179 +diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
1180 +index b271b283c785..25a8dd9cd71d 100644
1181 +--- a/arch/powerpc/mm/pkeys.c
1182 ++++ b/arch/powerpc/mm/pkeys.c
1183 +@@ -414,3 +414,13 @@ bool arch_vma_access_permitted(struct vm_area_struct *vma, bool write,
1184 +
1185 + return pkey_access_permitted(vma_pkey(vma), write, execute);
1186 + }
1187 ++
1188 ++void arch_dup_pkeys(struct mm_struct *oldmm, struct mm_struct *mm)
1189 ++{
1190 ++ if (static_branch_likely(&pkey_disabled))
1191 ++ return;
1192 ++
1193 ++ /* Duplicate the oldmm pkey state in mm: */
1194 ++ mm_pkey_allocation_map(mm) = mm_pkey_allocation_map(oldmm);
1195 ++ mm->context.execute_only_pkey = oldmm->context.execute_only_pkey;
1196 ++}
1197 +diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
1198 +index c433f6d3dd64..a840b7d074f7 100644
1199 +--- a/arch/riscv/kernel/ftrace.c
1200 ++++ b/arch/riscv/kernel/ftrace.c
1201 +@@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
1202 + {
1203 + unsigned long return_hooker = (unsigned long)&return_to_handler;
1204 + unsigned long old;
1205 +- int err;
1206 +
1207 + if (unlikely(atomic_read(&current->tracing_graph_pause)))
1208 + return;
1209 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
1210 +index 3245b95ad2d9..0d3f5cf3ff3e 100644
1211 +--- a/arch/x86/include/asm/kvm_host.h
1212 ++++ b/arch/x86/include/asm/kvm_host.h
1213 +@@ -117,7 +117,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
1214 + }
1215 +
1216 + #define KVM_PERMILLE_MMU_PAGES 20
1217 +-#define KVM_MIN_ALLOC_MMU_PAGES 64
1218 ++#define KVM_MIN_ALLOC_MMU_PAGES 64UL
1219 + #define KVM_MMU_HASH_SHIFT 12
1220 + #define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
1221 + #define KVM_MIN_FREE_MMU_PAGES 5
1222 +@@ -784,6 +784,9 @@ struct kvm_hv {
1223 + u64 hv_reenlightenment_control;
1224 + u64 hv_tsc_emulation_control;
1225 + u64 hv_tsc_emulation_status;
1226 ++
1227 ++ /* How many vCPUs have VP index != vCPU index */
1228 ++ atomic_t num_mismatched_vp_indexes;
1229 + };
1230 +
1231 + enum kvm_irqchip_mode {
1232 +@@ -793,9 +796,9 @@ enum kvm_irqchip_mode {
1233 + };
1234 +
1235 + struct kvm_arch {
1236 +- unsigned int n_used_mmu_pages;
1237 +- unsigned int n_requested_mmu_pages;
1238 +- unsigned int n_max_mmu_pages;
1239 ++ unsigned long n_used_mmu_pages;
1240 ++ unsigned long n_requested_mmu_pages;
1241 ++ unsigned long n_max_mmu_pages;
1242 + unsigned int indirect_shadow_pages;
1243 + unsigned long mmu_valid_gen;
1244 + struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
1245 +@@ -1198,8 +1201,8 @@ void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1246 + gfn_t gfn_offset, unsigned long mask);
1247 + void kvm_mmu_zap_all(struct kvm *kvm);
1248 + void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
1249 +-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
1250 +-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
1251 ++unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
1252 ++void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
1253 +
1254 + int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
1255 + bool pdptrs_changed(struct kvm_vcpu *vcpu);
1256 +diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
1257 +index 50d309662d78..5790671857e5 100644
1258 +--- a/arch/x86/kernel/ftrace.c
1259 ++++ b/arch/x86/kernel/ftrace.c
1260 +@@ -53,7 +53,7 @@ int ftrace_arch_code_modify_post_process(void)
1261 + union ftrace_code_union {
1262 + char code[MCOUNT_INSN_SIZE];
1263 + struct {
1264 +- unsigned char e8;
1265 ++ unsigned char op;
1266 + int offset;
1267 + } __attribute__((packed));
1268 + };
1269 +@@ -63,20 +63,23 @@ static int ftrace_calc_offset(long ip, long addr)
1270 + return (int)(addr - ip);
1271 + }
1272 +
1273 +-static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
1274 ++static unsigned char *
1275 ++ftrace_text_replace(unsigned char op, unsigned long ip, unsigned long addr)
1276 + {
1277 + static union ftrace_code_union calc;
1278 +
1279 +- calc.e8 = 0xe8;
1280 ++ calc.op = op;
1281 + calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
1282 +
1283 +- /*
1284 +- * No locking needed, this must be called via kstop_machine
1285 +- * which in essence is like running on a uniprocessor machine.
1286 +- */
1287 + return calc.code;
1288 + }
1289 +
1290 ++static unsigned char *
1291 ++ftrace_call_replace(unsigned long ip, unsigned long addr)
1292 ++{
1293 ++ return ftrace_text_replace(0xe8, ip, addr);
1294 ++}
1295 ++
1296 + static inline int
1297 + within(unsigned long addr, unsigned long start, unsigned long end)
1298 + {
1299 +@@ -686,22 +689,6 @@ int __init ftrace_dyn_arch_init(void)
1300 + return 0;
1301 + }
1302 +
1303 +-#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
1304 +-static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
1305 +-{
1306 +- static union ftrace_code_union calc;
1307 +-
1308 +- /* Jmp not a call (ignore the .e8) */
1309 +- calc.e8 = 0xe9;
1310 +- calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
1311 +-
1312 +- /*
1313 +- * ftrace external locks synchronize the access to the static variable.
1314 +- */
1315 +- return calc.code;
1316 +-}
1317 +-#endif
1318 +-
1319 + /* Currently only x86_64 supports dynamic trampolines */
1320 + #ifdef CONFIG_X86_64
1321 +
1322 +@@ -923,8 +910,8 @@ static void *addr_from_call(void *ptr)
1323 + return NULL;
1324 +
1325 + /* Make sure this is a call */
1326 +- if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
1327 +- pr_warn("Expected e8, got %x\n", calc.e8);
1328 ++ if (WARN_ON_ONCE(calc.op != 0xe8)) {
1329 ++ pr_warn("Expected e8, got %x\n", calc.op);
1330 + return NULL;
1331 + }
1332 +
1333 +@@ -995,6 +982,11 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
1334 + #ifdef CONFIG_DYNAMIC_FTRACE
1335 + extern void ftrace_graph_call(void);
1336 +
1337 ++static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
1338 ++{
1339 ++ return ftrace_text_replace(0xe9, ip, addr);
1340 ++}
1341 ++
1342 + static int ftrace_mod_jmp(unsigned long ip, void *func)
1343 + {
1344 + unsigned char *new;
1345 +diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
1346 +index 013fe3d21dbb..2ec202cb9dfd 100644
1347 +--- a/arch/x86/kernel/kvmclock.c
1348 ++++ b/arch/x86/kernel/kvmclock.c
1349 +@@ -117,12 +117,8 @@ static u64 kvm_sched_clock_read(void)
1350 +
1351 + static inline void kvm_sched_clock_init(bool stable)
1352 + {
1353 +- if (!stable) {
1354 +- pv_time_ops.sched_clock = kvm_clock_read;
1355 ++ if (!stable)
1356 + clear_sched_clock_stable();
1357 +- return;
1358 +- }
1359 +-
1360 + kvm_sched_clock_offset = kvm_clock_read();
1361 + pv_time_ops.sched_clock = kvm_sched_clock_read;
1362 +
1363 +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
1364 +index b4866badb235..90ecc108bc8a 100644
1365 +--- a/arch/x86/kernel/setup.c
1366 ++++ b/arch/x86/kernel/setup.c
1367 +@@ -1251,7 +1251,7 @@ void __init setup_arch(char **cmdline_p)
1368 + x86_init.hyper.guest_late_init();
1369 +
1370 + e820__reserve_resources();
1371 +- e820__register_nosave_regions(max_low_pfn);
1372 ++ e820__register_nosave_regions(max_pfn);
1373 +
1374 + x86_init.resources.reserve_resources();
1375 +
1376 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
1377 +index 4a688ef9e448..429728b35bca 100644
1378 +--- a/arch/x86/kvm/emulate.c
1379 ++++ b/arch/x86/kvm/emulate.c
1380 +@@ -2331,12 +2331,16 @@ static int em_lseg(struct x86_emulate_ctxt *ctxt)
1381 +
1382 + static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
1383 + {
1384 ++#ifdef CONFIG_X86_64
1385 + u32 eax, ebx, ecx, edx;
1386 +
1387 + eax = 0x80000001;
1388 + ecx = 0;
1389 + ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx, false);
1390 + return edx & bit(X86_FEATURE_LM);
1391 ++#else
1392 ++ return false;
1393 ++#endif
1394 + }
1395 +
1396 + #define GET_SMSTATE(type, smbase, offset) \
1397 +@@ -2381,6 +2385,7 @@ static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
1398 + return X86EMUL_CONTINUE;
1399 + }
1400 +
1401 ++#ifdef CONFIG_X86_64
1402 + static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
1403 + {
1404 + struct desc_struct desc;
1405 +@@ -2399,6 +2404,7 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
1406 + ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
1407 + return X86EMUL_CONTINUE;
1408 + }
1409 ++#endif
1410 +
1411 + static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
1412 + u64 cr0, u64 cr3, u64 cr4)
1413 +@@ -2499,6 +2505,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
1414 + return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
1415 + }
1416 +
1417 ++#ifdef CONFIG_X86_64
1418 + static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
1419 + {
1420 + struct desc_struct desc;
1421 +@@ -2560,6 +2567,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
1422 +
1423 + return X86EMUL_CONTINUE;
1424 + }
1425 ++#endif
1426 +
1427 + static int em_rsm(struct x86_emulate_ctxt *ctxt)
1428 + {
1429 +@@ -2616,9 +2624,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
1430 + if (ctxt->ops->pre_leave_smm(ctxt, smbase))
1431 + return X86EMUL_UNHANDLEABLE;
1432 +
1433 ++#ifdef CONFIG_X86_64
1434 + if (emulator_has_longmode(ctxt))
1435 + ret = rsm_load_state_64(ctxt, smbase + 0x8000);
1436 + else
1437 ++#endif
1438 + ret = rsm_load_state_32(ctxt, smbase + 0x8000);
1439 +
1440 + if (ret != X86EMUL_CONTINUE) {
1441 +diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
1442 +index 229d99605165..5842c5f587fe 100644
1443 +--- a/arch/x86/kvm/hyperv.c
1444 ++++ b/arch/x86/kvm/hyperv.c
1445 +@@ -132,8 +132,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
1446 + struct kvm_vcpu *vcpu = NULL;
1447 + int i;
1448 +
1449 +- if (vpidx < KVM_MAX_VCPUS)
1450 +- vcpu = kvm_get_vcpu(kvm, vpidx);
1451 ++ if (vpidx >= KVM_MAX_VCPUS)
1452 ++ return NULL;
1453 ++
1454 ++ vcpu = kvm_get_vcpu(kvm, vpidx);
1455 + if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
1456 + return vcpu;
1457 + kvm_for_each_vcpu(i, vcpu, kvm)
1458 +@@ -689,6 +691,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
1459 + stimer_cleanup(&hv_vcpu->stimer[i]);
1460 + }
1461 +
1462 ++bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
1463 ++{
1464 ++ if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
1465 ++ return false;
1466 ++ return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
1467 ++}
1468 ++EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
1469 ++
1470 ++bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
1471 ++ struct hv_vp_assist_page *assist_page)
1472 ++{
1473 ++ if (!kvm_hv_assist_page_enabled(vcpu))
1474 ++ return false;
1475 ++ return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
1476 ++ assist_page, sizeof(*assist_page));
1477 ++}
1478 ++EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
1479 ++
1480 + static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
1481 + {
1482 + struct hv_message *msg = &stimer->msg;
1483 +@@ -1040,21 +1060,41 @@ static u64 current_task_runtime_100ns(void)
1484 +
1485 + static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1486 + {
1487 +- struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
1488 ++ struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1489 +
1490 + switch (msr) {
1491 +- case HV_X64_MSR_VP_INDEX:
1492 +- if (!host)
1493 ++ case HV_X64_MSR_VP_INDEX: {
1494 ++ struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
1495 ++ int vcpu_idx = kvm_vcpu_get_idx(vcpu);
1496 ++ u32 new_vp_index = (u32)data;
1497 ++
1498 ++ if (!host || new_vp_index >= KVM_MAX_VCPUS)
1499 + return 1;
1500 +- hv->vp_index = (u32)data;
1501 ++
1502 ++ if (new_vp_index == hv_vcpu->vp_index)
1503 ++ return 0;
1504 ++
1505 ++ /*
1506 ++ * The VP index is initialized to vcpu_index by
1507 ++ * kvm_hv_vcpu_postcreate so they initially match. Now the
1508 ++ * VP index is changing, adjust num_mismatched_vp_indexes if
1509 ++ * it now matches or no longer matches vcpu_idx.
1510 ++ */
1511 ++ if (hv_vcpu->vp_index == vcpu_idx)
1512 ++ atomic_inc(&hv->num_mismatched_vp_indexes);
1513 ++ else if (new_vp_index == vcpu_idx)
1514 ++ atomic_dec(&hv->num_mismatched_vp_indexes);
1515 ++
1516 ++ hv_vcpu->vp_index = new_vp_index;
1517 + break;
1518 ++ }
1519 + case HV_X64_MSR_VP_ASSIST_PAGE: {
1520 + u64 gfn;
1521 + unsigned long addr;
1522 +
1523 + if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1524 +- hv->hv_vapic = data;
1525 +- if (kvm_lapic_enable_pv_eoi(vcpu, 0))
1526 ++ hv_vcpu->hv_vapic = data;
1527 ++ if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
1528 + return 1;
1529 + break;
1530 + }
1531 +@@ -1064,10 +1104,11 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1532 + return 1;
1533 + if (__clear_user((void __user *)addr, PAGE_SIZE))
1534 + return 1;
1535 +- hv->hv_vapic = data;
1536 ++ hv_vcpu->hv_vapic = data;
1537 + kvm_vcpu_mark_page_dirty(vcpu, gfn);
1538 + if (kvm_lapic_enable_pv_eoi(vcpu,
1539 +- gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
1540 ++ gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1541 ++ sizeof(struct hv_vp_assist_page)))
1542 + return 1;
1543 + break;
1544 + }
1545 +@@ -1080,7 +1121,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1546 + case HV_X64_MSR_VP_RUNTIME:
1547 + if (!host)
1548 + return 1;
1549 +- hv->runtime_offset = data - current_task_runtime_100ns();
1550 ++ hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1551 + break;
1552 + case HV_X64_MSR_SCONTROL:
1553 + case HV_X64_MSR_SVERSION:
1554 +@@ -1172,11 +1213,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1555 + bool host)
1556 + {
1557 + u64 data = 0;
1558 +- struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
1559 ++ struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
1560 +
1561 + switch (msr) {
1562 + case HV_X64_MSR_VP_INDEX:
1563 +- data = hv->vp_index;
1564 ++ data = hv_vcpu->vp_index;
1565 + break;
1566 + case HV_X64_MSR_EOI:
1567 + return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1568 +@@ -1185,10 +1226,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1569 + case HV_X64_MSR_TPR:
1570 + return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1571 + case HV_X64_MSR_VP_ASSIST_PAGE:
1572 +- data = hv->hv_vapic;
1573 ++ data = hv_vcpu->hv_vapic;
1574 + break;
1575 + case HV_X64_MSR_VP_RUNTIME:
1576 +- data = current_task_runtime_100ns() + hv->runtime_offset;
1577 ++ data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1578 + break;
1579 + case HV_X64_MSR_SCONTROL:
1580 + case HV_X64_MSR_SVERSION:
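The VP-index hunk above keeps an atomic count of vCPUs whose Hyper-V VP index no longer equals their KVM vcpu index, so lookups can stay on a fast identity-mapped path while the counter is zero. A self-contained sketch of that bookkeeping, with hypothetical slot_val/num_mismatched names in place of the kernel structures:

    #include <assert.h>
    #include <stdatomic.h>

    #define NSLOTS 8

    static unsigned int slot_val[NSLOTS];
    static atomic_int num_mismatched = 0;

    static void set_slot(unsigned int idx, unsigned int new_val)
    {
        if (new_val == slot_val[idx])
            return;
        if (slot_val[idx] == idx)        /* stops matching its index */
            atomic_fetch_add(&num_mismatched, 1);
        else if (new_val == idx)         /* matches its index again */
            atomic_fetch_sub(&num_mismatched, 1);
        slot_val[idx] = new_val;
    }

    int main(void)
    {
        for (unsigned int i = 0; i < NSLOTS; i++)
            slot_val[i] = i;    /* identity, like vcpu_postcreate */
        set_slot(3, 7);         /* one mismatch appears */
        assert(atomic_load(&num_mismatched) == 1);
        set_slot(3, 3);         /* and is resolved again */
        assert(atomic_load(&num_mismatched) == 0);
        return 0;
    }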
1581 +diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
1582 +index d6aa969e20f1..0e66c12ed2c3 100644
1583 +--- a/arch/x86/kvm/hyperv.h
1584 ++++ b/arch/x86/kvm/hyperv.h
1585 +@@ -62,6 +62,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
1586 + void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
1587 + void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
1588 +
1589 ++bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
1590 ++bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
1591 ++ struct hv_vp_assist_page *assist_page);
1592 ++
1593 + static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
1594 + int timer_index)
1595 + {
1596 +diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
1597 +index faa264822cee..007bc654f928 100644
1598 +--- a/arch/x86/kvm/irq.c
1599 ++++ b/arch/x86/kvm/irq.c
1600 +@@ -172,3 +172,10 @@ void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
1601 + __kvm_migrate_apic_timer(vcpu);
1602 + __kvm_migrate_pit_timer(vcpu);
1603 + }
1604 ++
1605 ++bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
1606 ++{
1607 ++ bool resample = args->flags & KVM_IRQFD_FLAG_RESAMPLE;
1608 ++
1609 ++ return resample ? irqchip_kernel(kvm) : irqchip_in_kernel(kvm);
1610 ++}
1611 +diff --git a/arch/x86/kvm/irq.h b/arch/x86/kvm/irq.h
1612 +index d5005cc26521..fd210cdd4983 100644
1613 +--- a/arch/x86/kvm/irq.h
1614 ++++ b/arch/x86/kvm/irq.h
1615 +@@ -114,6 +114,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
1616 + return mode != KVM_IRQCHIP_NONE;
1617 + }
1618 +
1619 ++bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1620 + void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
1621 + void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
1622 + void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);
1623 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
1624 +index 5f5bc5976804..262e49301cae 100644
1625 +--- a/arch/x86/kvm/lapic.c
1626 ++++ b/arch/x86/kvm/lapic.c
1627 +@@ -2633,17 +2633,25 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
1628 + return 0;
1629 + }
1630 +
1631 +-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
1632 ++int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
1633 + {
1634 + u64 addr = data & ~KVM_MSR_ENABLED;
1635 ++ struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
1636 ++ unsigned long new_len;
1637 ++
1638 + if (!IS_ALIGNED(addr, 4))
1639 + return 1;
1640 +
1641 + vcpu->arch.pv_eoi.msr_val = data;
1642 + if (!pv_eoi_enabled(vcpu))
1643 + return 0;
1644 +- return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
1645 +- addr, sizeof(u8));
1646 ++
1647 ++ if (addr == ghc->gpa && len <= ghc->len)
1648 ++ new_len = ghc->len;
1649 ++ else
1650 ++ new_len = len;
1651 ++
1652 ++ return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
1653 + }
1654 +
1655 + void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
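kvm_lapic_enable_pv_eoi() above now takes the mapped length into account: a request at the same guest address that fits inside the existing cache keeps the wider mapping instead of shrinking it, so a later assist-page-sized read is not cut down by a 1-byte PV-EOI enable. A sketch of that reuse rule under invented types (this is not the kernel's gfn_to_hva_cache):

    #include <assert.h>
    #include <stdint.h>

    struct hva_cache {
        uint64_t gpa;
        unsigned long len;
    };

    static void cache_map(struct hva_cache *c, uint64_t gpa, unsigned long len)
    {
        /* same address and it fits: keep the wider existing mapping */
        if (gpa == c->gpa && len <= c->len)
            len = c->len;
        c->gpa = gpa;
        c->len = len;
    }

    int main(void)
    {
        struct hva_cache c = { 0, 0 };

        cache_map(&c, 0x1000, 32);    /* e.g. assist-page sized */
        cache_map(&c, 0x1000, 1);     /* 1-byte PV-EOI enable, same gpa */
        assert(c.len == 32);          /* the wider mapping survives */
        cache_map(&c, 0x2000, 1);     /* moved: genuinely remapped */
        assert(c.len == 1);
        return 0;
    }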
1656 +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
1657 +index ed0ed39abd36..ff6ef9c3d760 100644
1658 +--- a/arch/x86/kvm/lapic.h
1659 ++++ b/arch/x86/kvm/lapic.h
1660 +@@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
1661 + return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
1662 + }
1663 +
1664 +-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
1665 ++int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
1666 + void kvm_lapic_init(void);
1667 + void kvm_lapic_exit(void);
1668 +
1669 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
1670 +index cdc0c460950f..88940261fb53 100644
1671 +--- a/arch/x86/kvm/mmu.c
1672 ++++ b/arch/x86/kvm/mmu.c
1673 +@@ -1954,7 +1954,7 @@ static int is_empty_shadow_page(u64 *spt)
1674 + * aggregate version in order to make the slab shrinker
1675 + * faster
1676 + */
1677 +-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
1678 ++static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
1679 + {
1680 + kvm->arch.n_used_mmu_pages += nr;
1681 + percpu_counter_add(&kvm_total_used_mmu_pages, nr);
1682 +@@ -2704,7 +2704,7 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
1683 + * Changing the number of mmu pages allocated to the vm
1684 + * Note: if goal_nr_mmu_pages is too small, you will get dead lock
1685 + */
1686 +-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
1687 ++void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages)
1688 + {
1689 + LIST_HEAD(invalid_list);
1690 +
1691 +@@ -5926,10 +5926,10 @@ out:
1692 + /*
1693 + * Caculate mmu pages needed for kvm.
1694 + */
1695 +-unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1696 ++unsigned long kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1697 + {
1698 +- unsigned int nr_mmu_pages;
1699 +- unsigned int nr_pages = 0;
1700 ++ unsigned long nr_mmu_pages;
1701 ++ unsigned long nr_pages = 0;
1702 + struct kvm_memslots *slots;
1703 + struct kvm_memory_slot *memslot;
1704 + int i;
1705 +@@ -5942,8 +5942,7 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1706 + }
1707 +
1708 + nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
1709 +- nr_mmu_pages = max(nr_mmu_pages,
1710 +- (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
1711 ++ nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
1712 +
1713 + return nr_mmu_pages;
1714 + }
1715 +diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
1716 +index 1fab69c0b2f3..65892288bf51 100644
1717 +--- a/arch/x86/kvm/mmu.h
1718 ++++ b/arch/x86/kvm/mmu.h
1719 +@@ -69,7 +69,7 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
1720 + int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
1721 + u64 fault_address, char *insn, int insn_len);
1722 +
1723 +-static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
1724 ++static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
1725 + {
1726 + if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
1727 + return kvm->arch.n_max_mmu_pages -
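Several MMU page counters above widen from unsigned int to unsigned long; with a 32-bit type, the permille computation in kvm_mmu_calculate_mmu_pages() can overflow for guests with enough memory. A small demonstration, using a 20/1000 ratio that mirrors KVM_PERMILLE_MMU_PAGES and an example memory size:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* ~1 TiB of guest memory in 4 KiB pages: 2^28 pages */
        uint64_t nr_pages = (1ULL << 40) / 4096;

        uint32_t narrow = (uint32_t)nr_pages * 20 / 1000;    /* wraps */
        uint64_t wide = nr_pages * 20 / 1000;                /* correct */

        /* prints "32-bit: 1073741, 64-bit: 5368709 mmu pages" */
        printf("32-bit: %u, 64-bit: %llu mmu pages\n",
               narrow, (unsigned long long)wide);
        return 0;
    }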
1728 +diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
1729 +index e9ea2d45ae66..9f72cc427158 100644
1730 +--- a/arch/x86/kvm/mtrr.c
1731 ++++ b/arch/x86/kvm/mtrr.c
1732 +@@ -48,11 +48,6 @@ static bool msr_mtrr_valid(unsigned msr)
1733 + return false;
1734 + }
1735 +
1736 +-static bool valid_pat_type(unsigned t)
1737 +-{
1738 +- return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1739 +-}
1740 +-
1741 + static bool valid_mtrr_type(unsigned t)
1742 + {
1743 + return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1744 +@@ -67,10 +62,7 @@ bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1745 + return false;
1746 +
1747 + if (msr == MSR_IA32_CR_PAT) {
1748 +- for (i = 0; i < 8; i++)
1749 +- if (!valid_pat_type((data >> (i * 8)) & 0xff))
1750 +- return false;
1751 +- return true;
1752 ++ return kvm_pat_valid(data);
1753 + } else if (msr == MSR_MTRRdefType) {
1754 + if (data & ~0xcff)
1755 + return false;
1756 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1757 +index 0f33f00aa4df..ac2cc2ed7a85 100644
1758 +--- a/arch/x86/kvm/svm.c
1759 ++++ b/arch/x86/kvm/svm.c
1760 +@@ -5622,6 +5622,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1761 + svm->vmcb->save.cr2 = vcpu->arch.cr2;
1762 +
1763 + clgi();
1764 ++ kvm_load_guest_xcr0(vcpu);
1765 +
1766 + /*
1767 + * If this vCPU has touched SPEC_CTRL, restore the guest's value if
1768 +@@ -5769,6 +5770,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
1769 + if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
1770 + kvm_before_interrupt(&svm->vcpu);
1771 +
1772 ++ kvm_put_guest_xcr0(vcpu);
1773 + stgi();
1774 +
1775 + /* Any pending NMI will happen here */
1776 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
1777 +index 2e310ea62d60..2938b4bcc968 100644
1778 +--- a/arch/x86/kvm/vmx.c
1779 ++++ b/arch/x86/kvm/vmx.c
1780 +@@ -4135,7 +4135,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1781 + return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index,
1782 + &msr_info->data);
1783 + case MSR_IA32_XSS:
1784 +- if (!vmx_xsaves_supported())
1785 ++ if (!vmx_xsaves_supported() ||
1786 ++ (!msr_info->host_initiated &&
1787 ++ !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
1788 ++ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
1789 + return 1;
1790 + msr_info->data = vcpu->arch.ia32_xss;
1791 + break;
1792 +@@ -4265,9 +4268,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1793 + MSR_TYPE_W);
1794 + break;
1795 + case MSR_IA32_CR_PAT:
1796 ++ if (!kvm_pat_valid(data))
1797 ++ return 1;
1798 ++
1799 + if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
1800 +- if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
1801 +- return 1;
1802 + vmcs_write64(GUEST_IA32_PAT, data);
1803 + vcpu->arch.pat = data;
1804 + break;
1805 +@@ -4301,7 +4305,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1806 + return 1;
1807 + return vmx_set_vmx_msr(vcpu, msr_index, data);
1808 + case MSR_IA32_XSS:
1809 +- if (!vmx_xsaves_supported())
1810 ++ if (!vmx_xsaves_supported() ||
1811 ++ (!msr_info->host_initiated &&
1812 ++ !(guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
1813 ++ guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))))
1814 + return 1;
1815 + /*
1816 + * The only supported bit as of Skylake is bit 8, but
1817 +@@ -10437,28 +10444,21 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
1818 +
1819 + static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
1820 + {
1821 +- u32 exit_intr_info = 0;
1822 +- u16 basic_exit_reason = (u16)vmx->exit_reason;
1823 +-
1824 +- if (!(basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
1825 +- || basic_exit_reason == EXIT_REASON_EXCEPTION_NMI))
1826 ++ if (vmx->exit_reason != EXIT_REASON_EXCEPTION_NMI)
1827 + return;
1828 +
1829 +- if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
1830 +- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1831 +- vmx->exit_intr_info = exit_intr_info;
1832 ++ vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
1833 +
1834 + /* if exit due to PF check for async PF */
1835 +- if (is_page_fault(exit_intr_info))
1836 ++ if (is_page_fault(vmx->exit_intr_info))
1837 + vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
1838 +
1839 + /* Handle machine checks before interrupts are enabled */
1840 +- if (basic_exit_reason == EXIT_REASON_MCE_DURING_VMENTRY ||
1841 +- is_machine_check(exit_intr_info))
1842 ++ if (is_machine_check(vmx->exit_intr_info))
1843 + kvm_machine_check();
1844 +
1845 + /* We need to handle NMIs before interrupts are enabled */
1846 +- if (is_nmi(exit_intr_info)) {
1847 ++ if (is_nmi(vmx->exit_intr_info)) {
1848 + kvm_before_interrupt(&vmx->vcpu);
1849 + asm("int $2");
1850 + kvm_after_interrupt(&vmx->vcpu);
1851 +@@ -10756,6 +10756,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1852 + if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1853 + vmx_set_interrupt_shadow(vcpu, 0);
1854 +
1855 ++ kvm_load_guest_xcr0(vcpu);
1856 ++
1857 + if (static_cpu_has(X86_FEATURE_PKU) &&
1858 + kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
1859 + vcpu->arch.pkru != vmx->host_pkru)
1860 +@@ -10808,7 +10810,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1861 + "mov %%" _ASM_AX", %%cr2 \n\t"
1862 + "3: \n\t"
1863 + /* Check if vmlaunch of vmresume is needed */
1864 +- "cmpl $0, %c[launched](%0) \n\t"
1865 ++ "cmpb $0, %c[launched](%0) \n\t"
1866 + /* Load guest registers. Don't clobber flags. */
1867 + "mov %c[rax](%0), %%" _ASM_AX " \n\t"
1868 + "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
1869 +@@ -10971,10 +10973,15 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
1870 + __write_pkru(vmx->host_pkru);
1871 + }
1872 +
1873 ++ kvm_put_guest_xcr0(vcpu);
1874 ++
1875 + vmx->nested.nested_run_pending = 0;
1876 + vmx->idt_vectoring_info = 0;
1877 +
1878 + vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
1879 ++ if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
1880 ++ kvm_machine_check();
1881 ++
1882 + if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
1883 + return;
1884 +
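The MSR_IA32_XSS changes above allow host-initiated access whenever XSAVES is supported, while guest accesses additionally require both CPUID bits. A sketch of that access rule with plain booleans standing in for the kernel helpers:

    #include <assert.h>
    #include <stdbool.h>

    static bool xss_access_allowed(bool xsaves_supported, bool host_initiated,
                                   bool guest_has_xsave, bool guest_has_xsaves)
    {
        if (!xsaves_supported)
            return false;
        if (host_initiated)
            return true;    /* save/restore from userspace */
        return guest_has_xsave && guest_has_xsaves;
    }

    int main(void)
    {
        assert(xss_access_allowed(true, true, false, false));
        assert(!xss_access_allowed(true, false, true, false));
        assert(xss_access_allowed(true, false, true, true));
        return 0;
    }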
1885 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1886 +index c27ce6059090..cbc39751f36b 100644
1887 +--- a/arch/x86/kvm/x86.c
1888 ++++ b/arch/x86/kvm/x86.c
1889 +@@ -713,7 +713,7 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
1890 + }
1891 + EXPORT_SYMBOL_GPL(kvm_lmsw);
1892 +
1893 +-static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
1894 ++void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
1895 + {
1896 + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
1897 + !vcpu->guest_xcr0_loaded) {
1898 +@@ -723,8 +723,9 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
1899 + vcpu->guest_xcr0_loaded = 1;
1900 + }
1901 + }
1902 ++EXPORT_SYMBOL_GPL(kvm_load_guest_xcr0);
1903 +
1904 +-static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
1905 ++void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
1906 + {
1907 + if (vcpu->guest_xcr0_loaded) {
1908 + if (vcpu->arch.xcr0 != host_xcr0)
1909 +@@ -732,6 +733,7 @@ static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
1910 + vcpu->guest_xcr0_loaded = 0;
1911 + }
1912 + }
1913 ++EXPORT_SYMBOL_GPL(kvm_put_guest_xcr0);
1914 +
1915 + static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1916 + {
1917 +@@ -2494,7 +2496,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1918 +
1919 + break;
1920 + case MSR_KVM_PV_EOI_EN:
1921 +- if (kvm_lapic_enable_pv_eoi(vcpu, data))
1922 ++ if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
1923 + return 1;
1924 + break;
1925 +
1926 +@@ -4116,7 +4118,7 @@ static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
1927 + }
1928 +
1929 + static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1930 +- u32 kvm_nr_mmu_pages)
1931 ++ unsigned long kvm_nr_mmu_pages)
1932 + {
1933 + if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
1934 + return -EINVAL;
1935 +@@ -4130,7 +4132,7 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
1936 + return 0;
1937 + }
1938 +
1939 +-static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1940 ++static unsigned long kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
1941 + {
1942 + return kvm->arch.n_max_mmu_pages;
1943 + }
1944 +@@ -7225,9 +7227,9 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
1945 + put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
1946 + }
1947 +
1948 ++#ifdef CONFIG_X86_64
1949 + static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
1950 + {
1951 +-#ifdef CONFIG_X86_64
1952 + struct desc_ptr dt;
1953 + struct kvm_segment seg;
1954 + unsigned long val;
1955 +@@ -7277,10 +7279,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
1956 +
1957 + for (i = 0; i < 6; i++)
1958 + enter_smm_save_seg_64(vcpu, buf, i);
1959 +-#else
1960 +- WARN_ON_ONCE(1);
1961 +-#endif
1962 + }
1963 ++#endif
1964 +
1965 + static void enter_smm(struct kvm_vcpu *vcpu)
1966 + {
1967 +@@ -7291,9 +7291,11 @@ static void enter_smm(struct kvm_vcpu *vcpu)
1968 +
1969 + trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true);
1970 + memset(buf, 0, 512);
1971 ++#ifdef CONFIG_X86_64
1972 + if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
1973 + enter_smm_save_state_64(vcpu, buf);
1974 + else
1975 ++#endif
1976 + enter_smm_save_state_32(vcpu, buf);
1977 +
1978 + /*
1979 +@@ -7351,8 +7353,10 @@ static void enter_smm(struct kvm_vcpu *vcpu)
1980 + kvm_set_segment(vcpu, &ds, VCPU_SREG_GS);
1981 + kvm_set_segment(vcpu, &ds, VCPU_SREG_SS);
1982 +
1983 ++#ifdef CONFIG_X86_64
1984 + if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
1985 + kvm_x86_ops->set_efer(vcpu, 0);
1986 ++#endif
1987 +
1988 + kvm_update_cpuid(vcpu);
1989 + kvm_mmu_reset_context(vcpu);
1990 +@@ -7649,8 +7653,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
1991 + goto cancel_injection;
1992 + }
1993 +
1994 +- kvm_load_guest_xcr0(vcpu);
1995 +-
1996 + if (req_immediate_exit) {
1997 + kvm_make_request(KVM_REQ_EVENT, vcpu);
1998 + kvm_x86_ops->request_immediate_exit(vcpu);
1999 +@@ -7703,8 +7705,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
2000 + vcpu->mode = OUTSIDE_GUEST_MODE;
2001 + smp_wmb();
2002 +
2003 +- kvm_put_guest_xcr0(vcpu);
2004 +-
2005 + kvm_before_interrupt(vcpu);
2006 + kvm_x86_ops->handle_external_intr(vcpu);
2007 + kvm_after_interrupt(vcpu);
2008 +diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
2009 +index 1826ed9dd1c8..3a91ea760f07 100644
2010 +--- a/arch/x86/kvm/x86.h
2011 ++++ b/arch/x86/kvm/x86.h
2012 +@@ -345,4 +345,16 @@ static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
2013 + __this_cpu_write(current_vcpu, NULL);
2014 + }
2015 +
2016 ++
2017 ++static inline bool kvm_pat_valid(u64 data)
2018 ++{
2019 ++ if (data & 0xF8F8F8F8F8F8F8F8ull)
2020 ++ return false;
2021 ++ /* 0, 1, 4, 5, 6, 7 are valid values. */
2022 ++ return (data | ((data & 0x0202020202020202ull) << 1)) == data;
2023 ++}
2024 ++
2025 ++void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
2026 ++void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
2027 ++
2028 + #endif
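kvm_pat_valid() above encodes "each PAT byte must be one of 0, 1, 4, 5, 6, 7" in two steps: the first mask rejects any byte value of 8 or more, and copying bit 1 into bit 2 changes the value exactly for 2 and 3, which the equality test catches. A brute-force cross-check over every byte value:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool pat_valid(uint64_t data)
    {
        if (data & 0xF8F8F8F8F8F8F8F8ull)
            return false;
        return (data | ((data & 0x0202020202020202ull) << 1)) == data;
    }

    int main(void)
    {
        for (unsigned int b = 0; b < 256; b++) {
            bool expect = b == 0 || b == 1 || (b >= 4 && b <= 7);
            /* replicate the byte in every lane to exercise the masks */
            uint64_t data = 0x0101010101010101ull * b;
            assert(pat_valid(data) == expect);
        }
        return 0;
    }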
2029 +diff --git a/block/blk-core.c b/block/blk-core.c
2030 +index 4a3e1f417880..af635f878f96 100644
2031 +--- a/block/blk-core.c
2032 ++++ b/block/blk-core.c
2033 +@@ -816,7 +816,8 @@ void blk_cleanup_queue(struct request_queue *q)
2034 + blk_exit_queue(q);
2035 +
2036 + if (q->mq_ops)
2037 +- blk_mq_free_queue(q);
2038 ++ blk_mq_exit_queue(q);
2039 ++
2040 + percpu_ref_exit(&q->q_usage_counter);
2041 +
2042 + spin_lock_irq(lock);
2043 +diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
2044 +index f4f7c73fb828..0529e94a20f7 100644
2045 +--- a/block/blk-iolatency.c
2046 ++++ b/block/blk-iolatency.c
2047 +@@ -560,15 +560,12 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
2048 + u64 now = ktime_to_ns(ktime_get());
2049 + bool issue_as_root = bio_issue_as_root_blkg(bio);
2050 + bool enabled = false;
2051 ++ int inflight = 0;
2052 +
2053 + blkg = bio->bi_blkg;
2054 + if (!blkg)
2055 + return;
2056 +
2057 +- /* We didn't actually submit this bio, don't account it. */
2058 +- if (bio->bi_status == BLK_STS_AGAIN)
2059 +- return;
2060 +-
2061 + iolat = blkg_to_lat(bio->bi_blkg);
2062 + if (!iolat)
2063 + return;
2064 +@@ -585,41 +582,24 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
2065 + }
2066 + rqw = &iolat->rq_wait;
2067 +
2068 +- atomic_dec(&rqw->inflight);
2069 +- if (iolat->min_lat_nsec == 0)
2070 +- goto next;
2071 +- iolatency_record_time(iolat, &bio->bi_issue, now,
2072 +- issue_as_root);
2073 +- window_start = atomic64_read(&iolat->window_start);
2074 +- if (now > window_start &&
2075 +- (now - window_start) >= iolat->cur_win_nsec) {
2076 +- if (atomic64_cmpxchg(&iolat->window_start,
2077 +- window_start, now) == window_start)
2078 +- iolatency_check_latencies(iolat, now);
2079 ++ inflight = atomic_dec_return(&rqw->inflight);
2080 ++ WARN_ON_ONCE(inflight < 0);
2081 ++ /*
2082 ++ * If bi_status is BLK_STS_AGAIN, the bio wasn't actually
2083 ++ * submitted, so do not account for it.
2084 ++ */
2085 ++ if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {
2086 ++ iolatency_record_time(iolat, &bio->bi_issue, now,
2087 ++ issue_as_root);
2088 ++ window_start = atomic64_read(&iolat->window_start);
2089 ++ if (now > window_start &&
2090 ++ (now - window_start) >= iolat->cur_win_nsec) {
2091 ++ if (atomic64_cmpxchg(&iolat->window_start,
2092 ++ window_start, now) == window_start)
2093 ++ iolatency_check_latencies(iolat, now);
2094 ++ }
2095 + }
2096 +-next:
2097 +- wake_up(&rqw->wait);
2098 +- blkg = blkg->parent;
2099 +- }
2100 +-}
2101 +-
2102 +-static void blkcg_iolatency_cleanup(struct rq_qos *rqos, struct bio *bio)
2103 +-{
2104 +- struct blkcg_gq *blkg;
2105 +-
2106 +- blkg = bio->bi_blkg;
2107 +- while (blkg && blkg->parent) {
2108 +- struct rq_wait *rqw;
2109 +- struct iolatency_grp *iolat;
2110 +-
2111 +- iolat = blkg_to_lat(blkg);
2112 +- if (!iolat)
2113 +- goto next;
2114 +-
2115 +- rqw = &iolat->rq_wait;
2116 +- atomic_dec(&rqw->inflight);
2117 + wake_up(&rqw->wait);
2118 +-next:
2119 + blkg = blkg->parent;
2120 + }
2121 + }
2122 +@@ -635,7 +615,6 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
2123 +
2124 + static struct rq_qos_ops blkcg_iolatency_ops = {
2125 + .throttle = blkcg_iolatency_throttle,
2126 +- .cleanup = blkcg_iolatency_cleanup,
2127 + .done_bio = blkcg_iolatency_done_bio,
2128 + .exit = blkcg_iolatency_exit,
2129 + };
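The blk-iolatency rework above folds the separate cleanup path into done_bio: every completion decrements the inflight counter exactly once (with a warning if it would go negative) and issues the wakeup, while only genuinely submitted bios are timed. A userspace sketch of that accounting shape, with stand-ins for rq_wait and wake_up():

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int inflight = 0;
    static int timed_completions;

    static void submit(void)
    {
        atomic_fetch_add(&inflight, 1);
    }

    static void done(bool really_submitted)
    {
        int now = atomic_fetch_sub(&inflight, 1) - 1;

        assert(now >= 0);              /* mirrors the WARN_ON_ONCE() */
        if (really_submitted)
            timed_completions++;       /* latency is recorded here */
        /* the wakeup fires on every completion, timed or not */
    }

    int main(void)
    {
        submit();
        done(true);
        submit();
        done(false);    /* e.g. BLK_STS_AGAIN: decrement, but no timing */
        assert(atomic_load(&inflight) == 0 && timed_completions == 1);
        return 0;
    }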
2130 +diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
2131 +index aafb44224c89..0b7297a43ccd 100644
2132 +--- a/block/blk-mq-sysfs.c
2133 ++++ b/block/blk-mq-sysfs.c
2134 +@@ -10,6 +10,7 @@
2135 + #include <linux/smp.h>
2136 +
2137 + #include <linux/blk-mq.h>
2138 ++#include "blk.h"
2139 + #include "blk-mq.h"
2140 + #include "blk-mq-tag.h"
2141 +
2142 +@@ -21,6 +22,11 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
2143 + {
2144 + struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
2145 + kobj);
2146 ++
2147 ++ if (hctx->flags & BLK_MQ_F_BLOCKING)
2148 ++ cleanup_srcu_struct(hctx->srcu);
2149 ++ blk_free_flush_queue(hctx->fq);
2150 ++ sbitmap_free(&hctx->ctx_map);
2151 + free_cpumask_var(hctx->cpumask);
2152 + kfree(hctx->ctxs);
2153 + kfree(hctx);
2154 +diff --git a/block/blk-mq.c b/block/blk-mq.c
2155 +index 70d839b9c3b0..455fda99255a 100644
2156 +--- a/block/blk-mq.c
2157 ++++ b/block/blk-mq.c
2158 +@@ -2157,12 +2157,7 @@ static void blk_mq_exit_hctx(struct request_queue *q,
2159 + if (set->ops->exit_hctx)
2160 + set->ops->exit_hctx(hctx, hctx_idx);
2161 +
2162 +- if (hctx->flags & BLK_MQ_F_BLOCKING)
2163 +- cleanup_srcu_struct(hctx->srcu);
2164 +-
2165 + blk_mq_remove_cpuhp(hctx);
2166 +- blk_free_flush_queue(hctx->fq);
2167 +- sbitmap_free(&hctx->ctx_map);
2168 + }
2169 +
2170 + static void blk_mq_exit_hw_queues(struct request_queue *q,
2171 +@@ -2662,7 +2657,8 @@ err_exit:
2172 + }
2173 + EXPORT_SYMBOL(blk_mq_init_allocated_queue);
2174 +
2175 +-void blk_mq_free_queue(struct request_queue *q)
2176 ++/* tags can _not_ be used after returning from blk_mq_exit_queue */
2177 ++void blk_mq_exit_queue(struct request_queue *q)
2178 + {
2179 + struct blk_mq_tag_set *set = q->tag_set;
2180 +
2181 +diff --git a/block/blk-mq.h b/block/blk-mq.h
2182 +index 9497b47e2526..5ad9251627f8 100644
2183 +--- a/block/blk-mq.h
2184 ++++ b/block/blk-mq.h
2185 +@@ -31,7 +31,7 @@ struct blk_mq_ctx {
2186 + } ____cacheline_aligned_in_smp;
2187 +
2188 + void blk_mq_freeze_queue(struct request_queue *q);
2189 +-void blk_mq_free_queue(struct request_queue *q);
2190 ++void blk_mq_exit_queue(struct request_queue *q);
2191 + int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
2192 + void blk_mq_wake_waiters(struct request_queue *q);
2193 + bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
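The rename to blk_mq_exit_queue() reflects that the hctx resources are now freed from the sysfs kobject release callback rather than at exit time, so whoever drops the last sysfs reference triggers the teardown and can never observe freed memory. A userspace sketch of that free-in-release pattern, with a plain refcount standing in for struct kobject:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct ctx {
        atomic_int refs;
        int *resource;
    };

    static struct ctx *ctx_new(void)
    {
        struct ctx *c = malloc(sizeof(*c));

        atomic_init(&c->refs, 1);
        c->resource = malloc(sizeof(*c->resource));
        return c;
    }

    static void ctx_get(struct ctx *c)
    {
        atomic_fetch_add(&c->refs, 1);
    }

    static void ctx_put(struct ctx *c)
    {
        if (atomic_fetch_sub(&c->refs, 1) == 1) {
            free(c->resource);    /* the "release" callback */
            free(c);
        }
    }

    int main(void)
    {
        struct ctx *c = ctx_new();

        ctx_get(c);           /* e.g. an open sysfs file */
        ctx_put(c);           /* queue teardown drops its reference... */
        *c->resource = 42;    /* ...yet the memory is still live */
        ctx_put(c);           /* last reference: release frees it all */
        return 0;
    }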
2194 +diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
2195 +index be5d1abd3e8e..8390c5b54c3b 100644
2196 +--- a/drivers/char/tpm/st33zp24/i2c.c
2197 ++++ b/drivers/char/tpm/st33zp24/i2c.c
2198 +@@ -33,7 +33,7 @@
2199 +
2200 + struct st33zp24_i2c_phy {
2201 + struct i2c_client *client;
2202 +- u8 buf[TPM_BUFSIZE + 1];
2203 ++ u8 buf[ST33ZP24_BUFSIZE + 1];
2204 + int io_lpcpd;
2205 + };
2206 +
2207 +diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
2208 +index d7909ab287a8..ff019a1e3c68 100644
2209 +--- a/drivers/char/tpm/st33zp24/spi.c
2210 ++++ b/drivers/char/tpm/st33zp24/spi.c
2211 +@@ -63,7 +63,7 @@
2212 + * some latency byte before the answer is available (max 15).
2213 + * We have 2048 + 1024 + 15.
2214 + */
2215 +-#define ST33ZP24_SPI_BUFFER_SIZE (TPM_BUFSIZE + (TPM_BUFSIZE / 2) +\
2216 ++#define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) +\
2217 + MAX_SPI_LATENCY)
2218 +
2219 +
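The sizing comment above works out to 2048 + 2048/2 + 15 = 3087 bytes for the SPI buffer. A static assertion pinning that arithmetic down, with the constants copied from the patch (MAX_SPI_LATENCY taken as 15 per the comment):

    #include <assert.h>

    #define MAX_SPI_LATENCY   15
    #define ST33ZP24_BUFSIZE  2048
    #define ST33ZP24_SPI_BUFFER_SIZE (ST33ZP24_BUFSIZE + (ST33ZP24_BUFSIZE / 2) + \
                                      MAX_SPI_LATENCY)

    static_assert(ST33ZP24_SPI_BUFFER_SIZE == 3087, "2048 + 1024 + 15");

    int main(void)
    {
        return 0;
    }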
2220 +diff --git a/drivers/char/tpm/st33zp24/st33zp24.h b/drivers/char/tpm/st33zp24/st33zp24.h
2221 +index 6f4a4198af6a..20da0a84988d 100644
2222 +--- a/drivers/char/tpm/st33zp24/st33zp24.h
2223 ++++ b/drivers/char/tpm/st33zp24/st33zp24.h
2224 +@@ -18,8 +18,8 @@
2225 + #ifndef __LOCAL_ST33ZP24_H__
2226 + #define __LOCAL_ST33ZP24_H__
2227 +
2228 +-#define TPM_WRITE_DIRECTION 0x80
2229 +-#define TPM_BUFSIZE 2048
2230 ++#define TPM_WRITE_DIRECTION 0x80
2231 ++#define ST33ZP24_BUFSIZE 2048
2232 +
2233 + struct st33zp24_dev {
2234 + struct tpm_chip *chip;
2235 +diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
2236 +index 977fd42daa1b..3b4e9672ff6c 100644
2237 +--- a/drivers/char/tpm/tpm_i2c_infineon.c
2238 ++++ b/drivers/char/tpm/tpm_i2c_infineon.c
2239 +@@ -26,8 +26,7 @@
2240 + #include <linux/wait.h>
2241 + #include "tpm.h"
2242 +
2243 +-/* max. buffer size supported by our TPM */
2244 +-#define TPM_BUFSIZE 1260
2245 ++#define TPM_I2C_INFINEON_BUFSIZE 1260
2246 +
2247 + /* max. number of iterations after I2C NAK */
2248 + #define MAX_COUNT 3
2249 +@@ -63,11 +62,13 @@ enum i2c_chip_type {
2250 + UNKNOWN,
2251 + };
2252 +
2253 +-/* Structure to store I2C TPM specific stuff */
2254 + struct tpm_inf_dev {
2255 + struct i2c_client *client;
2256 + int locality;
2257 +- u8 buf[TPM_BUFSIZE + sizeof(u8)]; /* max. buffer size + addr */
2258 ++ /* In addition to the data itself, the buffer must fit the 7-bit I2C
2259 ++ * address and the direction bit.
2260 ++ */
2261 ++ u8 buf[TPM_I2C_INFINEON_BUFSIZE + 1];
2262 + struct tpm_chip *chip;
2263 + enum i2c_chip_type chip_type;
2264 + unsigned int adapterlimit;
2265 +@@ -219,7 +220,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
2266 + .buf = tpm_dev.buf
2267 + };
2268 +
2269 +- if (len > TPM_BUFSIZE)
2270 ++ if (len > TPM_I2C_INFINEON_BUFSIZE)
2271 + return -EINVAL;
2272 +
2273 + if (!tpm_dev.client->adapter->algo->master_xfer)
2274 +@@ -527,8 +528,8 @@ static int tpm_tis_i2c_send(struct tpm_chip *chip, u8 *buf, size_t len)
2275 + u8 retries = 0;
2276 + u8 sts = TPM_STS_GO;
2277 +
2278 +- if (len > TPM_BUFSIZE)
2279 +- return -E2BIG; /* command is too long for our tpm, sorry */
2280 ++ if (len > TPM_I2C_INFINEON_BUFSIZE)
2281 ++ return -E2BIG;
2282 +
2283 + if (request_locality(chip, 0) < 0)
2284 + return -EBUSY;
2285 +diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
2286 +index b8defdfdf2dc..280308009784 100644
2287 +--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
2288 ++++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
2289 +@@ -35,14 +35,12 @@
2290 + #include "tpm.h"
2291 +
2292 + /* I2C interface offsets */
2293 +-#define TPM_STS 0x00
2294 +-#define TPM_BURST_COUNT 0x01
2295 +-#define TPM_DATA_FIFO_W 0x20
2296 +-#define TPM_DATA_FIFO_R 0x40
2297 +-#define TPM_VID_DID_RID 0x60
2298 +-/* TPM command header size */
2299 +-#define TPM_HEADER_SIZE 10
2300 +-#define TPM_RETRY 5
2301 ++#define TPM_STS 0x00
2302 ++#define TPM_BURST_COUNT 0x01
2303 ++#define TPM_DATA_FIFO_W 0x20
2304 ++#define TPM_DATA_FIFO_R 0x40
2305 ++#define TPM_VID_DID_RID 0x60
2306 ++#define TPM_I2C_RETRIES 5
2307 + /*
2308 + * I2C bus device maximum buffer size w/o counting I2C address or command
2309 + * i.e. max size required for I2C write is 34 = addr, command, 32 bytes data
2310 +@@ -292,7 +290,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
2311 + dev_err(dev, "%s() count < header size\n", __func__);
2312 + return -EIO;
2313 + }
2314 +- for (retries = 0; retries < TPM_RETRY; retries++) {
2315 ++ for (retries = 0; retries < TPM_I2C_RETRIES; retries++) {
2316 + if (retries > 0) {
2317 + /* if this is not the first trial, set responseRetry */
2318 + i2c_nuvoton_write_status(client,
2319 +diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
2320 +index 0934d3724495..4080d4e78e8e 100644
2321 +--- a/drivers/clk/clk-s2mps11.c
2322 ++++ b/drivers/clk/clk-s2mps11.c
2323 +@@ -255,7 +255,7 @@ MODULE_DEVICE_TABLE(platform, s2mps11_clk_id);
2324 + * This requires of_device_id table. In the same time this will not change the
2325 + * actual *device* matching so do not add .of_match_table.
2326 + */
2327 +-static const struct of_device_id s2mps11_dt_match[] = {
2328 ++static const struct of_device_id s2mps11_dt_match[] __used = {
2329 + {
2330 + .compatible = "samsung,s2mps11-clk",
2331 + .data = (void *)S2MPS11X,
2332 +diff --git a/drivers/clk/tegra/clk-audio-sync.c b/drivers/clk/tegra/clk-audio-sync.c
2333 +index 92d04ce2dee6..53cdc0ec40f3 100644
2334 +--- a/drivers/clk/tegra/clk-audio-sync.c
2335 ++++ b/drivers/clk/tegra/clk-audio-sync.c
2336 +@@ -55,7 +55,7 @@ const struct clk_ops tegra_clk_sync_source_ops = {
2337 + };
2338 +
2339 + struct clk *tegra_clk_register_sync_source(const char *name,
2340 +- unsigned long rate, unsigned long max_rate)
2341 ++ unsigned long max_rate)
2342 + {
2343 + struct tegra_clk_sync_source *sync;
2344 + struct clk_init_data init;
2345 +@@ -67,7 +67,6 @@ struct clk *tegra_clk_register_sync_source(const char *name,
2346 + return ERR_PTR(-ENOMEM);
2347 + }
2348 +
2349 +- sync->rate = rate;
2350 + sync->max_rate = max_rate;
2351 +
2352 + init.ops = &tegra_clk_sync_source_ops;
2353 +diff --git a/drivers/clk/tegra/clk-tegra-audio.c b/drivers/clk/tegra/clk-tegra-audio.c
2354 +index b37cae7af26d..02dd6487d855 100644
2355 +--- a/drivers/clk/tegra/clk-tegra-audio.c
2356 ++++ b/drivers/clk/tegra/clk-tegra-audio.c
2357 +@@ -49,8 +49,6 @@ struct tegra_sync_source_initdata {
2358 + #define SYNC(_name) \
2359 + {\
2360 + .name = #_name,\
2361 +- .rate = 24000000,\
2362 +- .max_rate = 24000000,\
2363 + .clk_id = tegra_clk_ ## _name,\
2364 + }
2365 +
2366 +@@ -176,7 +174,7 @@ static void __init tegra_audio_sync_clk_init(void __iomem *clk_base,
2367 + void __init tegra_audio_clk_init(void __iomem *clk_base,
2368 + void __iomem *pmc_base, struct tegra_clk *tegra_clks,
2369 + struct tegra_audio_clk_info *audio_info,
2370 +- unsigned int num_plls)
2371 ++ unsigned int num_plls, unsigned long sync_max_rate)
2372 + {
2373 + struct clk *clk;
2374 + struct clk **dt_clk;
2375 +@@ -221,8 +219,7 @@ void __init tegra_audio_clk_init(void __iomem *clk_base,
2376 + if (!dt_clk)
2377 + continue;
2378 +
2379 +- clk = tegra_clk_register_sync_source(data->name,
2380 +- data->rate, data->max_rate);
2381 ++ clk = tegra_clk_register_sync_source(data->name, sync_max_rate);
2382 + *dt_clk = clk;
2383 + }
2384 +
2385 +diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
2386 +index 1824f014202b..625d11091330 100644
2387 +--- a/drivers/clk/tegra/clk-tegra114.c
2388 ++++ b/drivers/clk/tegra/clk-tegra114.c
2389 +@@ -1190,6 +1190,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2390 + { TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 },
2391 + { TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 },
2392 + { TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 },
2393 ++ { TEGRA114_CLK_SPDIF_IN_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2394 ++ { TEGRA114_CLK_I2S0_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2395 ++ { TEGRA114_CLK_I2S1_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2396 ++ { TEGRA114_CLK_I2S2_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2397 ++ { TEGRA114_CLK_I2S3_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2398 ++ { TEGRA114_CLK_I2S4_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2399 ++ { TEGRA114_CLK_VIMCLK_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
2400 + /* must be the last entry */
2401 + { TEGRA114_CLK_CLK_MAX, TEGRA114_CLK_CLK_MAX, 0, 0 },
2402 + };
2403 +@@ -1362,7 +1369,7 @@ static void __init tegra114_clock_init(struct device_node *np)
2404 + tegra114_periph_clk_init(clk_base, pmc_base);
2405 + tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks,
2406 + tegra114_audio_plls,
2407 +- ARRAY_SIZE(tegra114_audio_plls));
2408 ++ ARRAY_SIZE(tegra114_audio_plls), 24000000);
2409 + tegra_pmc_clk_init(pmc_base, tegra114_clks);
2410 + tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks,
2411 + &pll_x_params);
2412 +diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
2413 +index b6cf28ca2ed2..df0018f7bf7e 100644
2414 +--- a/drivers/clk/tegra/clk-tegra124.c
2415 ++++ b/drivers/clk/tegra/clk-tegra124.c
2416 +@@ -1291,6 +1291,13 @@ static struct tegra_clk_init_table common_init_table[] __initdata = {
2417 + { TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1 },
2418 + { TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0 },
2419 + { TEGRA124_CLK_VIC03, TEGRA124_CLK_PLL_C3, 0, 0 },
2420 ++ { TEGRA124_CLK_SPDIF_IN_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2421 ++ { TEGRA124_CLK_I2S0_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2422 ++ { TEGRA124_CLK_I2S1_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2423 ++ { TEGRA124_CLK_I2S2_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2424 ++ { TEGRA124_CLK_I2S3_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2425 ++ { TEGRA124_CLK_I2S4_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2426 ++ { TEGRA124_CLK_VIMCLK_SYNC, TEGRA124_CLK_CLK_MAX, 24576000, 0 },
2427 + /* must be the last entry */
2428 + { TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0 },
2429 + };
2430 +@@ -1455,7 +1462,7 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np)
2431 + tegra124_periph_clk_init(clk_base, pmc_base);
2432 + tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks,
2433 + tegra124_audio_plls,
2434 +- ARRAY_SIZE(tegra124_audio_plls));
2435 ++ ARRAY_SIZE(tegra124_audio_plls), 24576000);
2436 + tegra_pmc_clk_init(pmc_base, tegra124_clks);
2437 +
2438 + /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
2439 +diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
2440 +index 4e1bc23c9865..080bfa24863e 100644
2441 +--- a/drivers/clk/tegra/clk-tegra210.c
2442 ++++ b/drivers/clk/tegra/clk-tegra210.c
2443 +@@ -3369,6 +3369,15 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2444 + { TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 },
2445 + { TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
2446 + { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
2447 ++ { TEGRA210_CLK_SPDIF_IN_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2448 ++ { TEGRA210_CLK_I2S0_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2449 ++ { TEGRA210_CLK_I2S1_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2450 ++ { TEGRA210_CLK_I2S2_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2451 ++ { TEGRA210_CLK_I2S3_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2452 ++ { TEGRA210_CLK_I2S4_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2453 ++ { TEGRA210_CLK_VIMCLK_SYNC, TEGRA210_CLK_CLK_MAX, 24576000, 0 },
2454 ++ { TEGRA210_CLK_HDA, TEGRA210_CLK_PLL_P, 51000000, 0 },
2455 ++ { TEGRA210_CLK_HDA2CODEC_2X, TEGRA210_CLK_PLL_P, 48000000, 0 },
2456 + /* This MUST be the last entry. */
2457 + { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
2458 + };
2459 +@@ -3562,7 +3571,7 @@ static void __init tegra210_clock_init(struct device_node *np)
2460 + tegra210_periph_clk_init(clk_base, pmc_base);
2461 + tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks,
2462 + tegra210_audio_plls,
2463 +- ARRAY_SIZE(tegra210_audio_plls));
2464 ++ ARRAY_SIZE(tegra210_audio_plls), 24576000);
2465 + tegra_pmc_clk_init(pmc_base, tegra210_clks);
2466 +
2467 + /* For Tegra210, PLLD is the only source for DSIA & DSIB */
2468 +diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
2469 +index acfe661b2ae7..e0aaecd98fbf 100644
2470 +--- a/drivers/clk/tegra/clk-tegra30.c
2471 ++++ b/drivers/clk/tegra/clk-tegra30.c
2472 +@@ -1267,6 +1267,13 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2473 + { TEGRA30_CLK_GR3D2, TEGRA30_CLK_PLL_C, 300000000, 0 },
2474 + { TEGRA30_CLK_PLL_U, TEGRA30_CLK_CLK_MAX, 480000000, 0 },
2475 + { TEGRA30_CLK_VDE, TEGRA30_CLK_CLK_MAX, 600000000, 0 },
2476 ++ { TEGRA30_CLK_SPDIF_IN_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2477 ++ { TEGRA30_CLK_I2S0_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2478 ++ { TEGRA30_CLK_I2S1_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2479 ++ { TEGRA30_CLK_I2S2_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2480 ++ { TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2481 ++ { TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2482 ++ { TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
2483 + /* must be the last entry */
2484 + { TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
2485 + };
2486 +@@ -1344,7 +1351,7 @@ static void __init tegra30_clock_init(struct device_node *np)
2487 + tegra30_periph_clk_init();
2488 + tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks,
2489 + tegra30_audio_plls,
2490 +- ARRAY_SIZE(tegra30_audio_plls));
2491 ++ ARRAY_SIZE(tegra30_audio_plls), 24000000);
2492 + tegra_pmc_clk_init(pmc_base, tegra30_clks);
2493 +
2494 + tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
2495 +diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
2496 +index d2c3a010f8e9..09bccbb9640c 100644
2497 +--- a/drivers/clk/tegra/clk.h
2498 ++++ b/drivers/clk/tegra/clk.h
2499 +@@ -41,7 +41,7 @@ extern const struct clk_ops tegra_clk_sync_source_ops;
2500 + extern int *periph_clk_enb_refcnt;
2501 +
2502 + struct clk *tegra_clk_register_sync_source(const char *name,
2503 +- unsigned long fixed_rate, unsigned long max_rate);
2504 ++ unsigned long max_rate);
2505 +
2506 + /**
2507 + * struct tegra_clk_frac_div - fractional divider clock
2508 +@@ -796,7 +796,7 @@ void tegra_register_devclks(struct tegra_devclk *dev_clks, int num);
2509 + void tegra_audio_clk_init(void __iomem *clk_base,
2510 + void __iomem *pmc_base, struct tegra_clk *tegra_clks,
2511 + struct tegra_audio_clk_info *audio_info,
2512 +- unsigned int num_plls);
2513 ++ unsigned int num_plls, unsigned long sync_max_rate);
2514 +
2515 + void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base,
2516 + struct tegra_clk *tegra_clks,
2517 +diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
2518 +index 1ff229c2aeab..186a2536fb8b 100644
2519 +--- a/drivers/crypto/ccree/cc_driver.c
2520 ++++ b/drivers/crypto/ccree/cc_driver.c
2521 +@@ -364,7 +364,7 @@ static int init_cc_resources(struct platform_device *plat_dev)
2522 + rc = cc_ivgen_init(new_drvdata);
2523 + if (rc) {
2524 + dev_err(dev, "cc_ivgen_init failed\n");
2525 +- goto post_power_mgr_err;
2526 ++ goto post_buf_mgr_err;
2527 + }
2528 +
2529 + /* Allocate crypto algs */
2530 +@@ -387,6 +387,9 @@ static int init_cc_resources(struct platform_device *plat_dev)
2531 + goto post_hash_err;
2532 + }
2533 +
2534 ++ /* All set, we can allow autosuspend */
2535 ++ cc_pm_go(new_drvdata);
2536 ++
2537 + /* If we got here and FIPS mode is enabled
2538 + * it means all FIPS test passed, so let TEE
2539 + * know we're good.
2540 +@@ -401,8 +404,6 @@ post_cipher_err:
2541 + cc_cipher_free(new_drvdata);
2542 + post_ivgen_err:
2543 + cc_ivgen_fini(new_drvdata);
2544 +-post_power_mgr_err:
2545 +- cc_pm_fini(new_drvdata);
2546 + post_buf_mgr_err:
2547 + cc_buffer_mgr_fini(new_drvdata);
2548 + post_req_mgr_err:
2549 +diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c
2550 +index 79fc0a37ba6e..638082dff183 100644
2551 +--- a/drivers/crypto/ccree/cc_pm.c
2552 ++++ b/drivers/crypto/ccree/cc_pm.c
2553 +@@ -103,20 +103,19 @@ int cc_pm_put_suspend(struct device *dev)
2554 +
2555 + int cc_pm_init(struct cc_drvdata *drvdata)
2556 + {
2557 +- int rc = 0;
2558 + struct device *dev = drvdata_to_dev(drvdata);
2559 +
2560 + /* must be before the enabling to avoid resdundent suspending */
2561 + pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
2562 + pm_runtime_use_autosuspend(dev);
2563 + /* activate the PM module */
2564 +- rc = pm_runtime_set_active(dev);
2565 +- if (rc)
2566 +- return rc;
2567 +- /* enable the PM module*/
2568 +- pm_runtime_enable(dev);
2569 ++ return pm_runtime_set_active(dev);
2570 ++}
2571 +
2572 +- return rc;
2573 ++/* enable the PM module*/
2574 ++void cc_pm_go(struct cc_drvdata *drvdata)
2575 ++{
2576 ++ pm_runtime_enable(drvdata_to_dev(drvdata));
2577 + }
2578 +
2579 + void cc_pm_fini(struct cc_drvdata *drvdata)
2580 +diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h
2581 +index 020a5403c58b..907a6db4d6c0 100644
2582 +--- a/drivers/crypto/ccree/cc_pm.h
2583 ++++ b/drivers/crypto/ccree/cc_pm.h
2584 +@@ -16,6 +16,7 @@
2585 + extern const struct dev_pm_ops ccree_pm;
2586 +
2587 + int cc_pm_init(struct cc_drvdata *drvdata);
2588 ++void cc_pm_go(struct cc_drvdata *drvdata);
2589 + void cc_pm_fini(struct cc_drvdata *drvdata);
2590 + int cc_pm_suspend(struct device *dev);
2591 + int cc_pm_resume(struct device *dev);
2592 +@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata)
2593 + return 0;
2594 + }
2595 +
2596 ++static inline void cc_pm_go(struct cc_drvdata *drvdata) {}
2597 ++
2598 + static inline void cc_pm_fini(struct cc_drvdata *drvdata) {}
2599 +
2600 + static inline int cc_pm_suspend(struct device *dev)
2601 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
2602 +index 5f3f54073818..17862b9ecccd 100644
2603 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
2604 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
2605 +@@ -1070,7 +1070,7 @@ void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
2606 + int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
2607 + {
2608 + struct amdgpu_device *adev = ring->adev;
2609 +- uint32_t rptr = amdgpu_ring_get_rptr(ring);
2610 ++ uint32_t rptr;
2611 + unsigned i;
2612 + int r, timeout = adev->usec_timeout;
2613 +
2614 +@@ -1084,6 +1084,9 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
2615 + ring->idx, r);
2616 + return r;
2617 + }
2618 ++
2619 ++ rptr = amdgpu_ring_get_rptr(ring);
2620 ++
2621 + amdgpu_ring_write(ring, VCE_CMD_END);
2622 + amdgpu_ring_commit(ring);
2623 +
2624 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
2625 +index 400fc74bbae2..205e683fb920 100644
2626 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
2627 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
2628 +@@ -431,7 +431,7 @@ error:
2629 + int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
2630 + {
2631 + struct amdgpu_device *adev = ring->adev;
2632 +- uint32_t rptr = amdgpu_ring_get_rptr(ring);
2633 ++ uint32_t rptr;
2634 + unsigned i;
2635 + int r;
2636 +
2637 +@@ -441,6 +441,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
2638 + ring->idx, r);
2639 + return r;
2640 + }
2641 ++
2642 ++ rptr = amdgpu_ring_get_rptr(ring);
2643 ++
2644 + amdgpu_ring_write(ring, VCN_ENC_CMD_END);
2645 + amdgpu_ring_commit(ring);
2646 +
2647 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2648 +index 46568497ef18..782411649816 100644
2649 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2650 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
2651 +@@ -82,7 +82,8 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
2652 +
2653 + static const struct soc15_reg_golden golden_settings_gc_9_0[] =
2654 + {
2655 +- SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000420),
2656 ++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG2, 0xf00fffff, 0x00000400),
2657 ++ SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0x80000000, 0x80000000),
2658 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_GPU_ID, 0x0000000f, 0x00000000),
2659 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3, 0x00000003, 0x82400024),
2660 + SOC15_REG_GOLDEN_VALUE(GC, 0, mmPA_SC_ENHANCE, 0x3fffffff, 0x00000001),
2661 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
2662 +index d4070839ac80..80613a74df42 100644
2663 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
2664 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
2665 +@@ -170,7 +170,7 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
2666 + static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
2667 + {
2668 + struct amdgpu_device *adev = ring->adev;
2669 +- uint32_t rptr = amdgpu_ring_get_rptr(ring);
2670 ++ uint32_t rptr;
2671 + unsigned i;
2672 + int r;
2673 +
2674 +@@ -180,6 +180,9 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
2675 + ring->idx, r);
2676 + return r;
2677 + }
2678 ++
2679 ++ rptr = amdgpu_ring_get_rptr(ring);
2680 ++
2681 + amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
2682 + amdgpu_ring_commit(ring);
2683 +
2684 +diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
2685 +index 057151b17b45..ce16b8329af0 100644
2686 +--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
2687 ++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
2688 +@@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
2689 + static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
2690 + {
2691 + struct amdgpu_device *adev = ring->adev;
2692 +- uint32_t rptr = amdgpu_ring_get_rptr(ring);
2693 ++ uint32_t rptr;
2694 + unsigned i;
2695 + int r;
2696 +
2697 +@@ -188,6 +188,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
2698 + ring->me, ring->idx, r);
2699 + return r;
2700 + }
2701 ++
2702 ++ rptr = amdgpu_ring_get_rptr(ring);
2703 ++
2704 + amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
2705 + amdgpu_ring_commit(ring);
2706 +
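The same fix recurs in amdgpu_vce, amdgpu_vcn, uvd_v6_0 and uvd_v7_0 above: the read pointer used to be sampled in the declaration, before amdgpu_ring_alloc() had run, so the later moved-pointer check could compare against a stale value. A minimal sketch of the corrected ordering, with plain stand-ins for the ring (the amdgpu names appear only in comments):

    #include <assert.h>

    struct ring { int allocated; unsigned int rptr; };

    static int ring_alloc(struct ring *r)
    {
        r->allocated = 1;    /* may fail in the real driver */
        r->rptr = 42;        /* pointer is only meaningful now */
        return 0;
    }

    static int ring_test(struct ring *r)
    {
        unsigned int rptr;

        if (ring_alloc(r))    /* bail out before touching hw state */
            return -1;

        rptr = r->rptr;       /* sample after the ring is set up */
        r->rptr++;            /* "submit" work that moves the pointer */

        return r->rptr != rptr ? 0 : -1;    /* pointer moved: pass */
    }

    int main(void)
    {
        struct ring r = { 0, 0 };

        assert(ring_test(&r) == 0);
        return 0;
    }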
2707 +diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
2708 +index 5aba50f63ac6..938d0053a820 100644
2709 +--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
2710 ++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
2711 +@@ -310,6 +310,7 @@ static const struct kfd_deviceid supported_devices[] = {
2712 + { 0x67CF, &polaris10_device_info }, /* Polaris10 */
2713 + { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
2714 + { 0x67DF, &polaris10_device_info }, /* Polaris10 */
2715 ++ { 0x6FDF, &polaris10_device_info }, /* Polaris10 */
2716 + { 0x67E0, &polaris11_device_info }, /* Polaris11 */
2717 + { 0x67E1, &polaris11_device_info }, /* Polaris11 */
2718 + { 0x67E3, &polaris11_device_info }, /* Polaris11 */
2719 +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2720 +index 59445c83f023..c85bea70d965 100644
2721 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2722 ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2723 +@@ -377,9 +377,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
2724 + drm_connector_attach_encoder(&aconnector->base,
2725 + &aconnector->mst_encoder->base);
2726 +
2727 +- /*
2728 +- * TODO: understand why this one is needed
2729 +- */
2730 + drm_object_attach_property(
2731 + &connector->base,
2732 + dev->mode_config.path_property,
2733 +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
2734 +index 2aab1b475945..cede78cdf28d 100644
2735 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
2736 ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
2737 +@@ -669,20 +669,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
2738 + for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
2739 + table->WatermarkRow[1][i].MinClock =
2740 + cpu_to_le16((uint16_t)
2741 +- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz) /
2742 +- 1000);
2743 ++ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
2744 ++ 1000));
2745 + table->WatermarkRow[1][i].MaxClock =
2746 + cpu_to_le16((uint16_t)
2747 +- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz) /
2748 +- 100);
2749 ++ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
2750 ++ 1000));
2751 + table->WatermarkRow[1][i].MinUclk =
2752 + cpu_to_le16((uint16_t)
2753 +- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
2754 +- 1000);
2755 ++ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
2756 ++ 1000));
2757 + table->WatermarkRow[1][i].MaxUclk =
2758 + cpu_to_le16((uint16_t)
2759 +- (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
2760 +- 1000);
2761 ++ (wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
2762 ++ 1000));
2763 + table->WatermarkRow[1][i].WmSetting = (uint8_t)
2764 + wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
2765 + }
2766 +@@ -690,20 +690,20 @@ int smu_set_watermarks_for_clocks_ranges(void *wt_table,
2767 + for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
2768 + table->WatermarkRow[0][i].MinClock =
2769 + cpu_to_le16((uint16_t)
2770 +- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz) /
2771 +- 1000);
2772 ++ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
2773 ++ 1000));
2774 + table->WatermarkRow[0][i].MaxClock =
2775 + cpu_to_le16((uint16_t)
2776 +- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz) /
2777 +- 1000);
2778 ++ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
2779 ++ 1000));
2780 + table->WatermarkRow[0][i].MinUclk =
2781 + cpu_to_le16((uint16_t)
2782 +- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz) /
2783 +- 1000);
2784 ++ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
2785 ++ 1000));
2786 + table->WatermarkRow[0][i].MaxUclk =
2787 + cpu_to_le16((uint16_t)
2788 +- (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz) /
2789 +- 1000);
2790 ++ (wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
2791 ++ 1000));
2792 + table->WatermarkRow[0][i].WmSetting = (uint8_t)
2793 + wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
2794 + }
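The watermark hunks above are a cast-precedence repair: the old code cast the kHz value to uint16_t first and divided afterwards (and in one MaxClock line divided by 100 instead of 1000), truncating away the high bits; the new code divides inside the cast. A concrete value shows the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t khz = 800000;    /* 800 MHz expressed in kHz */

        uint16_t broken = (uint16_t)(khz) / 1000;    /* 13568 / 1000 */
        uint16_t fixed  = (uint16_t)(khz / 1000);    /* 800 */

        /* prints "broken=13 fixed=800" */
        printf("broken=%u fixed=%u\n", (unsigned)broken, (unsigned)fixed);
        return 0;
    }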
2795 +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
2796 +index 281cf9cbb44c..1a4b44923aec 100644
2797 +--- a/drivers/gpu/drm/drm_atomic.c
2798 ++++ b/drivers/gpu/drm/drm_atomic.c
2799 +@@ -1702,6 +1702,27 @@ drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
2800 + struct drm_connector *connector = conn_state->connector;
2801 + struct drm_crtc_state *crtc_state;
2802 +
2803 ++ /*
2804 ++ * For compatibility with legacy users, we want to make sure that
2805 ++ * we allow DPMS On<->Off modesets on unregistered connectors, since
2806 ++ * legacy modesetting users will not be expecting these to fail. We do
2807 ++ * not however, want to allow legacy users to assign a connector
2808 ++ * that's been unregistered from sysfs to another CRTC, since doing
2809 ++ * this with a now non-existent connector could potentially leave us
2810 ++ * in an invalid state.
2811 ++ *
2812 ++ * Since the connector can be unregistered at any point during an
2813 ++ * atomic check or commit, this is racy. But that's OK: all we care
2814 ++ * about is ensuring that userspace can't use this connector for new
2815 ++ * configurations after it's been notified that the connector is no
2816 ++ * longer present.
2817 ++ */
2818 ++ if (!READ_ONCE(connector->registered) && crtc) {
2819 ++ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] is not registered\n",
2820 ++ connector->base.id, connector->name);
2821 ++ return -EINVAL;
2822 ++ }
2823 ++
2824 + if (conn_state->crtc == crtc)
2825 + return 0;
2826 +
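The connector check above deliberately tolerates the race it describes: registered can flip at any time, and a single tear-free READ_ONCE() load is all that is needed to stop new CRTC assignments once unregistration has been observed, while still allowing DPMS-style shutdowns. A userspace analogue using a C11 relaxed atomic in place of READ_ONCE():

    #include <assert.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool registered = true;

    /* reject new CRTC assignments once unregistration has been observed */
    static bool may_set_crtc(bool has_new_crtc)
    {
        if (!atomic_load_explicit(&registered, memory_order_relaxed) &&
            has_new_crtc)
            return false;    /* the -EINVAL case above */
        return true;
    }

    int main(void)
    {
        assert(may_set_crtc(true));           /* registered: allowed */
        atomic_store(&registered, false);     /* concurrent hot-unplug */
        assert(!may_set_crtc(true));          /* no new configurations */
        assert(may_set_crtc(false));          /* crtc = NULL / DPMS off is fine */
        return 0;
    }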
2827 +diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
2828 +index 138680b37c70..f8672238d444 100644
2829 +--- a/drivers/gpu/drm/drm_ioc32.c
2830 ++++ b/drivers/gpu/drm/drm_ioc32.c
2831 +@@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
2832 + m32.size = map.size;
2833 + m32.type = map.type;
2834 + m32.flags = map.flags;
2835 +- m32.handle = ptr_to_compat(map.handle);
2836 ++ m32.handle = ptr_to_compat((void __user *)map.handle);
2837 + m32.mtrr = map.mtrr;
2838 + if (copy_to_user(argp, &m32, sizeof(m32)))
2839 + return -EFAULT;
2840 +@@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
2841 +
2842 + m32.offset = map.offset;
2843 + m32.mtrr = map.mtrr;
2844 +- m32.handle = ptr_to_compat(map.handle);
2845 ++ m32.handle = ptr_to_compat((void __user *)map.handle);
2846 + if (map.handle != compat_ptr(m32.handle))
2847 + pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
2848 + map.handle, m32.type, m32.offset);
2849 +@@ -529,7 +529,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
2850 + if (err)
2851 + return err;
2852 +
2853 +- req32.handle = ptr_to_compat(req.handle);
2854 ++ req32.handle = ptr_to_compat((void __user *)req.handle);
2855 + if (copy_to_user(argp, &req32, sizeof(req32)))
2856 + return -EFAULT;
2857 +
2858 +diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
2859 +index 28cdcf76b6f9..d1859bcc7ccb 100644
2860 +--- a/drivers/gpu/drm/drm_vblank.c
2861 ++++ b/drivers/gpu/drm/drm_vblank.c
2862 +@@ -105,13 +105,20 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
2863 + write_sequnlock(&vblank->seqlock);
2864 + }
2865 +
2866 ++static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
2867 ++{
2868 ++ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
2869 ++
2870 ++ return vblank->max_vblank_count ?: dev->max_vblank_count;
2871 ++}
2872 ++
2873 + /*
2874 + * "No hw counter" fallback implementation of .get_vblank_counter() hook,
2875 + * if there is no useable hardware frame counter available.
2876 + */
2877 + static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
2878 + {
2879 +- WARN_ON_ONCE(dev->max_vblank_count != 0);
2880 ++ WARN_ON_ONCE(drm_max_vblank_count(dev, pipe) != 0);
2881 + return 0;
2882 + }
2883 +
2884 +@@ -198,6 +205,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
2885 + ktime_t t_vblank;
2886 + int count = DRM_TIMESTAMP_MAXRETRIES;
2887 + int framedur_ns = vblank->framedur_ns;
2888 ++ u32 max_vblank_count = drm_max_vblank_count(dev, pipe);
2889 +
2890 + /*
2891 + * Interrupts were disabled prior to this call, so deal with counter
2892 +@@ -216,9 +224,9 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
2893 + rc = drm_get_last_vbltimestamp(dev, pipe, &t_vblank, in_vblank_irq);
2894 + } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0);
2895 +
2896 +- if (dev->max_vblank_count != 0) {
2897 ++ if (max_vblank_count) {
2898 + /* trust the hw counter when it's around */
2899 +- diff = (cur_vblank - vblank->last) & dev->max_vblank_count;
2900 ++ diff = (cur_vblank - vblank->last) & max_vblank_count;
2901 + } else if (rc && framedur_ns) {
2902 + u64 diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time));
2903 +
2904 +@@ -1204,6 +1212,37 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
2905 + }
2906 + EXPORT_SYMBOL(drm_crtc_vblank_reset);
2907 +
2908 ++/**
2909 ++ * drm_crtc_set_max_vblank_count - configure the hw max vblank counter value
2910 ++ * @crtc: CRTC in question
2911 ++ * @max_vblank_count: max hardware vblank counter value
2912 ++ *
2913 ++ * Update the maximum hardware vblank counter value for @crtc
2914 ++ * at runtime. Useful for hardware where the operation of the
2915 ++ * hardware vblank counter depends on the currently active
2916 ++ * display configuration.
2917 ++ *
2918 ++ * For example, if the hardware vblank counter does not work
2919 ++ * when a specific connector is active, the maximum can be set
2920 ++ * to zero; when that specific connector isn't active, the
2921 ++ * maximum can again be set to the appropriate non-zero value.
2922 ++ *
2923 ++ * If used, must be called before drm_vblank_on().
2924 ++ */
2925 ++void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
2926 ++ u32 max_vblank_count)
2927 ++{
2928 ++ struct drm_device *dev = crtc->dev;
2929 ++ unsigned int pipe = drm_crtc_index(crtc);
2930 ++ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
2931 ++
2932 ++ WARN_ON(dev->max_vblank_count);
2933 ++ WARN_ON(!READ_ONCE(vblank->inmodeset));
2934 ++
2935 ++ vblank->max_vblank_count = max_vblank_count;
2936 ++}
2937 ++EXPORT_SYMBOL(drm_crtc_set_max_vblank_count);
2938 ++
2939 + /**
2940 + * drm_crtc_vblank_on - enable vblank events on a CRTC
2941 + * @crtc: CRTC in question
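The hunks above make the maximum hardware vblank counter a per-CRTC property:
drm_max_vblank_count() prefers vblank->max_vblank_count and falls back to the
device-wide dev->max_vblank_count via GCC's "a ?: b" extension. A minimal
standalone sketch of that fallback pattern (struct names invented, not the
DRM types):

#include <stdio.h>

struct device { unsigned int max_count; };      /* device-wide default */
struct crtc   { unsigned int max_count; };      /* 0 means "not overridden" */

/* GCC extension: "a ?: b" yields a when a is non-zero, else b */
static unsigned int effective_max(const struct crtc *c, const struct device *d)
{
        return c->max_count ?: d->max_count;
}

int main(void)
{
        struct device dev  = { .max_count = 0xffffff };
        struct crtc   crtc = { .max_count = 0 };

        printf("%#x\n", effective_max(&crtc, &dev)); /* fallback: 0xffffff */
        crtc.max_count = 0xff;
        printf("%#x\n", effective_max(&crtc, &dev)); /* override: 0xff */
        return 0;
}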
2942 +diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
2943 +index f9ce35da4123..e063e98d1e82 100644
2944 +--- a/drivers/gpu/drm/i915/i915_debugfs.c
2945 ++++ b/drivers/gpu/drm/i915/i915_debugfs.c
2946 +@@ -1788,6 +1788,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
2947 + if (!IS_GEN5(dev_priv))
2948 + return -ENODEV;
2949 +
2950 ++ intel_runtime_pm_get(dev_priv);
2951 ++
2952 + ret = mutex_lock_interruptible(&dev->struct_mutex);
2953 + if (ret)
2954 + return ret;
2955 +@@ -1802,6 +1804,8 @@ static int i915_emon_status(struct seq_file *m, void *unused)
2956 + seq_printf(m, "GFX power: %ld\n", gfx);
2957 + seq_printf(m, "Total power: %ld\n", chipset + gfx);
2958 +
2959 ++ intel_runtime_pm_put(dev_priv);
2960 ++
2961 + return 0;
2962 + }
2963 +
2964 +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
2965 +index 03cda197fb6b..937287710042 100644
2966 +--- a/drivers/gpu/drm/i915/i915_gem.c
2967 ++++ b/drivers/gpu/drm/i915/i915_gem.c
2968 +@@ -1874,20 +1874,28 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
2969 + * pages from.
2970 + */
2971 + if (!obj->base.filp) {
2972 +- i915_gem_object_put(obj);
2973 +- return -ENXIO;
2974 ++ addr = -ENXIO;
2975 ++ goto err;
2976 ++ }
2977 ++
2978 ++ if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
2979 ++ addr = -EINVAL;
2980 ++ goto err;
2981 + }
2982 +
2983 + addr = vm_mmap(obj->base.filp, 0, args->size,
2984 + PROT_READ | PROT_WRITE, MAP_SHARED,
2985 + args->offset);
2986 ++ if (IS_ERR_VALUE(addr))
2987 ++ goto err;
2988 ++
2989 + if (args->flags & I915_MMAP_WC) {
2990 + struct mm_struct *mm = current->mm;
2991 + struct vm_area_struct *vma;
2992 +
2993 + if (down_write_killable(&mm->mmap_sem)) {
2994 +- i915_gem_object_put(obj);
2995 +- return -EINTR;
2996 ++ addr = -EINTR;
2997 ++ goto err;
2998 + }
2999 + vma = find_vma(mm, addr);
3000 + if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
3001 +@@ -1896,17 +1904,20 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
3002 + else
3003 + addr = -ENOMEM;
3004 + up_write(&mm->mmap_sem);
3005 ++ if (IS_ERR_VALUE(addr))
3006 ++ goto err;
3007 +
3008 + /* This may race, but that's ok, it only gets set */
3009 + WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
3010 + }
3011 + i915_gem_object_put(obj);
3012 +- if (IS_ERR((void *)addr))
3013 +- return addr;
3014 +
3015 + args->addr_ptr = (uint64_t) addr;
3016 +-
3017 + return 0;
3018 ++
3019 ++err:
3020 ++ i915_gem_object_put(obj);
3021 ++ return addr;
3022 + }
3023 +
3024 + static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
3025 +@@ -5595,6 +5606,8 @@ err_uc_misc:
3026 + i915_gem_cleanup_userptr(dev_priv);
3027 +
3028 + if (ret == -EIO) {
3029 ++ mutex_lock(&dev_priv->drm.struct_mutex);
3030 ++
3031 + /*
3032 + * Allow engine initialisation to fail by marking the GPU as
3033 + * wedged. But we only want to do this where the GPU is angry,
3034 +@@ -5605,7 +5618,14 @@ err_uc_misc:
3035 + "Failed to initialize GPU, declaring it wedged!\n");
3036 + i915_gem_set_wedged(dev_priv);
3037 + }
3038 +- ret = 0;
3039 ++
3040 ++ /* Minimal basic recovery for KMS */
3041 ++ ret = i915_ggtt_enable_hw(dev_priv);
3042 ++ i915_gem_restore_gtt_mappings(dev_priv);
3043 ++ i915_gem_restore_fences(dev_priv);
3044 ++ intel_init_clock_gating(dev_priv);
3045 ++
3046 ++ mutex_unlock(&dev_priv->drm.struct_mutex);
3047 + }
3048 +
3049 + i915_gem_drain_freed_objects(dev_priv);
3050 +@@ -5615,6 +5635,7 @@ err_uc_misc:
3051 + void i915_gem_fini(struct drm_i915_private *dev_priv)
3052 + {
3053 + i915_gem_suspend_late(dev_priv);
3054 ++ intel_disable_gt_powersave(dev_priv);
3055 +
3056 + /* Flush any outstanding unpin_work. */
3057 + i915_gem_drain_workqueue(dev_priv);
3058 +@@ -5626,6 +5647,8 @@ void i915_gem_fini(struct drm_i915_private *dev_priv)
3059 + i915_gem_contexts_fini(dev_priv);
3060 + mutex_unlock(&dev_priv->drm.struct_mutex);
3061 +
3062 ++ intel_cleanup_gt_powersave(dev_priv);
3063 ++
3064 + intel_uc_fini_misc(dev_priv);
3065 + i915_gem_cleanup_userptr(dev_priv);
3066 +
3067 +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
3068 +index 16f5d2d93801..4e070afb2738 100644
3069 +--- a/drivers/gpu/drm/i915/i915_reg.h
3070 ++++ b/drivers/gpu/drm/i915/i915_reg.h
3071 +@@ -6531,7 +6531,7 @@ enum {
3072 + #define PLANE_CTL_YUV422_UYVY (1 << 16)
3073 + #define PLANE_CTL_YUV422_YVYU (2 << 16)
3074 + #define PLANE_CTL_YUV422_VYUY (3 << 16)
3075 +-#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
3076 ++#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15)
3077 + #define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
3078 + #define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */
3079 + #define PLANE_CTL_TILED_MASK (0x7 << 10)
3080 +diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
3081 +index 29075c763428..7b4906ede148 100644
3082 +--- a/drivers/gpu/drm/i915/intel_cdclk.c
3083 ++++ b/drivers/gpu/drm/i915/intel_cdclk.c
3084 +@@ -2208,6 +2208,17 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
3085 + if (INTEL_GEN(dev_priv) >= 9)
3086 + min_cdclk = max(2 * 96000, min_cdclk);
3087 +
3088 ++ /*
3089 ++ * "For DP audio configuration, cdclk frequency shall be set to
3090 ++ * meet the following requirements:
3091 ++ * DP Link Frequency(MHz) | Cdclk frequency(MHz)
3092 ++ * 270 | 320 or higher
3093 ++ * 162 | 200 or higher"
3094 ++ */
3095 ++ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
3096 ++ intel_crtc_has_dp_encoder(crtc_state) && crtc_state->has_audio)
3097 ++ min_cdclk = max(crtc_state->port_clock, min_cdclk);
3098 ++
3099 + /*
3100 + * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
3101 + * than 320000KHz.
3102 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
3103 +index 3bd44d042a1d..6902fd2da19c 100644
3104 +--- a/drivers/gpu/drm/i915/intel_display.c
3105 ++++ b/drivers/gpu/drm/i915/intel_display.c
3106 +@@ -2712,6 +2712,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3107 + if (size_aligned * 2 > dev_priv->stolen_usable_size)
3108 + return false;
3109 +
3110 ++ switch (fb->modifier) {
3111 ++ case DRM_FORMAT_MOD_LINEAR:
3112 ++ case I915_FORMAT_MOD_X_TILED:
3113 ++ case I915_FORMAT_MOD_Y_TILED:
3114 ++ break;
3115 ++ default:
3116 ++ DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3117 ++ fb->modifier);
3118 ++ return false;
3119 ++ }
3120 ++
3121 + mutex_lock(&dev->struct_mutex);
3122 + obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
3123 + base_aligned,
3124 +@@ -2721,8 +2732,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3125 + if (!obj)
3126 + return false;
3127 +
3128 +- if (plane_config->tiling == I915_TILING_X)
3129 +- obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
3130 ++ switch (plane_config->tiling) {
3131 ++ case I915_TILING_NONE:
3132 ++ break;
3133 ++ case I915_TILING_X:
3134 ++ case I915_TILING_Y:
3135 ++ obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3136 ++ break;
3137 ++ default:
3138 ++ MISSING_CASE(plane_config->tiling);
3139 ++ return false;
3140 ++ }
3141 +
3142 + mode_cmd.pixel_format = fb->format->format;
3143 + mode_cmd.width = fb->width;
3144 +@@ -3561,11 +3581,11 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3145 + case I915_FORMAT_MOD_Y_TILED:
3146 + return PLANE_CTL_TILED_Y;
3147 + case I915_FORMAT_MOD_Y_TILED_CCS:
3148 +- return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE;
3149 ++ return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3150 + case I915_FORMAT_MOD_Yf_TILED:
3151 + return PLANE_CTL_TILED_YF;
3152 + case I915_FORMAT_MOD_Yf_TILED_CCS:
3153 +- return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE;
3154 ++ return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
3155 + default:
3156 + MISSING_CASE(fb_modifier);
3157 + }
3158 +@@ -8812,13 +8832,14 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
3159 + fb->modifier = I915_FORMAT_MOD_X_TILED;
3160 + break;
3161 + case PLANE_CTL_TILED_Y:
3162 +- if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
3163 ++ plane_config->tiling = I915_TILING_Y;
3164 ++ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
3165 + fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
3166 + else
3167 + fb->modifier = I915_FORMAT_MOD_Y_TILED;
3168 + break;
3169 + case PLANE_CTL_TILED_YF:
3170 +- if (val & PLANE_CTL_DECOMPRESSION_ENABLE)
3171 ++ if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
3172 + fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
3173 + else
3174 + fb->modifier = I915_FORMAT_MOD_Yf_TILED;
3175 +@@ -15951,8 +15972,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
3176 + flush_work(&dev_priv->atomic_helper.free_work);
3177 + WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
3178 +
3179 +- intel_disable_gt_powersave(dev_priv);
3180 +-
3181 + /*
3182 + * Interrupts and polling as the first thing to avoid creating havoc.
3183 + * Too much stuff here (turning of connectors, ...) would
3184 +@@ -15980,8 +15999,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
3185 +
3186 + intel_cleanup_overlay(dev_priv);
3187 +
3188 +- intel_cleanup_gt_powersave(dev_priv);
3189 +-
3190 + intel_teardown_gmbus(dev_priv);
3191 +
3192 + destroy_workqueue(dev_priv->modeset_wq);
3193 +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
3194 +index f92079e19de8..20cd4c8acecc 100644
3195 +--- a/drivers/gpu/drm/i915/intel_dp.c
3196 ++++ b/drivers/gpu/drm/i915/intel_dp.c
3197 +@@ -4739,6 +4739,22 @@ intel_dp_long_pulse(struct intel_connector *connector,
3198 + */
3199 + status = connector_status_disconnected;
3200 + goto out;
3201 ++ } else {
3202 ++ /*
3203 ++ * If the display is now connected, check link status;
3204 ++ * there have been known issues of link loss triggering
3205 ++ * a long pulse.
3206 ++ *
3207 ++ * Some sinks (e.g. ASUS PB287Q) seem to perform some
3208 ++ * weird HPD ping pong during modesets. So we can apparently
3209 ++ * end up with HPD going low during a modeset, and then
3210 ++ * going back up soon after. And once that happens we must
3211 ++ * retrain the link to get a picture. That's in case no
3212 ++ * userspace component reacted to the intermittent HPD dip.
3213 ++ */
3214 ++ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3215 ++
3216 ++ intel_dp_retrain_link(encoder, ctx);
3217 + }
3218 +
3219 + /*
3220 +diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
3221 +index 1fec0c71b4d9..58ba14966d4f 100644
3222 +--- a/drivers/gpu/drm/i915/intel_dp_mst.c
3223 ++++ b/drivers/gpu/drm/i915/intel_dp_mst.c
3224 +@@ -408,8 +408,6 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c
3225 + struct intel_dp *intel_dp = intel_connector->mst_port;
3226 + struct intel_crtc *crtc = to_intel_crtc(state->crtc);
3227 +
3228 +- if (!READ_ONCE(connector->registered))
3229 +- return NULL;
3230 + return &intel_dp->mst_encoders[crtc->pipe]->base.base;
3231 + }
3232 +
3233 +diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
3234 +index f889d41a281f..5e01bfb69d7a 100644
3235 +--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
3236 ++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
3237 +@@ -759,7 +759,8 @@ nv50_msto_enable(struct drm_encoder *encoder)
3238 +
3239 + slots = drm_dp_find_vcpi_slots(&mstm->mgr, mstc->pbn);
3240 + r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, mstc->pbn, slots);
3241 +- WARN_ON(!r);
3242 ++ if (!r)
3243 ++ DRM_DEBUG_KMS("Failed to allocate VCPI\n");
3244 +
3245 + if (!mstm->links++)
3246 + nv50_outp_acquire(mstm->outp);
3247 +diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
3248 +index b1d41c4921dd..5fd94e206029 100644
3249 +--- a/drivers/gpu/drm/panel/panel-simple.c
3250 ++++ b/drivers/gpu/drm/panel/panel-simple.c
3251 +@@ -436,6 +436,32 @@ static const struct panel_desc ampire_am800480r3tmqwa1h = {
3252 + .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
3253 + };
3254 +
3255 ++static const struct display_timing santek_st0700i5y_rbslw_f_timing = {
3256 ++ .pixelclock = { 26400000, 33300000, 46800000 },
3257 ++ .hactive = { 800, 800, 800 },
3258 ++ .hfront_porch = { 16, 210, 354 },
3259 ++ .hback_porch = { 45, 36, 6 },
3260 ++ .hsync_len = { 1, 10, 40 },
3261 ++ .vactive = { 480, 480, 480 },
3262 ++ .vfront_porch = { 7, 22, 147 },
3263 ++ .vback_porch = { 22, 13, 3 },
3264 ++ .vsync_len = { 1, 10, 20 },
3265 ++ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
3266 ++ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE
3267 ++};
3268 ++
3269 ++static const struct panel_desc armadeus_st0700_adapt = {
3270 ++ .timings = &santek_st0700i5y_rbslw_f_timing,
3271 ++ .num_timings = 1,
3272 ++ .bpc = 6,
3273 ++ .size = {
3274 ++ .width = 154,
3275 ++ .height = 86,
3276 ++ },
3277 ++ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
3278 ++ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
3279 ++};
3280 ++
3281 + static const struct drm_display_mode auo_b101aw03_mode = {
3282 + .clock = 51450,
3283 + .hdisplay = 1024,
3284 +@@ -2330,6 +2356,9 @@ static const struct of_device_id platform_of_match[] = {
3285 + }, {
3286 + .compatible = "ampire,am800480r3tmqwa1h",
3287 + .data = &ampire_am800480r3tmqwa1h,
3288 ++ }, {
3289 ++ .compatible = "armadeus,st0700-adapt",
3290 ++ .data = &armadeus_st0700_adapt,
3291 + }, {
3292 + .compatible = "auo,b101aw03",
3293 + .data = &auo_b101aw03,
3294 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
3295 +index 59e9d05ab928..0af048d1a815 100644
3296 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
3297 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
3298 +@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
3299 + !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
3300 + if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
3301 + kfree(reply);
3302 +-
3303 ++ reply = NULL;
3304 + if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
3305 + /* A checkpoint occurred. Retry. */
3306 + continue;
3307 +@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
3308 +
3309 + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
3310 + kfree(reply);
3311 +-
3312 ++ reply = NULL;
3313 + if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
3314 + /* A checkpoint occurred. Retry. */
3315 + continue;
3316 +@@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
3317 + break;
3318 + }
3319 +
3320 +- if (retries == RETRIES) {
3321 +- kfree(reply);
3322 ++ if (!reply)
3323 + return -EINVAL;
3324 +- }
3325 +
3326 + *msg_len = reply_len;
3327 + *msg = reply;
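Clearing reply to NULL right after each kfree() is what lets the exit test
become "if (!reply) return -EINVAL;" instead of comparing the retry counter,
and it guarantees no dangling pointer survives a retry. The same idiom in
plain C, with a made-up try_recv() standing in for the RPC receive:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RETRIES 3

static char *try_recv(int attempt)
{
        return attempt == 2 ? strdup("ok") : NULL; /* succeeds on last try */
}

int main(void)
{
        char *reply = NULL;

        for (int i = 0; i < RETRIES; i++) {
                free(reply);
                reply = NULL;           /* never leave a dangling pointer */
                reply = try_recv(i);
                if (reply)
                        break;
        }
        if (!reply)                     /* every retry failed */
                return 1;
        printf("%s\n", reply);
        free(reply);
        return 0;
}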
3328 +diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
3329 +index 5eed1e7da15c..d6106e1a0d4a 100644
3330 +--- a/drivers/hv/hv_kvp.c
3331 ++++ b/drivers/hv/hv_kvp.c
3332 +@@ -353,7 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
3333 +
3334 + out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
3335 +
3336 +- default:
3337 ++ /* fallthrough */
3338 ++
3339 ++ case KVP_OP_GET_IP_INFO:
3340 + utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
3341 + MAX_ADAPTER_ID_SIZE,
3342 + UTF16_LITTLE_ENDIAN,
3343 +@@ -406,6 +408,10 @@ kvp_send_key(struct work_struct *dummy)
3344 + process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
3345 + break;
3346 + case KVP_OP_GET_IP_INFO:
3347 ++ /*
3348 ++ * We only need to pass on the operation, adapter_id and
3349 ++ * addr_family info to the userland kvp daemon.
3350 ++ */
3351 + process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
3352 + break;
3353 + case KVP_OP_SET:
3354 +@@ -421,7 +427,7 @@ kvp_send_key(struct work_struct *dummy)
3355 + UTF16_LITTLE_ENDIAN,
3356 + message->body.kvp_set.data.value,
3357 + HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1;
3358 +- break;
3359 ++ break;
3360 +
3361 + case REG_U32:
3362 + /*
3363 +@@ -446,7 +452,10 @@ kvp_send_key(struct work_struct *dummy)
3364 + break;
3365 +
3366 + }
3367 +- case KVP_OP_GET:
3368 ++
3369 ++ /*
3370 ++ * The key is always a string - utf16 encoding.
3371 ++ */
3372 + message->body.kvp_set.data.key_size =
3373 + utf16s_to_utf8s(
3374 + (wchar_t *)in_msg->body.kvp_set.data.key,
3375 +@@ -454,7 +463,18 @@ kvp_send_key(struct work_struct *dummy)
3376 + UTF16_LITTLE_ENDIAN,
3377 + message->body.kvp_set.data.key,
3378 + HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
3379 +- break;
3380 ++
3381 ++ break;
3382 ++
3383 ++ case KVP_OP_GET:
3384 ++ message->body.kvp_get.data.key_size =
3385 ++ utf16s_to_utf8s(
3386 ++ (wchar_t *)in_msg->body.kvp_get.data.key,
3387 ++ in_msg->body.kvp_get.data.key_size,
3388 ++ UTF16_LITTLE_ENDIAN,
3389 ++ message->body.kvp_get.data.key,
3390 ++ HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
3391 ++ break;
3392 +
3393 + case KVP_OP_DELETE:
3394 + message->body.kvp_delete.key_size =
3395 +@@ -464,12 +484,12 @@ kvp_send_key(struct work_struct *dummy)
3396 + UTF16_LITTLE_ENDIAN,
3397 + message->body.kvp_delete.key,
3398 + HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
3399 +- break;
3400 ++ break;
3401 +
3402 + case KVP_OP_ENUMERATE:
3403 + message->body.kvp_enum_data.index =
3404 + in_msg->body.kvp_enum_data.index;
3405 +- break;
3406 ++ break;
3407 + }
3408 +
3409 + kvp_transaction.state = HVUTIL_USERSPACE_REQ;
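The kvp_send_key() changes above replace an implicit fall-into-default with an
explicit KVP_OP_GET_IP_INFO label plus a /* fallthrough */ marker, and give
KVP_OP_GET its own key-copy branch instead of sharing KVP_OP_SET's. A compact
illustration of the labelled-fallthrough shape (the enum and strings are
invented):

#include <stdio.h>

enum op { OP_SET, OP_GET };

static const char *describe(enum op op)
{
        switch (op) {
        case OP_SET:
                /* OP_SET-specific work would go here first... */
                /* fallthrough */
        case OP_GET:            /* explicit label, not a default: catch-all */
                return "copies the adapter id";
        }
        return "unknown";
}

int main(void)
{
        printf("%s\n", describe(OP_SET));
        return 0;
}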
3410 +diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
3411 +index 3f3e8b3bf5ff..d51bf536bdf7 100644
3412 +--- a/drivers/i2c/busses/i2c-at91.c
3413 ++++ b/drivers/i2c/busses/i2c-at91.c
3414 +@@ -270,9 +270,11 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
3415 + writeb_relaxed(*dev->buf, dev->base + AT91_TWI_THR);
3416 +
3417 + /* send stop when last byte has been written */
3418 +- if (--dev->buf_len == 0)
3419 ++ if (--dev->buf_len == 0) {
3420 + if (!dev->use_alt_cmd)
3421 + at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
3422 ++ at91_twi_write(dev, AT91_TWI_IDR, AT91_TWI_TXRDY);
3423 ++ }
3424 +
3425 + dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len);
3426 +
3427 +@@ -690,9 +692,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
3428 + } else {
3429 + at91_twi_write_next_byte(dev);
3430 + at91_twi_write(dev, AT91_TWI_IER,
3431 +- AT91_TWI_TXCOMP |
3432 +- AT91_TWI_NACK |
3433 +- AT91_TWI_TXRDY);
3434 ++ AT91_TWI_TXCOMP | AT91_TWI_NACK |
3435 ++ (dev->buf_len ? AT91_TWI_TXRDY : 0));
3436 + }
3437 + }
3438 +
3439 +@@ -913,7 +914,7 @@ static struct at91_twi_pdata sama5d4_config = {
3440 +
3441 + static struct at91_twi_pdata sama5d2_config = {
3442 + .clk_max_div = 7,
3443 +- .clk_offset = 4,
3444 ++ .clk_offset = 3,
3445 + .has_unre_flag = true,
3446 + .has_alt_cmd = true,
3447 + .has_hold_field = true,
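Two related fixes above: TXRDY is masked as soon as the last byte is queued,
and at transfer start it is only enabled when bytes remain
("dev->buf_len ? AT91_TWI_TXRDY : 0"), avoiding a spurious ready interrupt
with nothing left to send. Sketched as plain bit-mask arithmetic (the bit
values are invented, not the AT91 register layout):

#include <stdio.h>

#define IRQ_TXCOMP (1u << 0)
#define IRQ_NACK   (1u << 1)
#define IRQ_TXRDY  (1u << 2)

int main(void)
{
        unsigned int buf_len = 0;       /* nothing left after the first byte */

        /* enable TXRDY only if more bytes will follow */
        unsigned int ier = IRQ_TXCOMP | IRQ_NACK |
                           (buf_len ? IRQ_TXRDY : 0);

        printf("ier=%#x (TXRDY %s)\n", ier, ier & IRQ_TXRDY ? "on" : "off");
        return 0;
}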
3448 +diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
3449 +index 4be29ed44755..1ca2c4d39f87 100644
3450 +--- a/drivers/iio/adc/exynos_adc.c
3451 ++++ b/drivers/iio/adc/exynos_adc.c
3452 +@@ -115,6 +115,8 @@
3453 + #define MAX_ADC_V2_CHANNELS 10
3454 + #define MAX_ADC_V1_CHANNELS 8
3455 + #define MAX_EXYNOS3250_ADC_CHANNELS 2
3456 ++#define MAX_EXYNOS4212_ADC_CHANNELS 4
3457 ++#define MAX_S5PV210_ADC_CHANNELS 10
3458 +
3459 + /* Bit definitions common for ADC_V1 and ADC_V2 */
3460 + #define ADC_CON_EN_START (1u << 0)
3461 +@@ -270,6 +272,19 @@ static void exynos_adc_v1_start_conv(struct exynos_adc *info,
3462 + writel(con1 | ADC_CON_EN_START, ADC_V1_CON(info->regs));
3463 + }
3464 +
3465 ++/* Exynos4212 and 4412 are like ADCv1 but with only four channels */
3466 ++static const struct exynos_adc_data exynos4212_adc_data = {
3467 ++ .num_channels = MAX_EXYNOS4212_ADC_CHANNELS,
3468 ++ .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
3469 ++ .needs_adc_phy = true,
3470 ++ .phy_offset = EXYNOS_ADCV1_PHY_OFFSET,
3471 ++
3472 ++ .init_hw = exynos_adc_v1_init_hw,
3473 ++ .exit_hw = exynos_adc_v1_exit_hw,
3474 ++ .clear_irq = exynos_adc_v1_clear_irq,
3475 ++ .start_conv = exynos_adc_v1_start_conv,
3476 ++};
3477 ++
3478 + static const struct exynos_adc_data exynos_adc_v1_data = {
3479 + .num_channels = MAX_ADC_V1_CHANNELS,
3480 + .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
3481 +@@ -282,6 +297,16 @@ static const struct exynos_adc_data exynos_adc_v1_data = {
3482 + .start_conv = exynos_adc_v1_start_conv,
3483 + };
3484 +
3485 ++static const struct exynos_adc_data exynos_adc_s5pv210_data = {
3486 ++ .num_channels = MAX_S5PV210_ADC_CHANNELS,
3487 ++ .mask = ADC_DATX_MASK, /* 12 bit ADC resolution */
3488 ++
3489 ++ .init_hw = exynos_adc_v1_init_hw,
3490 ++ .exit_hw = exynos_adc_v1_exit_hw,
3491 ++ .clear_irq = exynos_adc_v1_clear_irq,
3492 ++ .start_conv = exynos_adc_v1_start_conv,
3493 ++};
3494 ++
3495 + static void exynos_adc_s3c2416_start_conv(struct exynos_adc *info,
3496 + unsigned long addr)
3497 + {
3498 +@@ -478,6 +503,12 @@ static const struct of_device_id exynos_adc_match[] = {
3499 + }, {
3500 + .compatible = "samsung,s3c6410-adc",
3501 + .data = &exynos_adc_s3c64xx_data,
3502 ++ }, {
3503 ++ .compatible = "samsung,s5pv210-adc",
3504 ++ .data = &exynos_adc_s5pv210_data,
3505 ++ }, {
3506 ++ .compatible = "samsung,exynos4212-adc",
3507 ++ .data = &exynos4212_adc_data,
3508 + }, {
3509 + .compatible = "samsung,exynos-adc-v1",
3510 + .data = &exynos_adc_v1_data,
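Each new compatible string above is dispatched through the of_device_id
table's .data pointer to a per-SoC struct of limits and hooks, so s5pv210 and
exynos4212 support is just two table entries plus two data structs. A
userspace model of that match-table lookup (the struct layout is invented):

#include <stdio.h>
#include <string.h>

struct adc_data { int num_channels; };

static const struct adc_data v1      = { .num_channels = 8 };
static const struct adc_data e4212   = { .num_channels = 4 };
static const struct adc_data s5pv210 = { .num_channels = 10 };

static const struct {
        const char *compatible;
        const struct adc_data *data;
} match[] = {
        { "samsung,exynos4212-adc", &e4212 },
        { "samsung,s5pv210-adc",    &s5pv210 },
        { "samsung,exynos-adc-v1",  &v1 },
        { NULL, NULL },
};

static const struct adc_data *of_match(const char *compat)
{
        for (int i = 0; match[i].compatible; i++)
                if (!strcmp(match[i].compatible, compat))
                        return match[i].data;
        return NULL;
}

int main(void)
{
        printf("%d channels\n", of_match("samsung,s5pv210-adc")->num_channels);
        return 0;
}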
3511 +diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
3512 +index dcb50172186f..f3a966ab35dc 100644
3513 +--- a/drivers/iio/adc/rcar-gyroadc.c
3514 ++++ b/drivers/iio/adc/rcar-gyroadc.c
3515 +@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
3516 + dev_err(dev,
3517 + "Only %i channels supported with %s, but reg = <%i>.\n",
3518 + num_channels, child->name, reg);
3519 +- return ret;
3520 ++ return -EINVAL;
3521 + }
3522 + }
3523 +
3524 +@@ -400,7 +400,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
3525 + dev_err(dev,
3526 + "Channel %i uses different ADC mode than the rest.\n",
3527 + reg);
3528 +- return ret;
3529 ++ return -EINVAL;
3530 + }
3531 +
3532 + /* Channel is valid, grab the regulator. */
3533 +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
3534 +index 50152c1b1004..357de3b4fddd 100644
3535 +--- a/drivers/infiniband/core/uverbs_main.c
3536 ++++ b/drivers/infiniband/core/uverbs_main.c
3537 +@@ -265,6 +265,9 @@ void ib_uverbs_release_file(struct kref *ref)
3538 + if (atomic_dec_and_test(&file->device->refcount))
3539 + ib_uverbs_comp_dev(file->device);
3540 +
3541 ++ if (file->async_file)
3542 ++ kref_put(&file->async_file->ref,
3543 ++ ib_uverbs_release_async_event_file);
3544 + kobject_put(&file->device->kobj);
3545 + kfree(file);
3546 + }
3547 +@@ -915,10 +918,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
3548 + }
3549 + mutex_unlock(&file->device->lists_mutex);
3550 +
3551 +- if (file->async_file)
3552 +- kref_put(&file->async_file->ref,
3553 +- ib_uverbs_release_async_event_file);
3554 +-
3555 + kref_put(&file->ref, ib_uverbs_release_file);
3556 +
3557 + return 0;
3558 +diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
3559 +index 88e326d6cc49..d648a4167832 100644
3560 +--- a/drivers/infiniband/hw/hfi1/sdma.c
3561 ++++ b/drivers/infiniband/hw/hfi1/sdma.c
3562 +@@ -410,10 +410,7 @@ static void sdma_flush(struct sdma_engine *sde)
3563 + sdma_flush_descq(sde);
3564 + spin_lock_irqsave(&sde->flushlist_lock, flags);
3565 + /* copy flush list */
3566 +- list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
3567 +- list_del_init(&txp->list);
3568 +- list_add_tail(&txp->list, &flushlist);
3569 +- }
3570 ++ list_splice_init(&sde->flushlist, &flushlist);
3571 + spin_unlock_irqrestore(&sde->flushlist_lock, flags);
3572 + /* flush from flush list */
3573 + list_for_each_entry_safe(txp, txp_next, &flushlist, list)
3574 +@@ -2426,7 +2423,7 @@ unlock_noconn:
3575 + wait->tx_count++;
3576 + wait->count += tx->num_desc;
3577 + }
3578 +- schedule_work(&sde->flush_worker);
3579 ++ queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
3580 + ret = -ECOMM;
3581 + goto unlock;
3582 + nodesc:
3583 +@@ -2526,7 +2523,7 @@ unlock_noconn:
3584 + }
3585 + }
3586 + spin_unlock(&sde->flushlist_lock);
3587 +- schedule_work(&sde->flush_worker);
3588 ++ queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
3589 + ret = -ECOMM;
3590 + goto update_tail;
3591 + nodesc:
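list_splice_init() moves the whole flush list onto a local head with O(1)
pointer updates and reinitializes the source, replacing the per-entry
del/add loop and shortening the time spent under flushlist_lock. The pointer
surgery on a minimal circular doubly linked list (hand-rolled here, not
<linux/list.h>):

#include <stdio.h>

struct node { struct node *prev, *next; int val; };

static void init(struct node *h) { h->prev = h->next = h; }

static void add_tail(struct node *h, struct node *n)
{
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
}

/* move every entry of src onto dst in O(1), leaving src empty */
static void splice_init(struct node *src, struct node *dst)
{
        if (src->next == src)
                return;                 /* already empty */
        src->next->prev = dst->prev;
        dst->prev->next = src->next;
        src->prev->next = dst;
        dst->prev = src->prev;
        init(src);
}

int main(void)
{
        struct node src, dst, a = { .val = 1 }, b = { .val = 2 };

        init(&src); init(&dst);
        add_tail(&src, &a); add_tail(&src, &b);
        splice_init(&src, &dst);
        for (struct node *n = dst.next; n != &dst; n = n->next)
                printf("%d\n", n->val); /* prints 1 then 2 */
        return 0;
}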
3592 +diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
3593 +index 9e1cac8cb260..453e5c4ac19f 100644
3594 +--- a/drivers/infiniband/hw/mlx5/odp.c
3595 ++++ b/drivers/infiniband/hw/mlx5/odp.c
3596 +@@ -497,7 +497,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
3597 + static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
3598 + u64 io_virt, size_t bcnt, u32 *bytes_mapped)
3599 + {
3600 +- u64 access_mask = ODP_READ_ALLOWED_BIT;
3601 ++ u64 access_mask;
3602 + int npages = 0, page_shift, np;
3603 + u64 start_idx, page_mask;
3604 + struct ib_umem_odp *odp;
3605 +@@ -522,6 +522,7 @@ next_mr:
3606 + page_shift = mr->umem->page_shift;
3607 + page_mask = ~(BIT(page_shift) - 1);
3608 + start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
3609 ++ access_mask = ODP_READ_ALLOWED_BIT;
3610 +
3611 + if (mr->umem->writable)
3612 + access_mask |= ODP_WRITE_ALLOWED_BIT;
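The apparently cosmetic move above matters because of the next_mr: label:
access_mask is now reset on every pass, so a second MR reached through the
same page fault no longer inherits ODP_WRITE_ALLOWED_BIT from a writable MR
visited earlier. The bug class in miniature:

#include <stdio.h>

#define READ_BIT  1
#define WRITE_BIT 2

int main(void)
{
        int writable[] = { 1, 0 };      /* first MR writable, second not */

        for (int i = 0; i < 2; i++) {
                int mask = READ_BIT;    /* reset *inside* the loop */

                if (writable[i])
                        mask |= WRITE_BIT;
                printf("mr%d mask=%d\n", i, mask); /* mr1 stays read-only */
        }
        return 0;
}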
3613 +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
3614 +index 2c1114ee0c6d..bc6a44a16445 100644
3615 +--- a/drivers/infiniband/ulp/srp/ib_srp.c
3616 ++++ b/drivers/infiniband/ulp/srp/ib_srp.c
3617 +@@ -3401,13 +3401,17 @@ static const match_table_t srp_opt_tokens = {
3618 +
3619 + /**
3620 + * srp_parse_in - parse an IP address and port number combination
3621 ++ * @net: [in] Network namespace.
3622 ++ * @sa: [out] Address family, IP address and port number.
3623 ++ * @addr_port_str: [in] IP address and port number.
3624 ++ * @has_port: [out] Whether or not @addr_port_str includes a port number.
3625 + *
3626 + * Parse the following address formats:
3627 + * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3628 + * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3629 + */
3630 + static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3631 +- const char *addr_port_str)
3632 ++ const char *addr_port_str, bool *has_port)
3633 + {
3634 + char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
3635 + char *port_str;
3636 +@@ -3416,9 +3420,12 @@ static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
3637 + if (!addr)
3638 + return -ENOMEM;
3639 + port_str = strrchr(addr, ':');
3640 +- if (!port_str)
3641 +- return -EINVAL;
3642 +- *port_str++ = '\0';
3643 ++ if (port_str && strchr(port_str, ']'))
3644 ++ port_str = NULL;
3645 ++ if (port_str)
3646 ++ *port_str++ = '\0';
3647 ++ if (has_port)
3648 ++ *has_port = port_str != NULL;
3649 + ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
3650 + if (ret && addr[0]) {
3651 + addr_end = addr + strlen(addr) - 1;
3652 +@@ -3440,6 +3447,7 @@ static int srp_parse_options(struct net *net, const char *buf,
3653 + char *p;
3654 + substring_t args[MAX_OPT_ARGS];
3655 + unsigned long long ull;
3656 ++ bool has_port;
3657 + int opt_mask = 0;
3658 + int token;
3659 + int ret = -EINVAL;
3660 +@@ -3538,7 +3546,8 @@ static int srp_parse_options(struct net *net, const char *buf,
3661 + ret = -ENOMEM;
3662 + goto out;
3663 + }
3664 +- ret = srp_parse_in(net, &target->rdma_cm.src.ss, p);
3665 ++ ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
3666 ++ NULL);
3667 + if (ret < 0) {
3668 + pr_warn("bad source parameter '%s'\n", p);
3669 + kfree(p);
3670 +@@ -3554,7 +3563,10 @@ static int srp_parse_options(struct net *net, const char *buf,
3671 + ret = -ENOMEM;
3672 + goto out;
3673 + }
3674 +- ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p);
3675 ++ ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
3676 ++ &has_port);
3677 ++ if (!has_port)
3678 ++ ret = -EINVAL;
3679 + if (ret < 0) {
3680 + pr_warn("bad dest parameter '%s'\n", p);
3681 + kfree(p);
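srp_parse_in() now treats the last ':' as a port separator only when no ']'
follows it, so a bracketed IPv6 address without a port is no longer split
mid-address, and the new has_port out-parameter lets the caller insist on a
port for the destination. A trimmed-down parser applying the same rule:

#include <stdio.h>
#include <string.h>

/* split "addr[:port]" in place; returns the port string or NULL */
static char *split_port(char *s)
{
        char *colon = strrchr(s, ':');

        /* a ']' after the last ':' means that colon is inside [ipv6] */
        if (colon && strchr(colon, ']'))
                colon = NULL;
        if (colon)
                *colon++ = '\0';
        return colon;
}

int main(void)
{
        char v4[] = "1.2.3.4:5", v6[] = "[1::2:3]";
        char *p4 = split_port(v4), *p6 = split_port(v6);

        printf("%s port=%s\n", v4, p4 ? p4 : "(none)"); /* 1.2.3.4 port=5 */
        printf("%s port=%s\n", v6, p6 ? p6 : "(none)"); /* no split, no port */
        return 0;
}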
3682 +diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
3683 +index 60348d707b99..9a576ae837dc 100644
3684 +--- a/drivers/iommu/iova.c
3685 ++++ b/drivers/iommu/iova.c
3686 +@@ -148,8 +148,9 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
3687 + struct iova *cached_iova;
3688 +
3689 + cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
3690 +- if (free->pfn_hi < iovad->dma_32bit_pfn &&
3691 +- free->pfn_lo >= cached_iova->pfn_lo)
3692 ++ if (free == cached_iova ||
3693 ++ (free->pfn_hi < iovad->dma_32bit_pfn &&
3694 ++ free->pfn_lo >= cached_iova->pfn_lo))
3695 + iovad->cached32_node = rb_next(&free->node);
3696 +
3697 + cached_iova = rb_entry(iovad->cached_node, struct iova, node);
3698 +diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
3699 +index 3f4211b5cd33..45f684689c35 100644
3700 +--- a/drivers/md/bcache/btree.c
3701 ++++ b/drivers/md/bcache/btree.c
3702 +@@ -35,7 +35,7 @@
3703 + #include <linux/rcupdate.h>
3704 + #include <linux/sched/clock.h>
3705 + #include <linux/rculist.h>
3706 +-
3707 ++#include <linux/delay.h>
3708 + #include <trace/events/bcache.h>
3709 +
3710 + /*
3711 +@@ -649,7 +649,25 @@ static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
3712 + up(&b->io_mutex);
3713 + }
3714 +
3715 ++retry:
3716 ++ /*
3717 ++ * BTREE_NODE_dirty might be cleared in btree_flush_btree() by
3718 ++ * __bch_btree_node_write(). To avoid an extra flush, acquire
3719 ++ * b->write_lock before checking BTREE_NODE_dirty bit.
3720 ++ */
3721 + mutex_lock(&b->write_lock);
3722 ++ /*
3723 ++ * If this btree node is selected in btree_flush_write() by journal
3724 ++ * code, delay and retry until the node is flushed by journal code
3725 ++ * and the BTREE_NODE_journal_flush bit is cleared by btree_flush_write().
3726 ++ */
3727 ++ if (btree_node_journal_flush(b)) {
3728 ++ pr_debug("bnode %p is flushing by journal, retry", b);
3729 ++ mutex_unlock(&b->write_lock);
3730 ++ udelay(1);
3731 ++ goto retry;
3732 ++ }
3733 ++
3734 + if (btree_node_dirty(b))
3735 + __bch_btree_node_write(b, &cl);
3736 + mutex_unlock(&b->write_lock);
3737 +@@ -772,10 +790,15 @@ void bch_btree_cache_free(struct cache_set *c)
3738 + while (!list_empty(&c->btree_cache)) {
3739 + b = list_first_entry(&c->btree_cache, struct btree, list);
3740 +
3741 +- if (btree_node_dirty(b))
3742 ++ /*
3743 ++ * This function is called by cache_set_free(); there is no I/O
3744 ++ * request on the cache now, so it is unnecessary to acquire
3745 ++ * b->write_lock before clearing BTREE_NODE_dirty anymore.
3746 ++ */
3747 ++ if (btree_node_dirty(b)) {
3748 + btree_complete_write(b, btree_current_write(b));
3749 +- clear_bit(BTREE_NODE_dirty, &b->flags);
3750 +-
3751 ++ clear_bit(BTREE_NODE_dirty, &b->flags);
3752 ++ }
3753 + mca_data_free(b);
3754 + }
3755 +
3756 +@@ -1061,11 +1084,25 @@ static void btree_node_free(struct btree *b)
3757 +
3758 + BUG_ON(b == b->c->root);
3759 +
3760 ++retry:
3761 + mutex_lock(&b->write_lock);
3762 ++ /*
3763 ++ * If the btree node is selected and being flushed in btree_flush_write(),
3764 ++ * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
3765 ++ * then it is safe to free the btree node here. Otherwise this btree
3766 ++ * node would be freed while racing with the journal flush.
3767 ++ */
3768 ++ if (btree_node_journal_flush(b)) {
3769 ++ mutex_unlock(&b->write_lock);
3770 ++ pr_debug("bnode %p journal_flush set, retry", b);
3771 ++ udelay(1);
3772 ++ goto retry;
3773 ++ }
3774 +
3775 +- if (btree_node_dirty(b))
3776 ++ if (btree_node_dirty(b)) {
3777 + btree_complete_write(b, btree_current_write(b));
3778 +- clear_bit(BTREE_NODE_dirty, &b->flags);
3779 ++ clear_bit(BTREE_NODE_dirty, &b->flags);
3780 ++ }
3781 +
3782 + mutex_unlock(&b->write_lock);
3783 +
3784 +diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h
3785 +index a68d6c55783b..4d0cca145f69 100644
3786 +--- a/drivers/md/bcache/btree.h
3787 ++++ b/drivers/md/bcache/btree.h
3788 +@@ -158,11 +158,13 @@ enum btree_flags {
3789 + BTREE_NODE_io_error,
3790 + BTREE_NODE_dirty,
3791 + BTREE_NODE_write_idx,
3792 ++ BTREE_NODE_journal_flush,
3793 + };
3794 +
3795 + BTREE_FLAG(io_error);
3796 + BTREE_FLAG(dirty);
3797 + BTREE_FLAG(write_idx);
3798 ++BTREE_FLAG(journal_flush);
3799 +
3800 + static inline struct btree_write *btree_current_write(struct btree *b)
3801 + {
3802 +diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
3803 +index c809724e6571..886710043025 100644
3804 +--- a/drivers/md/bcache/extents.c
3805 ++++ b/drivers/md/bcache/extents.c
3806 +@@ -538,6 +538,7 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
3807 + {
3808 + struct btree *b = container_of(bk, struct btree, keys);
3809 + unsigned int i, stale;
3810 ++ char buf[80];
3811 +
3812 + if (!KEY_PTRS(k) ||
3813 + bch_extent_invalid(bk, k))
3814 +@@ -547,19 +548,19 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
3815 + if (!ptr_available(b->c, k, i))
3816 + return true;
3817 +
3818 +- if (!expensive_debug_checks(b->c) && KEY_DIRTY(k))
3819 +- return false;
3820 +-
3821 + for (i = 0; i < KEY_PTRS(k); i++) {
3822 + stale = ptr_stale(b->c, k, i);
3823 +
3824 +- btree_bug_on(stale > 96, b,
3825 ++ if (stale && KEY_DIRTY(k)) {
3826 ++ bch_extent_to_text(buf, sizeof(buf), k);
3827 ++ pr_info("stale dirty pointer, stale %u, key: %s",
3828 ++ stale, buf);
3829 ++ }
3830 ++
3831 ++ btree_bug_on(stale > BUCKET_GC_GEN_MAX, b,
3832 + "key too stale: %i, need_gc %u",
3833 + stale, b->c->need_gc);
3834 +
3835 +- btree_bug_on(stale && KEY_DIRTY(k) && KEY_SIZE(k),
3836 +- b, "stale dirty pointer");
3837 +-
3838 + if (stale)
3839 + return true;
3840 +
3841 +diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
3842 +index ec1e35a62934..7bb15cddca5e 100644
3843 +--- a/drivers/md/bcache/journal.c
3844 ++++ b/drivers/md/bcache/journal.c
3845 +@@ -404,6 +404,7 @@ static void btree_flush_write(struct cache_set *c)
3846 + retry:
3847 + best = NULL;
3848 +
3849 ++ mutex_lock(&c->bucket_lock);
3850 + for_each_cached_btree(b, c, i)
3851 + if (btree_current_write(b)->journal) {
3852 + if (!best)
3853 +@@ -416,9 +417,14 @@ retry:
3854 + }
3855 +
3856 + b = best;
3857 ++ if (b)
3858 ++ set_btree_node_journal_flush(b);
3859 ++ mutex_unlock(&c->bucket_lock);
3860 ++
3861 + if (b) {
3862 + mutex_lock(&b->write_lock);
3863 + if (!btree_current_write(b)->journal) {
3864 ++ clear_bit(BTREE_NODE_journal_flush, &b->flags);
3865 + mutex_unlock(&b->write_lock);
3866 + /* We raced */
3867 + atomic_long_inc(&c->retry_flush_write);
3868 +@@ -426,6 +432,7 @@ retry:
3869 + }
3870 +
3871 + __bch_btree_node_write(b, NULL);
3872 ++ clear_bit(BTREE_NODE_journal_flush, &b->flags);
3873 + mutex_unlock(&b->write_lock);
3874 + }
3875 + }
3876 +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
3877 +index f3dcc7640319..34f5de13a93d 100644
3878 +--- a/drivers/md/dm-crypt.c
3879 ++++ b/drivers/md/dm-crypt.c
3880 +@@ -949,6 +949,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
3881 + {
3882 + #ifdef CONFIG_BLK_DEV_INTEGRITY
3883 + struct blk_integrity *bi = blk_get_integrity(cc->dev->bdev->bd_disk);
3884 ++ struct mapped_device *md = dm_table_get_md(ti->table);
3885 +
3886 + /* From now we require underlying device with our integrity profile */
3887 + if (!bi || strcasecmp(bi->profile->name, "DM-DIF-EXT-TAG")) {
3888 +@@ -968,7 +969,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
3889 +
3890 + if (crypt_integrity_aead(cc)) {
3891 + cc->integrity_tag_size = cc->on_disk_tag_size - cc->integrity_iv_size;
3892 +- DMINFO("Integrity AEAD, tag size %u, IV size %u.",
3893 ++ DMDEBUG("%s: Integrity AEAD, tag size %u, IV size %u.", dm_device_name(md),
3894 + cc->integrity_tag_size, cc->integrity_iv_size);
3895 +
3896 + if (crypto_aead_setauthsize(any_tfm_aead(cc), cc->integrity_tag_size)) {
3897 +@@ -976,7 +977,7 @@ static int crypt_integrity_ctr(struct crypt_config *cc, struct dm_target *ti)
3898 + return -EINVAL;
3899 + }
3900 + } else if (cc->integrity_iv_size)
3901 +- DMINFO("Additional per-sector space %u bytes for IV.",
3902 ++ DMDEBUG("%s: Additional per-sector space %u bytes for IV.", dm_device_name(md),
3903 + cc->integrity_iv_size);
3904 +
3905 + if ((cc->integrity_tag_size + cc->integrity_iv_size) != bi->tag_size) {
3906 +diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
3907 +index baa966e2778c..481e54ded9dc 100644
3908 +--- a/drivers/md/dm-mpath.c
3909 ++++ b/drivers/md/dm-mpath.c
3910 +@@ -554,8 +554,23 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
3911 + return DM_MAPIO_REMAPPED;
3912 + }
3913 +
3914 +-static void multipath_release_clone(struct request *clone)
3915 ++static void multipath_release_clone(struct request *clone,
3916 ++ union map_info *map_context)
3917 + {
3918 ++ if (unlikely(map_context)) {
3919 ++ /*
3920 ++ * A non-NULL map_context means the caller is still in the map
3921 ++ * method and must undo multipath_clone_and_map()
3922 ++ */
3923 ++ struct dm_mpath_io *mpio = get_mpio(map_context);
3924 ++ struct pgpath *pgpath = mpio->pgpath;
3925 ++
3926 ++ if (pgpath && pgpath->pg->ps.type->end_io)
3927 ++ pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
3928 ++ &pgpath->path,
3929 ++ mpio->nr_bytes);
3930 ++ }
3931 ++
3932 + blk_put_request(clone);
3933 + }
3934 +
3935 +diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
3936 +index 264b84e274aa..17c6a73c536c 100644
3937 +--- a/drivers/md/dm-rq.c
3938 ++++ b/drivers/md/dm-rq.c
3939 +@@ -219,7 +219,7 @@ static void dm_end_request(struct request *clone, blk_status_t error)
3940 + struct request *rq = tio->orig;
3941 +
3942 + blk_rq_unprep_clone(clone);
3943 +- tio->ti->type->release_clone_rq(clone);
3944 ++ tio->ti->type->release_clone_rq(clone, NULL);
3945 +
3946 + rq_end_stats(md, rq);
3947 + if (!rq->q->mq_ops)
3948 +@@ -270,7 +270,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
3949 + rq_end_stats(md, rq);
3950 + if (tio->clone) {
3951 + blk_rq_unprep_clone(tio->clone);
3952 +- tio->ti->type->release_clone_rq(tio->clone);
3953 ++ tio->ti->type->release_clone_rq(tio->clone, NULL);
3954 + }
3955 +
3956 + if (!rq->q->mq_ops)
3957 +@@ -495,7 +495,7 @@ check_again:
3958 + case DM_MAPIO_REMAPPED:
3959 + if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
3960 + /* -ENOMEM */
3961 +- ti->type->release_clone_rq(clone);
3962 ++ ti->type->release_clone_rq(clone, &tio->info);
3963 + return DM_MAPIO_REQUEUE;
3964 + }
3965 +
3966 +@@ -505,7 +505,7 @@ check_again:
3967 + ret = dm_dispatch_clone_request(clone, rq);
3968 + if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
3969 + blk_rq_unprep_clone(clone);
3970 +- tio->ti->type->release_clone_rq(clone);
3971 ++ tio->ti->type->release_clone_rq(clone, &tio->info);
3972 + tio->clone = NULL;
3973 + if (!rq->q->mq_ops)
3974 + r = DM_MAPIO_DELAY_REQUEUE;
3975 +diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
3976 +index 314d17ca6466..64dd0b34fcf4 100644
3977 +--- a/drivers/md/dm-target.c
3978 ++++ b/drivers/md/dm-target.c
3979 +@@ -136,7 +136,8 @@ static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq,
3980 + return DM_MAPIO_KILL;
3981 + }
3982 +
3983 +-static void io_err_release_clone_rq(struct request *clone)
3984 ++static void io_err_release_clone_rq(struct request *clone,
3985 ++ union map_info *map_context)
3986 + {
3987 + }
3988 +
3989 +diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
3990 +index ed3caceaed07..6a26afcc1fd6 100644
3991 +--- a/drivers/md/dm-thin-metadata.c
3992 ++++ b/drivers/md/dm-thin-metadata.c
3993 +@@ -2001,16 +2001,19 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
3994 +
3995 + int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
3996 + {
3997 +- int r;
3998 ++ int r = -EINVAL;
3999 + struct dm_block *sblock;
4000 + struct thin_disk_superblock *disk_super;
4001 +
4002 + down_write(&pmd->root_lock);
4003 ++ if (pmd->fail_io)
4004 ++ goto out;
4005 ++
4006 + pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
4007 +
4008 + r = superblock_lock(pmd, &sblock);
4009 + if (r) {
4010 +- DMERR("couldn't read superblock");
4011 ++ DMERR("couldn't lock superblock");
4012 + goto out;
4013 + }
4014 +
4015 +diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile
4016 +index 29a2ab9e77c5..ad8677d8c896 100644
4017 +--- a/drivers/media/cec/Makefile
4018 ++++ b/drivers/media/cec/Makefile
4019 +@@ -1,5 +1,5 @@
4020 + # SPDX-License-Identifier: GPL-2.0
4021 +-cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o
4022 ++cec-objs := cec-core.o cec-adap.o cec-api.o
4023 +
4024 + ifeq ($(CONFIG_CEC_NOTIFIER),y)
4025 + cec-objs += cec-notifier.o
4026 +diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
4027 +index a7ea27d2aa8e..4a15d53f659e 100644
4028 +--- a/drivers/media/cec/cec-adap.c
4029 ++++ b/drivers/media/cec/cec-adap.c
4030 +@@ -62,6 +62,19 @@ static unsigned int cec_log_addr2dev(const struct cec_adapter *adap, u8 log_addr
4031 + return adap->log_addrs.primary_device_type[i < 0 ? 0 : i];
4032 + }
4033 +
4034 ++u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
4035 ++ unsigned int *offset)
4036 ++{
4037 ++ unsigned int loc = cec_get_edid_spa_location(edid, size);
4038 ++
4039 ++ if (offset)
4040 ++ *offset = loc;
4041 ++ if (loc == 0)
4042 ++ return CEC_PHYS_ADDR_INVALID;
4043 ++ return (edid[loc] << 8) | edid[loc + 1];
4044 ++}
4045 ++EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
4046 ++
4047 + /*
4048 + * Queue a new event for this filehandle. If ts == 0, then set it
4049 + * to the current time.
4050 +diff --git a/drivers/media/cec/cec-edid.c b/drivers/media/cec/cec-edid.c
4051 +deleted file mode 100644
4052 +index f587e8eaefd8..000000000000
4053 +--- a/drivers/media/cec/cec-edid.c
4054 ++++ /dev/null
4055 +@@ -1,95 +0,0 @@
4056 +-// SPDX-License-Identifier: GPL-2.0-only
4057 +-/*
4058 +- * cec-edid - HDMI Consumer Electronics Control EDID & CEC helper functions
4059 +- *
4060 +- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
4061 +- */
4062 +-
4063 +-#include <linux/module.h>
4064 +-#include <linux/kernel.h>
4065 +-#include <linux/types.h>
4066 +-#include <media/cec.h>
4067 +-
4068 +-u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
4069 +- unsigned int *offset)
4070 +-{
4071 +- unsigned int loc = cec_get_edid_spa_location(edid, size);
4072 +-
4073 +- if (offset)
4074 +- *offset = loc;
4075 +- if (loc == 0)
4076 +- return CEC_PHYS_ADDR_INVALID;
4077 +- return (edid[loc] << 8) | edid[loc + 1];
4078 +-}
4079 +-EXPORT_SYMBOL_GPL(cec_get_edid_phys_addr);
4080 +-
4081 +-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
4082 +-{
4083 +- unsigned int loc = cec_get_edid_spa_location(edid, size);
4084 +- u8 sum = 0;
4085 +- unsigned int i;
4086 +-
4087 +- if (loc == 0)
4088 +- return;
4089 +- edid[loc] = phys_addr >> 8;
4090 +- edid[loc + 1] = phys_addr & 0xff;
4091 +- loc &= ~0x7f;
4092 +-
4093 +- /* update the checksum */
4094 +- for (i = loc; i < loc + 127; i++)
4095 +- sum += edid[i];
4096 +- edid[i] = 256 - sum;
4097 +-}
4098 +-EXPORT_SYMBOL_GPL(cec_set_edid_phys_addr);
4099 +-
4100 +-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
4101 +-{
4102 +- /* Check if input is sane */
4103 +- if (WARN_ON(input == 0 || input > 0xf))
4104 +- return CEC_PHYS_ADDR_INVALID;
4105 +-
4106 +- if (phys_addr == 0)
4107 +- return input << 12;
4108 +-
4109 +- if ((phys_addr & 0x0fff) == 0)
4110 +- return phys_addr | (input << 8);
4111 +-
4112 +- if ((phys_addr & 0x00ff) == 0)
4113 +- return phys_addr | (input << 4);
4114 +-
4115 +- if ((phys_addr & 0x000f) == 0)
4116 +- return phys_addr | input;
4117 +-
4118 +- /*
4119 +- * All nibbles are used so no valid physical addresses can be assigned
4120 +- * to the input.
4121 +- */
4122 +- return CEC_PHYS_ADDR_INVALID;
4123 +-}
4124 +-EXPORT_SYMBOL_GPL(cec_phys_addr_for_input);
4125 +-
4126 +-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
4127 +-{
4128 +- int i;
4129 +-
4130 +- if (parent)
4131 +- *parent = phys_addr;
4132 +- if (port)
4133 +- *port = 0;
4134 +- if (phys_addr == CEC_PHYS_ADDR_INVALID)
4135 +- return 0;
4136 +- for (i = 0; i < 16; i += 4)
4137 +- if (phys_addr & (0xf << i))
4138 +- break;
4139 +- if (i == 16)
4140 +- return 0;
4141 +- if (parent)
4142 +- *parent = phys_addr & (0xfff0 << i);
4143 +- if (port)
4144 +- *port = (phys_addr >> i) & 0xf;
4145 +- for (i += 4; i < 16; i += 4)
4146 +- if ((phys_addr & (0xf << i)) == 0)
4147 +- return -EINVAL;
4148 +- return 0;
4149 +-}
4150 +-EXPORT_SYMBOL_GPL(cec_phys_addr_validate);
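The helpers deleted here reappear below as v4l2_* functions in
v4l2-dv-timings.c, so pure EDID arithmetic no longer drags in the CEC core.
The physical address they manipulate packs four 4-bit hops into a u16, i.e.
a.b.c.d is stored as 0xabcd, and appending an input port fills the first zero
nibble. For instance:

#include <stdio.h>

int main(void)
{
        unsigned short pa = 0x3210;     /* the CEC physical address 3.2.1.0 */

        printf("%d.%d.%d.%d\n",         /* prints 3.2.1.0 */
               pa >> 12, (pa >> 8) & 0xf, (pa >> 4) & 0xf, pa & 0xf);

        /* appending input port 5 at the first zero nibble: 3.2.1.5 */
        printf("0x%04x\n", pa | 5);
        return 0;
}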
4151 +diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
4152 +index 63c9ac2c6a5f..8b1ae1d6680b 100644
4153 +--- a/drivers/media/i2c/Kconfig
4154 ++++ b/drivers/media/i2c/Kconfig
4155 +@@ -60,8 +60,9 @@ config VIDEO_TDA1997X
4156 + tristate "NXP TDA1997x HDMI receiver"
4157 + depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
4158 + depends on SND_SOC
4159 +- select SND_PCM
4160 + select HDMI
4161 ++ select SND_PCM
4162 ++ select V4L2_FWNODE
4163 + ---help---
4164 + V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
4165 +
4166 +diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
4167 +index f01964c36ad5..a4b0a89c7e7e 100644
4168 +--- a/drivers/media/i2c/adv7604.c
4169 ++++ b/drivers/media/i2c/adv7604.c
4170 +@@ -2297,8 +2297,8 @@ static int adv76xx_set_edid(struct v4l2_subdev *sd, struct v4l2_edid *edid)
4171 + edid->blocks = 2;
4172 + return -E2BIG;
4173 + }
4174 +- pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc);
4175 +- err = cec_phys_addr_validate(pa, &pa, NULL);
4176 ++ pa = v4l2_get_edid_phys_addr(edid->edid, edid->blocks * 128, &spa_loc);
4177 ++ err = v4l2_phys_addr_validate(pa, &pa, NULL);
4178 + if (err)
4179 + return err;
4180 +
4181 +diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
4182 +index bb43a75ed6d0..58662ba92d4f 100644
4183 +--- a/drivers/media/i2c/adv7842.c
4184 ++++ b/drivers/media/i2c/adv7842.c
4185 +@@ -791,8 +791,8 @@ static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
4186 + return 0;
4187 + }
4188 +
4189 +- pa = cec_get_edid_phys_addr(edid, 256, &spa_loc);
4190 +- err = cec_phys_addr_validate(pa, &pa, NULL);
4191 ++ pa = v4l2_get_edid_phys_addr(edid, 256, &spa_loc);
4192 ++ err = v4l2_phys_addr_validate(pa, &pa, NULL);
4193 + if (err)
4194 + return err;
4195 +
4196 +diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
4197 +index 26070fb6ce4e..e4c0a27b636a 100644
4198 +--- a/drivers/media/i2c/tc358743.c
4199 ++++ b/drivers/media/i2c/tc358743.c
4200 +@@ -1789,7 +1789,7 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
4201 + return -E2BIG;
4202 + }
4203 + pa = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
4204 +- err = cec_phys_addr_validate(pa, &pa, NULL);
4205 ++ err = v4l2_phys_addr_validate(pa, &pa, NULL);
4206 + if (err)
4207 + return err;
4208 +
4209 +diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
4210 +index d38682265892..1d9c028e52cb 100644
4211 +--- a/drivers/media/platform/stm32/stm32-dcmi.c
4212 ++++ b/drivers/media/platform/stm32/stm32-dcmi.c
4213 +@@ -1681,7 +1681,7 @@ static int dcmi_probe(struct platform_device *pdev)
4214 + if (irq <= 0) {
4215 + if (irq != -EPROBE_DEFER)
4216 + dev_err(&pdev->dev, "Could not get irq\n");
4217 +- return irq;
4218 ++ return irq ? irq : -ENXIO;
4219 + }
4220 +
4221 + dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4222 +diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
4223 +index 462099a141e4..7b8cf661f238 100644
4224 +--- a/drivers/media/platform/vim2m.c
4225 ++++ b/drivers/media/platform/vim2m.c
4226 +@@ -3,7 +3,8 @@
4227 + *
4228 + * This is a virtual device driver for testing mem-to-mem videobuf framework.
4229 + * It simulates a device that uses memory buffers for both source and
4230 +- * destination, processes the data and issues an "irq" (simulated by a timer).
4231 ++ * destination, processes the data and issues an "irq" (simulated by a delayed
4232 ++ * workqueue).
4233 + * The device is capable of multi-instance, multi-buffer-per-transaction
4234 + * operation (via the mem2mem framework).
4235 + *
4236 +@@ -19,7 +20,6 @@
4237 + #include <linux/module.h>
4238 + #include <linux/delay.h>
4239 + #include <linux/fs.h>
4240 +-#include <linux/timer.h>
4241 + #include <linux/sched.h>
4242 + #include <linux/slab.h>
4243 +
4244 +@@ -148,7 +148,7 @@ struct vim2m_dev {
4245 + struct mutex dev_mutex;
4246 + spinlock_t irqlock;
4247 +
4248 +- struct timer_list timer;
4249 ++ struct delayed_work work_run;
4250 +
4251 + struct v4l2_m2m_dev *m2m_dev;
4252 + };
4253 +@@ -336,12 +336,6 @@ static int device_process(struct vim2m_ctx *ctx,
4254 + return 0;
4255 + }
4256 +
4257 +-static void schedule_irq(struct vim2m_dev *dev, int msec_timeout)
4258 +-{
4259 +- dprintk(dev, "Scheduling a simulated irq\n");
4260 +- mod_timer(&dev->timer, jiffies + msecs_to_jiffies(msec_timeout));
4261 +-}
4262 +-
4263 + /*
4264 + * mem2mem callbacks
4265 + */
4266 +@@ -387,13 +381,14 @@ static void device_run(void *priv)
4267 +
4268 + device_process(ctx, src_buf, dst_buf);
4269 +
4270 +- /* Run a timer, which simulates a hardware irq */
4271 +- schedule_irq(dev, ctx->transtime);
4272 ++ /* Run delayed work, which simulates a hardware irq */
4273 ++ schedule_delayed_work(&dev->work_run, msecs_to_jiffies(ctx->transtime));
4274 + }
4275 +
4276 +-static void device_isr(struct timer_list *t)
4277 ++static void device_work(struct work_struct *w)
4278 + {
4279 +- struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
4280 ++ struct vim2m_dev *vim2m_dev =
4281 ++ container_of(w, struct vim2m_dev, work_run.work);
4282 + struct vim2m_ctx *curr_ctx;
4283 + struct vb2_v4l2_buffer *src_vb, *dst_vb;
4284 + unsigned long flags;
4285 +@@ -802,9 +797,13 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
4286 + static void vim2m_stop_streaming(struct vb2_queue *q)
4287 + {
4288 + struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
4289 ++ struct vim2m_dev *dev = ctx->dev;
4290 + struct vb2_v4l2_buffer *vbuf;
4291 + unsigned long flags;
4292 +
4293 ++ if (v4l2_m2m_get_curr_priv(dev->m2m_dev) == ctx)
4294 ++ cancel_delayed_work_sync(&dev->work_run);
4295 ++
4296 + for (;;) {
4297 + if (V4L2_TYPE_IS_OUTPUT(q->type))
4298 + vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
4299 +@@ -1015,6 +1014,7 @@ static int vim2m_probe(struct platform_device *pdev)
4300 + vfd = &dev->vfd;
4301 + vfd->lock = &dev->dev_mutex;
4302 + vfd->v4l2_dev = &dev->v4l2_dev;
4303 ++ INIT_DELAYED_WORK(&dev->work_run, device_work);
4304 +
4305 + ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
4306 + if (ret) {
4307 +@@ -1026,7 +1026,6 @@ static int vim2m_probe(struct platform_device *pdev)
4308 + v4l2_info(&dev->v4l2_dev,
4309 + "Device registered as /dev/video%d\n", vfd->num);
4310 +
4311 +- timer_setup(&dev->timer, device_isr, 0);
4312 + platform_set_drvdata(pdev, dev);
4313 +
4314 + dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
4315 +@@ -1083,7 +1082,6 @@ static int vim2m_remove(struct platform_device *pdev)
4316 + media_device_cleanup(&dev->mdev);
4317 + #endif
4318 + v4l2_m2m_release(dev->m2m_dev);
4319 +- del_timer_sync(&dev->timer);
4320 + video_unregister_device(&dev->vfd);
4321 + v4l2_device_unregister(&dev->v4l2_dev);
4322 +
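vim2m's fake completion interrupt moves from a timer to a delayed work item:
the same one-shot delay, but the handler now runs in process context, and
cancel_delayed_work_sync() in stop_streaming gives a deterministic teardown.
A userspace approximation of deferred one-shot work using a thread
(INIT_DELAYED_WORK/schedule_delayed_work in the patch are the real kernel
API; this only models the concept):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *work_fn(void *arg)
{
        usleep(*(useconds_t *)arg);     /* the simulated transaction time */
        printf("simulated irq fired\n");
        return NULL;
}

int main(void)
{
        pthread_t worker;
        useconds_t delay_us = 1000;

        /* schedule_delayed_work() analogue: run once, later, elsewhere */
        pthread_create(&worker, NULL, work_fn, &delay_us);

        /* cancel_delayed_work_sync() analogue: wait before tearing down */
        pthread_join(worker, NULL);
        return 0;
}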
4323 +diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
4324 +index 3b09ffceefd5..2e273f4dfc29 100644
4325 +--- a/drivers/media/platform/vivid/vivid-vid-cap.c
4326 ++++ b/drivers/media/platform/vivid/vivid-vid-cap.c
4327 +@@ -1724,7 +1724,7 @@ int vidioc_s_edid(struct file *file, void *_fh,
4328 + return -E2BIG;
4329 + }
4330 + phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
4331 +- ret = cec_phys_addr_validate(phys_addr, &phys_addr, NULL);
4332 ++ ret = v4l2_phys_addr_validate(phys_addr, &phys_addr, NULL);
4333 + if (ret)
4334 + return ret;
4335 +
4336 +@@ -1740,7 +1740,7 @@ set_phys_addr:
4337 +
4338 + for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
4339 + cec_s_phys_addr(dev->cec_tx_adap[i],
4340 +- cec_phys_addr_for_input(phys_addr, i + 1),
4341 ++ v4l2_phys_addr_for_input(phys_addr, i + 1),
4342 + false);
4343 + return 0;
4344 + }
4345 +diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
4346 +index 2079861d2270..e108e9befb77 100644
4347 +--- a/drivers/media/platform/vivid/vivid-vid-common.c
4348 ++++ b/drivers/media/platform/vivid/vivid-vid-common.c
4349 +@@ -863,7 +863,7 @@ int vidioc_g_edid(struct file *file, void *_fh,
4350 + if (edid->blocks > dev->edid_blocks - edid->start_block)
4351 + edid->blocks = dev->edid_blocks - edid->start_block;
4352 + if (adap)
4353 +- cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
4354 ++ v4l2_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
4355 + memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128);
4356 + return 0;
4357 + }
4358 +diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
4359 +index c7c600c1f63b..a24b40dfec97 100644
4360 +--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
4361 ++++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
4362 +@@ -15,6 +15,7 @@
4363 + #include <media/v4l2-dv-timings.h>
4364 + #include <linux/math64.h>
4365 + #include <linux/hdmi.h>
4366 ++#include <media/cec.h>
4367 +
4368 + MODULE_AUTHOR("Hans Verkuil");
4369 + MODULE_DESCRIPTION("V4L2 DV Timings Helper Functions");
4370 +@@ -942,3 +943,153 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
4371 + return c;
4372 + }
4373 + EXPORT_SYMBOL_GPL(v4l2_hdmi_rx_colorimetry);
4374 ++
4375 ++/**
4376 ++ * v4l2_get_edid_phys_addr() - find and return the physical address
4377 ++ *
4378 ++ * @edid: pointer to the EDID data
4379 ++ * @size: size in bytes of the EDID data
4380 ++ * @offset: If not %NULL then the location of the physical address
4381 ++ * bytes in the EDID will be returned here. This is set to 0
4382 ++ * if no physical address is found.
4383 ++ *
4384 ++ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
4385 ++ */
4386 ++u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
4387 ++ unsigned int *offset)
4388 ++{
4389 ++ unsigned int loc = cec_get_edid_spa_location(edid, size);
4390 ++
4391 ++ if (offset)
4392 ++ *offset = loc;
4393 ++ if (loc == 0)
4394 ++ return CEC_PHYS_ADDR_INVALID;
4395 ++ return (edid[loc] << 8) | edid[loc + 1];
4396 ++}
4397 ++EXPORT_SYMBOL_GPL(v4l2_get_edid_phys_addr);
4398 ++
4399 ++/**
4400 ++ * v4l2_set_edid_phys_addr() - find and set the physical address
4401 ++ *
4402 ++ * @edid: pointer to the EDID data
4403 ++ * @size: size in bytes of the EDID data
4404 ++ * @phys_addr: the new physical address
4405 ++ *
4406 ++ * This function finds the location of the physical address in the EDID
4407 ++ * and fills in the given physical address and updates the checksum
4408 ++ * at the end of the EDID block. It does nothing if the EDID doesn't
4409 ++ * contain a physical address.
4410 ++ */
4411 ++void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr)
4412 ++{
4413 ++ unsigned int loc = cec_get_edid_spa_location(edid, size);
4414 ++ u8 sum = 0;
4415 ++ unsigned int i;
4416 ++
4417 ++ if (loc == 0)
4418 ++ return;
4419 ++ edid[loc] = phys_addr >> 8;
4420 ++ edid[loc + 1] = phys_addr & 0xff;
4421 ++ loc &= ~0x7f;
4422 ++
4423 ++ /* update the checksum */
4424 ++ for (i = loc; i < loc + 127; i++)
4425 ++ sum += edid[i];
4426 ++ edid[i] = 256 - sum;
4427 ++}
4428 ++EXPORT_SYMBOL_GPL(v4l2_set_edid_phys_addr);
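The helper above relies on the EDID rule that every 128-byte block must sum to 0 modulo 256, so after overwriting the two source physical address bytes it recomputes the final byte of the containing block (note that loc &= ~0x7f rounds down to the block start, and 256 - sum wraps to 0 in a u8 when sum is already 0). A minimal userspace sketch of the same update, with a fixed spa_loc standing in for cec_get_edid_spa_location(), which parses the CTA-861 extension to find the real offset:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    /* Rewrite the SPA bytes of one 128-byte EDID block and fix up the
     * checksum byte so the whole block again sums to 0 mod 256. */
    static void set_block_phys_addr(uint8_t *block, unsigned int spa_loc,
                                    uint16_t phys_addr)
    {
            uint8_t sum = 0;
            unsigned int i;

            block[spa_loc] = phys_addr >> 8;
            block[spa_loc + 1] = phys_addr & 0xff;
            for (i = 0; i < 127; i++)
                    sum += block[i];
            block[127] = 256 - sum; /* wraps to 0 when sum == 0 */
    }

    int main(void)
    {
            uint8_t block[128];
            uint8_t sum = 0;
            unsigned int i;

            memset(block, 0x5a, sizeof(block));
            set_block_phys_addr(block, 0x24, 0x1200);       /* PA 1.2.0.0 */
            for (i = 0; i < 128; i++)
                    sum += block[i];
            assert(sum == 0);       /* any sink parsing this block accepts it */
            return 0;
    }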
4429 ++
4430 ++/**
4431 ++ * v4l2_phys_addr_for_input() - calculate the PA for an input
4432 ++ *
4433 ++ * @phys_addr: the physical address of the parent
4434 ++ * @input: the number of the input port, must be between 1 and 15
4435 ++ *
4436 ++ * This function calculates a new physical address based on the input
4437 ++ * port number. For example:
4438 ++ *
4439 ++ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
4440 ++ *
4441 ++ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
4442 ++ *
4443 ++ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
4444 ++ *
4445 ++ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
4446 ++ *
4447 ++ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
4448 ++ */
4449 ++u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input)
4450 ++{
4451 ++ /* Check if input is sane */
4452 ++ if (WARN_ON(input == 0 || input > 0xf))
4453 ++ return CEC_PHYS_ADDR_INVALID;
4454 ++
4455 ++ if (phys_addr == 0)
4456 ++ return input << 12;
4457 ++
4458 ++ if ((phys_addr & 0x0fff) == 0)
4459 ++ return phys_addr | (input << 8);
4460 ++
4461 ++ if ((phys_addr & 0x00ff) == 0)
4462 ++ return phys_addr | (input << 4);
4463 ++
4464 ++ if ((phys_addr & 0x000f) == 0)
4465 ++ return phys_addr | input;
4466 ++
4467 ++ /*
4468 ++ * All nibbles are used so no valid physical addresses can be assigned
4469 ++ * to the input.
4470 ++ */
4471 ++ return CEC_PHYS_ADDR_INVALID;
4472 ++}
4473 ++EXPORT_SYMBOL_GPL(v4l2_phys_addr_for_input);
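A CEC physical address packs four 4-bit levels (a.b.c.d) into a u16, and the helper above assigns an input its address by writing the port number into the parent's first still-zero nibble, most significant first. A standalone sketch of the same nibble logic, checked against the examples in the comment (PHYS_ADDR_INVALID stands in for CEC_PHYS_ADDR_INVALID):

    #include <assert.h>
    #include <stdint.h>

    #define PHYS_ADDR_INVALID 0xffff

    static uint16_t pa_for_input(uint16_t pa, uint8_t input)
    {
            if (input == 0 || input > 0xf)
                    return PHYS_ADDR_INVALID;
            if (pa == 0)
                    return input << 12;     /* root: fill the top nibble */
            if ((pa & 0x0fff) == 0)
                    return pa | (input << 8);
            if ((pa & 0x00ff) == 0)
                    return pa | (input << 4);
            if ((pa & 0x000f) == 0)
                    return pa | input;
            return PHYS_ADDR_INVALID;       /* all four levels in use */
    }

    int main(void)
    {
            assert(pa_for_input(0x0000, 2) == 0x2000);      /* 0.0.0.0 -> 2.0.0.0 */
            assert(pa_for_input(0x3000, 1) == 0x3100);      /* 3.0.0.0 -> 3.1.0.0 */
            assert(pa_for_input(0x3210, 5) == 0x3215);      /* 3.2.1.0 -> 3.2.1.5 */
            assert(pa_for_input(0x3213, 5) == PHYS_ADDR_INVALID);
            return 0;
    }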
4474 ++
4475 ++/**
4476 ++ * v4l2_phys_addr_validate() - validate a physical address from an EDID
4477 ++ *
4478 ++ * @phys_addr: the physical address to validate
4479 ++ * @parent: if not %NULL, then this is filled with the parent's PA.
4480 ++ * @port: if not %NULL, then this is filled with the input port.
4481 ++ *
4482 ++ * This validates a physical address as read from an EDID. If the
4483 ++ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
4484 ++ * then it will return -EINVAL.
4485 ++ *
4486 ++ * The parent PA is passed into %parent and the input port is passed into
4487 ++ * %port. For example:
4488 ++ *
4489 ++ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
4490 ++ *
4491 ++ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
4492 ++ *
4493 ++ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
4494 ++ *
4495 ++ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
4496 ++ *
4497 ++ * Return: 0 if the PA is valid, -EINVAL if not.
4498 ++ */
4499 ++int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
4500 ++{
4501 ++ int i;
4502 ++
4503 ++ if (parent)
4504 ++ *parent = phys_addr;
4505 ++ if (port)
4506 ++ *port = 0;
4507 ++ if (phys_addr == CEC_PHYS_ADDR_INVALID)
4508 ++ return 0;
4509 ++ for (i = 0; i < 16; i += 4)
4510 ++ if (phys_addr & (0xf << i))
4511 ++ break;
4512 ++ if (i == 16)
4513 ++ return 0;
4514 ++ if (parent)
4515 ++ *parent = phys_addr & (0xfff0 << i);
4516 ++ if (port)
4517 ++ *port = (phys_addr >> i) & 0xf;
4518 ++ for (i += 4; i < 16; i += 4)
4519 ++ if ((phys_addr & (0xf << i)) == 0)
4520 ++ return -EINVAL;
4521 ++ return 0;
4522 ++}
4523 ++EXPORT_SYMBOL_GPL(v4l2_phys_addr_validate);
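The validity rule encoded here is that a physical address reads top-down: below the first zero nibble every remaining nibble must also be zero, which is why 1.0.1.0 fails while 3.2.0.0 is fine. A userspace sketch of the same scan, including the parent/port split from the comment's examples:

    #include <assert.h>
    #include <stdint.h>

    #define PHYS_ADDR_INVALID 0xffff

    static int pa_validate(uint16_t pa, uint16_t *parent, uint16_t *port)
    {
            int i;

            if (parent)
                    *parent = pa;
            if (port)
                    *port = 0;
            if (pa == PHYS_ADDR_INVALID)
                    return 0;
            for (i = 0; i < 16; i += 4)
                    if (pa & (0xf << i))
                            break;  /* least significant non-zero nibble */
            if (i == 16)
                    return 0;       /* 0.0.0.0: the root device */
            if (parent)
                    *parent = pa & (0xfff0 << i);
            if (port)
                    *port = (pa >> i) & 0xf;
            for (i += 4; i < 16; i += 4)
                    if ((pa & (0xf << i)) == 0)
                            return -1;      /* a zero nibble above a used one */
            return 0;
    }

    int main(void)
    {
            uint16_t parent, port;

            assert(pa_validate(0x3200, &parent, &port) == 0);
            assert(parent == 0x3000 && port == 2);  /* 3.2.0.0 under 3.0.0.0 */
            assert(pa_validate(0x1010, NULL, NULL) != 0);   /* gap: invalid */
            return 0;
    }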
4524 +diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
4525 +index 11841f4b7b2b..dd938a5d0409 100644
4526 +--- a/drivers/mfd/Kconfig
4527 ++++ b/drivers/mfd/Kconfig
4528 +@@ -509,10 +509,10 @@ config INTEL_SOC_PMIC
4529 + bool "Support for Crystal Cove PMIC"
4530 + depends on ACPI && HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK
4531 + depends on X86 || COMPILE_TEST
4532 ++ depends on I2C_DESIGNWARE_PLATFORM=y
4533 + select MFD_CORE
4534 + select REGMAP_I2C
4535 + select REGMAP_IRQ
4536 +- select I2C_DESIGNWARE_PLATFORM
4537 + help
4538 + Select this option to enable support for Crystal Cove PMIC
4539 + on some Intel SoC systems. The PMIC provides ADC, GPIO,
4540 +@@ -538,10 +538,10 @@ config INTEL_SOC_PMIC_CHTWC
4541 + bool "Support for Intel Cherry Trail Whiskey Cove PMIC"
4542 + depends on ACPI && HAS_IOMEM && I2C=y && COMMON_CLK
4543 + depends on X86 || COMPILE_TEST
4544 ++ depends on I2C_DESIGNWARE_PLATFORM=y
4545 + select MFD_CORE
4546 + select REGMAP_I2C
4547 + select REGMAP_IRQ
4548 +- select I2C_DESIGNWARE_PLATFORM
4549 + help
4550 + Select this option to enable support for the Intel Cherry Trail
4551 + Whiskey Cove PMIC found on some Intel Cherry Trail systems.
4552 +@@ -1403,9 +1403,9 @@ config MFD_TPS65217
4553 + config MFD_TPS68470
4554 + bool "TI TPS68470 Power Management / LED chips"
4555 + depends on ACPI && I2C=y
4556 ++ depends on I2C_DESIGNWARE_PLATFORM=y
4557 + select MFD_CORE
4558 + select REGMAP_I2C
4559 +- select I2C_DESIGNWARE_PLATFORM
4560 + help
4561 + If you say yes here you get support for the TPS68470 series of
4562 + Power Management / LED chips.
4563 +diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
4564 +index 45baf5d9120e..61f0faddfd88 100644
4565 +--- a/drivers/mmc/host/renesas_sdhi_core.c
4566 ++++ b/drivers/mmc/host/renesas_sdhi_core.c
4567 +@@ -636,6 +636,13 @@ int renesas_sdhi_probe(struct platform_device *pdev,
4568 + host->ops.card_busy = renesas_sdhi_card_busy;
4569 + host->ops.start_signal_voltage_switch =
4570 + renesas_sdhi_start_signal_voltage_switch;
4571 ++
4572 ++ /* SDR and HS200/400 registers require HW reset */
4573 ++ if (of_data && of_data->scc_offset) {
4574 ++ priv->scc_ctl = host->ctl + of_data->scc_offset;
4575 ++ host->mmc->caps |= MMC_CAP_HW_RESET;
4576 ++ host->hw_reset = renesas_sdhi_hw_reset;
4577 ++ }
4578 + }
4579 +
4580 + /* Originally registers were 16 bit apart, could be 32 or 64 nowadays */
4581 +@@ -693,8 +700,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
4582 + const struct renesas_sdhi_scc *taps = of_data->taps;
4583 + bool hit = false;
4584 +
4585 +- host->mmc->caps |= MMC_CAP_HW_RESET;
4586 +-
4587 + for (i = 0; i < of_data->taps_num; i++) {
4588 + if (taps[i].clk_rate == 0 ||
4589 + taps[i].clk_rate == host->mmc->f_max) {
4590 +@@ -707,12 +712,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
4591 + if (!hit)
4592 + dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n");
4593 +
4594 +- priv->scc_ctl = host->ctl + of_data->scc_offset;
4595 + host->init_tuning = renesas_sdhi_init_tuning;
4596 + host->prepare_tuning = renesas_sdhi_prepare_tuning;
4597 + host->select_tuning = renesas_sdhi_select_tuning;
4598 + host->check_scc_error = renesas_sdhi_check_scc_error;
4599 +- host->hw_reset = renesas_sdhi_hw_reset;
4600 + host->prepare_hs400_tuning =
4601 + renesas_sdhi_prepare_hs400_tuning;
4602 + host->hs400_downgrade = renesas_sdhi_disable_scc;
4603 +diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
4604 +index c4115bae5db1..71794391f48f 100644
4605 +--- a/drivers/mmc/host/sdhci-pci-core.c
4606 ++++ b/drivers/mmc/host/sdhci-pci-core.c
4607 +@@ -1577,6 +1577,8 @@ static const struct pci_device_id pci_ids[] = {
4608 + SDHCI_PCI_DEVICE(INTEL, CNPH_SD, intel_byt_sd),
4609 + SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
4610 + SDHCI_PCI_DEVICE(INTEL, ICP_SD, intel_byt_sd),
4611 ++ SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
4612 ++ SDHCI_PCI_DEVICE(INTEL, CML_SD, intel_byt_sd),
4613 + SDHCI_PCI_DEVICE(O2, 8120, o2),
4614 + SDHCI_PCI_DEVICE(O2, 8220, o2),
4615 + SDHCI_PCI_DEVICE(O2, 8221, o2),
4616 +diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
4617 +index 2ef0bdca9197..6f04a62b2998 100644
4618 +--- a/drivers/mmc/host/sdhci-pci.h
4619 ++++ b/drivers/mmc/host/sdhci-pci.h
4620 +@@ -50,6 +50,8 @@
4621 + #define PCI_DEVICE_ID_INTEL_CNPH_SD 0xa375
4622 + #define PCI_DEVICE_ID_INTEL_ICP_EMMC 0x34c4
4623 + #define PCI_DEVICE_ID_INTEL_ICP_SD 0x34f8
4624 ++#define PCI_DEVICE_ID_INTEL_CML_EMMC 0x02c4
4625 ++#define PCI_DEVICE_ID_INTEL_CML_SD 0x02f5
4626 +
4627 + #define PCI_DEVICE_ID_SYSKONNECT_8000 0x8000
4628 + #define PCI_DEVICE_ID_VIA_95D0 0x95d0
4629 +diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
4630 +index 91ca77c7571c..b4347806a59e 100644
4631 +--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
4632 ++++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
4633 +@@ -77,10 +77,13 @@
4634 + #define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
4635 + #define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
4636 + #define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
4637 +-#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
4638 ++#define IWL_22000_HR_B_F0_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
4639 ++#define IWL_22000_QU_B_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
4640 ++#define IWL_22000_HR_B_FW_PRE "iwlwifi-QuQnj-b0-hr-b0-"
4641 + #define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
4642 + #define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
4643 + #define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
4644 ++#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
4645 +
4646 + #define IWL_22000_HR_MODULE_FIRMWARE(api) \
4647 + IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
4648 +@@ -88,7 +91,11 @@
4649 + IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
4650 + #define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
4651 + IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
4652 +-#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
4653 ++#define IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(api) \
4654 ++ IWL_22000_HR_B_F0_FW_PRE __stringify(api) ".ucode"
4655 ++#define IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(api) \
4656 ++ IWL_22000_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
4657 ++#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
4658 + IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
4659 + #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
4660 + IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
4661 +@@ -96,6 +103,8 @@
4662 + IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
4663 + #define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
4664 + IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
4665 ++#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
4666 ++ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
4667 +
4668 + #define NVM_HW_SECTION_NUM_FAMILY_22000 10
4669 +
4670 +@@ -190,7 +199,54 @@ const struct iwl_cfg iwl22000_2ac_cfg_jf = {
4671 +
4672 + const struct iwl_cfg iwl22000_2ax_cfg_hr = {
4673 + .name = "Intel(R) Dual Band Wireless AX 22000",
4674 +- .fw_name_pre = IWL_22000_HR_FW_PRE,
4675 ++ .fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
4676 ++ IWL_DEVICE_22500,
4677 ++ /*
4678 ++ * This device doesn't support receiving BlockAck with a large bitmap
4679 ++ * so we need to restrict the size of transmitted aggregation to the
4680 ++ * HT size; mac80211 would otherwise pick the HE max (256) by default.
4681 ++ */
4682 ++ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
4683 ++};
4684 ++
4685 ++/*
4686 ++ * All JF radio modules are part of the 9000 series, but the MAC part
4687 ++ * looks more like 22000. That's why this device is here, but called
4688 ++ * 9560 nevertheless.
4689 ++ */
4690 ++const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0 = {
4691 ++ .name = "Intel(R) Wireless-AC 9461",
4692 ++ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4693 ++ IWL_DEVICE_22500,
4694 ++};
4695 ++
4696 ++const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0 = {
4697 ++ .name = "Intel(R) Wireless-AC 9462",
4698 ++ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4699 ++ IWL_DEVICE_22500,
4700 ++};
4701 ++
4702 ++const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0 = {
4703 ++ .name = "Intel(R) Wireless-AC 9560",
4704 ++ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4705 ++ IWL_DEVICE_22500,
4706 ++};
4707 ++
4708 ++const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0 = {
4709 ++ .name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
4710 ++ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4711 ++ IWL_DEVICE_22500,
4712 ++};
4713 ++
4714 ++const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
4715 ++ .name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
4716 ++ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4717 ++ IWL_DEVICE_22500,
4718 ++};
4719 ++
4720 ++const struct iwl_cfg iwl22000_2ax_cfg_jf = {
4721 ++ .name = "Intel(R) Dual Band Wireless AX 22000",
4722 ++ .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
4723 + IWL_DEVICE_22500,
4724 + /*
4725 + * This device doesn't support receiving BlockAck with a large bitmap
4726 +@@ -264,7 +320,10 @@ const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
4727 + MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4728 + MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4729 + MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4730 ++MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4731 ++MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4732 + MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4733 + MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4734 + MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4735 + MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4736 ++MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
4737 +diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4738 +index 12fddcf15bab..2e9fd7a30398 100644
4739 +--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4740 ++++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
4741 +@@ -574,11 +574,18 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
4742 + extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
4743 + extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
4744 + extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
4745 ++extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
4746 ++extern const struct iwl_cfg iwl9462_2ac_cfg_qu_b0_jf_b0;
4747 ++extern const struct iwl_cfg iwl9560_2ac_cfg_qu_b0_jf_b0;
4748 ++extern const struct iwl_cfg killer1550i_2ac_cfg_qu_b0_jf_b0;
4749 ++extern const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0;
4750 ++extern const struct iwl_cfg iwl22000_2ax_cfg_jf;
4751 + extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
4752 ++extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0_f0;
4753 + extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
4754 + extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
4755 + extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
4756 + extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
4757 +-#endif /* CONFIG_IWLMVM */
4758 ++#endif /* CPTCFG_IWLMVM || CPTCFG_IWLFMAC */
4759 +
4760 + #endif /* __IWL_CONFIG_H__ */
4761 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4762 +index 5d65500a8aa7..0982bd99b1c3 100644
4763 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4764 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
4765 +@@ -601,6 +601,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4766 + {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
4767 + {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
4768 + {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
4769 ++ {IWL_PCI_DEVICE(0x2526, 0x4018, iwl9260_2ac_cfg)},
4770 + {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
4771 + {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
4772 + {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
4773 +@@ -696,34 +697,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
4774 + {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
4775 + {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
4776 + {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
4777 +- {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
4778 +- {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
4779 +- {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
4780 +- {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
4781 +- {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
4782 +- {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
4783 +- {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
4784 +- {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
4785 +- {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
4786 +- {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
4787 +- {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
4788 +- {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
4789 +- {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
4790 +- {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
4791 +- {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
4792 +- {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
4793 +- {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
4794 +- {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
4795 +- {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
4796 +- {IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
4797 +- {IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
4798 +- {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
4799 +- {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
4800 +- {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
4801 +- {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
4802 +- {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
4803 +- {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
4804 +- {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
4805 ++
4806 ++ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4807 ++ {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4808 ++ {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4809 ++ {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4810 ++ {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4811 ++ {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4812 ++ {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4813 ++ {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4814 ++ {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4815 ++ {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4816 ++ {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4817 ++ {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4818 ++ {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4819 ++ {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
4820 ++ {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4821 ++ {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4822 ++ {IWL_PCI_DEVICE(0x34F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
4823 ++ {IWL_PCI_DEVICE(0x34F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
4824 ++ {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4825 ++ {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4826 ++ {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4827 ++ {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4828 ++ {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4829 ++ {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
4830 ++ {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
4831 ++
4832 + {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
4833 + {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
4834 + {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
4835 +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
4836 +index 6542644bc325..cec31f0c3017 100644
4837 +--- a/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
4838 ++++ b/drivers/net/wireless/mediatek/mt76/mt76x2_mac_common.c
4839 +@@ -402,7 +402,7 @@ void mt76x2_mac_write_txwi(struct mt76x2_dev *dev, struct mt76x2_txwi *txwi,
4840 + ccmp_pn[6] = pn >> 32;
4841 + ccmp_pn[7] = pn >> 40;
4842 + txwi->iv = *((__le32 *)&ccmp_pn[0]);
4843 +- txwi->eiv = *((__le32 *)&ccmp_pn[1]);
4844 ++ txwi->eiv = *((__le32 *)&ccmp_pn[4]);
4845 + }
4846 +
4847 + spin_lock_bh(&dev->mt76.lock);
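For context on the one-byte mt76 fix above: ccmp_pn[] is laid out like the on-air CCMP header, with PN0/PN1 in bytes 0-1, the key-id/ExtIV flags in bytes 2-3, and PN2..PN5 in bytes 4-7, so the IV word is bytes 0..3 and the extended IV word is bytes 4..7. Reading the EIV from offset 1 (the old code) overlapped the two words and transmitted a corrupted packet number. A sketch of the layout (flag byte value illustrative):

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            uint64_t pn = 0x0000aabbccddeeffULL;    /* 48-bit packet number */
            uint8_t ccmp_pn[8];
            uint32_t iv, eiv;

            /* CCMP header: PN0 PN1 <rsvd> <keyid|ExtIV> PN2 PN3 PN4 PN5 */
            ccmp_pn[0] = pn;
            ccmp_pn[1] = pn >> 8;
            ccmp_pn[2] = 0;
            ccmp_pn[3] = 0x20;              /* ExtIV set, key index 0 */
            ccmp_pn[4] = pn >> 16;
            ccmp_pn[5] = pn >> 24;
            ccmp_pn[6] = pn >> 32;
            ccmp_pn[7] = pn >> 40;

            memcpy(&iv, &ccmp_pn[0], 4);    /* what txwi->iv carries */
            memcpy(&eiv, &ccmp_pn[4], 4);   /* what txwi->eiv must carry */
            assert(iv != eiv);
            assert(memcmp(&eiv, &ccmp_pn[1], 4) != 0);      /* offset 1 is wrong */
            return 0;
    }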
4848 +diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
4849 +index 67dec8860bf3..565bddcfd130 100644
4850 +--- a/drivers/nvme/host/fc.c
4851 ++++ b/drivers/nvme/host/fc.c
4852 +@@ -206,7 +206,7 @@ static LIST_HEAD(nvme_fc_lport_list);
4853 + static DEFINE_IDA(nvme_fc_local_port_cnt);
4854 + static DEFINE_IDA(nvme_fc_ctrl_cnt);
4855 +
4856 +-
4857 ++static struct workqueue_struct *nvme_fc_wq;
4858 +
4859 + /*
4860 + * These items are short-term. They will eventually be moved into
4861 +@@ -2053,7 +2053,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
4862 + */
4863 + if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
4864 + active = atomic_xchg(&ctrl->err_work_active, 1);
4865 +- if (!active && !schedule_work(&ctrl->err_work)) {
4866 ++ if (!active && !queue_work(nvme_fc_wq, &ctrl->err_work)) {
4867 + atomic_set(&ctrl->err_work_active, 0);
4868 + WARN_ON(1);
4869 + }
4870 +@@ -3321,6 +3321,10 @@ static int __init nvme_fc_init_module(void)
4871 + {
4872 + int ret;
4873 +
4874 ++ nvme_fc_wq = alloc_workqueue("nvme_fc_wq", WQ_MEM_RECLAIM, 0);
4875 ++ if (!nvme_fc_wq)
4876 ++ return -ENOMEM;
4877 ++
4878 + /*
4879 + * NOTE:
4880 + * It is expected that in the future the kernel will combine
4881 +@@ -3338,7 +3342,8 @@ static int __init nvme_fc_init_module(void)
4882 + fc_class = class_create(THIS_MODULE, "fc");
4883 + if (IS_ERR(fc_class)) {
4884 + pr_err("couldn't register class fc\n");
4885 +- return PTR_ERR(fc_class);
4886 ++ ret = PTR_ERR(fc_class);
4887 ++ goto out_destroy_wq;
4888 + }
4889 +
4890 + /*
4891 +@@ -3362,6 +3367,9 @@ out_destroy_device:
4892 + device_destroy(fc_class, MKDEV(0, 0));
4893 + out_destroy_class:
4894 + class_destroy(fc_class);
4895 ++out_destroy_wq:
4896 ++ destroy_workqueue(nvme_fc_wq);
4897 ++
4898 + return ret;
4899 + }
4900 +
4901 +@@ -3378,6 +3386,7 @@ static void __exit nvme_fc_exit_module(void)
4902 +
4903 + device_destroy(fc_class, MKDEV(0, 0));
4904 + class_destroy(fc_class);
4905 ++ destroy_workqueue(nvme_fc_wq);
4906 + }
4907 +
4908 + module_init(nvme_fc_init_module);
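The nvme-fc change above replaces schedule_work() with a dedicated queue because error recovery can run while I/O is blocked on memory reclaim, and only a workqueue created with WQ_MEM_RECLAIM has a rescuer thread that guarantees forward progress in that situation; the system workqueue used by schedule_work() does not. A hypothetical minimal module showing the same lifecycle (allocate before anything can queue onto it, destroy on the init error path and at exit):

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;
    static struct work_struct demo_work;

    static void demo_work_fn(struct work_struct *work)
    {
            pr_info("recovery work runs with reclaim guarantees\n");
    }

    static int __init demo_init(void)
    {
            demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
            if (!demo_wq)
                    return -ENOMEM;

            INIT_WORK(&demo_work, demo_work_fn);
            queue_work(demo_wq, &demo_work);        /* not schedule_work() */
            return 0;
    }

    static void __exit demo_exit(void)
    {
            flush_work(&demo_work);
            destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");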
4909 +diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
4910 +index acd50920c2ff..b57ee79f6d69 100644
4911 +--- a/drivers/pci/controller/dwc/pcie-designware-host.c
4912 ++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
4913 +@@ -356,7 +356,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4914 + dev_err(dev, "Missing *config* reg space\n");
4915 + }
4916 +
4917 +- bridge = pci_alloc_host_bridge(0);
4918 ++ bridge = devm_pci_alloc_host_bridge(dev, 0);
4919 + if (!bridge)
4920 + return -ENOMEM;
4921 +
4922 +@@ -367,7 +367,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4923 +
4924 + ret = devm_request_pci_bus_resources(dev, &bridge->windows);
4925 + if (ret)
4926 +- goto error;
4927 ++ return ret;
4928 +
4929 + /* Get the I/O and memory ranges from DT */
4930 + resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
4931 +@@ -411,8 +411,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4932 + resource_size(pp->cfg));
4933 + if (!pci->dbi_base) {
4934 + dev_err(dev, "Error with ioremap\n");
4935 +- ret = -ENOMEM;
4936 +- goto error;
4937 ++ return -ENOMEM;
4938 + }
4939 + }
4940 +
4941 +@@ -423,8 +422,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4942 + pp->cfg0_base, pp->cfg0_size);
4943 + if (!pp->va_cfg0_base) {
4944 + dev_err(dev, "Error with ioremap in function\n");
4945 +- ret = -ENOMEM;
4946 +- goto error;
4947 ++ return -ENOMEM;
4948 + }
4949 + }
4950 +
4951 +@@ -434,8 +432,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4952 + pp->cfg1_size);
4953 + if (!pp->va_cfg1_base) {
4954 + dev_err(dev, "Error with ioremap\n");
4955 +- ret = -ENOMEM;
4956 +- goto error;
4957 ++ return -ENOMEM;
4958 + }
4959 + }
4960 +
4961 +@@ -458,14 +455,14 @@ int dw_pcie_host_init(struct pcie_port *pp)
4962 + pp->num_vectors == 0) {
4963 + dev_err(dev,
4964 + "Invalid number of vectors\n");
4965 +- goto error;
4966 ++ return -EINVAL;
4967 + }
4968 + }
4969 +
4970 + if (!pp->ops->msi_host_init) {
4971 + ret = dw_pcie_allocate_domains(pp);
4972 + if (ret)
4973 +- goto error;
4974 ++ return ret;
4975 +
4976 + if (pp->msi_irq)
4977 + irq_set_chained_handler_and_data(pp->msi_irq,
4978 +@@ -474,7 +471,7 @@ int dw_pcie_host_init(struct pcie_port *pp)
4979 + } else {
4980 + ret = pp->ops->msi_host_init(pp);
4981 + if (ret < 0)
4982 +- goto error;
4983 ++ return ret;
4984 + }
4985 + }
4986 +
4987 +@@ -514,8 +511,6 @@ int dw_pcie_host_init(struct pcie_port *pp)
4988 + err_free_msi:
4989 + if (pci_msi_enabled() && !pp->ops->msi_host_init)
4990 + dw_pcie_free_msi(pp);
4991 +-error:
4992 +- pci_free_host_bridge(bridge);
4993 + return ret;
4994 + }
4995 +
4996 +diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
4997 +index 87a8887fd4d3..e292801fff7f 100644
4998 +--- a/drivers/pci/controller/dwc/pcie-qcom.c
4999 ++++ b/drivers/pci/controller/dwc/pcie-qcom.c
5000 +@@ -1091,7 +1091,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
5001 + struct qcom_pcie *pcie = to_qcom_pcie(pci);
5002 + int ret;
5003 +
5004 +- pm_runtime_get_sync(pci->dev);
5005 + qcom_ep_reset_assert(pcie);
5006 +
5007 + ret = pcie->ops->init(pcie);
5008 +@@ -1128,7 +1127,6 @@ err_disable_phy:
5009 + phy_power_off(pcie->phy);
5010 + err_deinit:
5011 + pcie->ops->deinit(pcie);
5012 +- pm_runtime_put(pci->dev);
5013 +
5014 + return ret;
5015 + }
5016 +@@ -1218,6 +1216,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
5017 + return -ENOMEM;
5018 +
5019 + pm_runtime_enable(dev);
5020 ++ ret = pm_runtime_get_sync(dev);
5021 ++ if (ret < 0) {
5022 ++ pm_runtime_disable(dev);
5023 ++ return ret;
5024 ++ }
5025 ++
5026 + pci->dev = dev;
5027 + pci->ops = &dw_pcie_ops;
5028 + pp = &pci->pp;
5029 +@@ -1226,45 +1230,57 @@ static int qcom_pcie_probe(struct platform_device *pdev)
5030 +
5031 + pcie->ops = of_device_get_match_data(dev);
5032 +
5033 +- pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
5034 +- if (IS_ERR(pcie->reset))
5035 +- return PTR_ERR(pcie->reset);
5036 ++ pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
5037 ++ if (IS_ERR(pcie->reset)) {
5038 ++ ret = PTR_ERR(pcie->reset);
5039 ++ goto err_pm_runtime_put;
5040 ++ }
5041 +
5042 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
5043 + pcie->parf = devm_ioremap_resource(dev, res);
5044 +- if (IS_ERR(pcie->parf))
5045 +- return PTR_ERR(pcie->parf);
5046 ++ if (IS_ERR(pcie->parf)) {
5047 ++ ret = PTR_ERR(pcie->parf);
5048 ++ goto err_pm_runtime_put;
5049 ++ }
5050 +
5051 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
5052 + pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
5053 +- if (IS_ERR(pci->dbi_base))
5054 +- return PTR_ERR(pci->dbi_base);
5055 ++ if (IS_ERR(pci->dbi_base)) {
5056 ++ ret = PTR_ERR(pci->dbi_base);
5057 ++ goto err_pm_runtime_put;
5058 ++ }
5059 +
5060 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
5061 + pcie->elbi = devm_ioremap_resource(dev, res);
5062 +- if (IS_ERR(pcie->elbi))
5063 +- return PTR_ERR(pcie->elbi);
5064 ++ if (IS_ERR(pcie->elbi)) {
5065 ++ ret = PTR_ERR(pcie->elbi);
5066 ++ goto err_pm_runtime_put;
5067 ++ }
5068 +
5069 + pcie->phy = devm_phy_optional_get(dev, "pciephy");
5070 +- if (IS_ERR(pcie->phy))
5071 +- return PTR_ERR(pcie->phy);
5072 ++ if (IS_ERR(pcie->phy)) {
5073 ++ ret = PTR_ERR(pcie->phy);
5074 ++ goto err_pm_runtime_put;
5075 ++ }
5076 +
5077 + ret = pcie->ops->get_resources(pcie);
5078 + if (ret)
5079 +- return ret;
5080 ++ goto err_pm_runtime_put;
5081 +
5082 + pp->ops = &qcom_pcie_dw_ops;
5083 +
5084 + if (IS_ENABLED(CONFIG_PCI_MSI)) {
5085 + pp->msi_irq = platform_get_irq_byname(pdev, "msi");
5086 +- if (pp->msi_irq < 0)
5087 +- return pp->msi_irq;
5088 ++ if (pp->msi_irq < 0) {
5089 ++ ret = pp->msi_irq;
5090 ++ goto err_pm_runtime_put;
5091 ++ }
5092 + }
5093 +
5094 + ret = phy_init(pcie->phy);
5095 + if (ret) {
5096 + pm_runtime_disable(&pdev->dev);
5097 +- return ret;
5098 ++ goto err_pm_runtime_put;
5099 + }
5100 +
5101 + platform_set_drvdata(pdev, pcie);
5102 +@@ -1273,10 +1289,16 @@ static int qcom_pcie_probe(struct platform_device *pdev)
5103 + if (ret) {
5104 + dev_err(dev, "cannot initialize host\n");
5105 + pm_runtime_disable(&pdev->dev);
5106 +- return ret;
5107 ++ goto err_pm_runtime_put;
5108 + }
5109 +
5110 + return 0;
5111 ++
5112 ++err_pm_runtime_put:
5113 ++ pm_runtime_put(dev);
5114 ++ pm_runtime_disable(dev);
5115 ++
5116 ++ return ret;
5117 + }
5118 +
5119 + static const struct of_device_id qcom_pcie_match[] = {
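The qcom_pcie_probe() rework above moves the pm_runtime_get_sync() from host init into probe and gives every subsequent failure a common unwind path, since once the usage count is raised each error must drop it and disable runtime PM before returning. A condensed sketch of that shape, with a hypothetical resource step standing in for the parf/dbi/elbi/phy acquisitions:

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int demo_acquire_resources(struct device *dev)
    {
            return 0;       /* stand-in for ioremaps, phy lookup, IRQs, ... */
    }

    static int demo_probe(struct platform_device *pdev)
    {
            struct device *dev = &pdev->dev;
            int ret;

            pm_runtime_enable(dev);
            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    pm_runtime_disable(dev);
                    return ret;
            }

            ret = demo_acquire_resources(dev);
            if (ret)
                    goto err_pm_runtime_put;

            return 0;

    err_pm_runtime_put:
            pm_runtime_put(dev);
            pm_runtime_disable(dev);
            return ret;
    }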
5120 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
5121 +index 28c64f84bfe7..06be52912dcd 100644
5122 +--- a/drivers/pci/quirks.c
5123 ++++ b/drivers/pci/quirks.c
5124 +@@ -5082,59 +5082,95 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
5125 + pci_iounmap(pdev, mmio);
5126 + pci_disable_device(pdev);
5127 + }
5128 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
5129 +- quirk_switchtec_ntb_dma_alias);
5130 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
5131 +- quirk_switchtec_ntb_dma_alias);
5132 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
5133 +- quirk_switchtec_ntb_dma_alias);
5134 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
5135 +- quirk_switchtec_ntb_dma_alias);
5136 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
5137 +- quirk_switchtec_ntb_dma_alias);
5138 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
5139 +- quirk_switchtec_ntb_dma_alias);
5140 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
5141 +- quirk_switchtec_ntb_dma_alias);
5142 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
5143 +- quirk_switchtec_ntb_dma_alias);
5144 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
5145 +- quirk_switchtec_ntb_dma_alias);
5146 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
5147 +- quirk_switchtec_ntb_dma_alias);
5148 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
5149 +- quirk_switchtec_ntb_dma_alias);
5150 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
5151 +- quirk_switchtec_ntb_dma_alias);
5152 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
5153 +- quirk_switchtec_ntb_dma_alias);
5154 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
5155 +- quirk_switchtec_ntb_dma_alias);
5156 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
5157 +- quirk_switchtec_ntb_dma_alias);
5158 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
5159 +- quirk_switchtec_ntb_dma_alias);
5160 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
5161 +- quirk_switchtec_ntb_dma_alias);
5162 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
5163 +- quirk_switchtec_ntb_dma_alias);
5164 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
5165 +- quirk_switchtec_ntb_dma_alias);
5166 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
5167 +- quirk_switchtec_ntb_dma_alias);
5168 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
5169 +- quirk_switchtec_ntb_dma_alias);
5170 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
5171 +- quirk_switchtec_ntb_dma_alias);
5172 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
5173 +- quirk_switchtec_ntb_dma_alias);
5174 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
5175 +- quirk_switchtec_ntb_dma_alias);
5176 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
5177 +- quirk_switchtec_ntb_dma_alias);
5178 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
5179 +- quirk_switchtec_ntb_dma_alias);
5180 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
5181 +- quirk_switchtec_ntb_dma_alias);
5182 +-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
5183 +- quirk_switchtec_ntb_dma_alias);
5184 ++#define SWITCHTEC_QUIRK(vid) \
5185 ++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
5186 ++ quirk_switchtec_ntb_dma_alias)
5187 ++
5188 ++SWITCHTEC_QUIRK(0x8531); /* PFX 24xG3 */
5189 ++SWITCHTEC_QUIRK(0x8532); /* PFX 32xG3 */
5190 ++SWITCHTEC_QUIRK(0x8533); /* PFX 48xG3 */
5191 ++SWITCHTEC_QUIRK(0x8534); /* PFX 64xG3 */
5192 ++SWITCHTEC_QUIRK(0x8535); /* PFX 80xG3 */
5193 ++SWITCHTEC_QUIRK(0x8536); /* PFX 96xG3 */
5194 ++SWITCHTEC_QUIRK(0x8541); /* PSX 24xG3 */
5195 ++SWITCHTEC_QUIRK(0x8542); /* PSX 32xG3 */
5196 ++SWITCHTEC_QUIRK(0x8543); /* PSX 48xG3 */
5197 ++SWITCHTEC_QUIRK(0x8544); /* PSX 64xG3 */
5198 ++SWITCHTEC_QUIRK(0x8545); /* PSX 80xG3 */
5199 ++SWITCHTEC_QUIRK(0x8546); /* PSX 96xG3 */
5200 ++SWITCHTEC_QUIRK(0x8551); /* PAX 24XG3 */
5201 ++SWITCHTEC_QUIRK(0x8552); /* PAX 32XG3 */
5202 ++SWITCHTEC_QUIRK(0x8553); /* PAX 48XG3 */
5203 ++SWITCHTEC_QUIRK(0x8554); /* PAX 64XG3 */
5204 ++SWITCHTEC_QUIRK(0x8555); /* PAX 80XG3 */
5205 ++SWITCHTEC_QUIRK(0x8556); /* PAX 96XG3 */
5206 ++SWITCHTEC_QUIRK(0x8561); /* PFXL 24XG3 */
5207 ++SWITCHTEC_QUIRK(0x8562); /* PFXL 32XG3 */
5208 ++SWITCHTEC_QUIRK(0x8563); /* PFXL 48XG3 */
5209 ++SWITCHTEC_QUIRK(0x8564); /* PFXL 64XG3 */
5210 ++SWITCHTEC_QUIRK(0x8565); /* PFXL 80XG3 */
5211 ++SWITCHTEC_QUIRK(0x8566); /* PFXL 96XG3 */
5212 ++SWITCHTEC_QUIRK(0x8571); /* PFXI 24XG3 */
5213 ++SWITCHTEC_QUIRK(0x8572); /* PFXI 32XG3 */
5214 ++SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
5215 ++SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
5216 ++SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
5217 ++SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
5218 ++
5219 ++/*
5220 ++ * On Lenovo Thinkpad P50 SKUs with an Nvidia Quadro M1000M, the BIOS does
5221 ++ * not always reset the secondary Nvidia GPU between reboots if the system
5222 ++ * is configured to use Hybrid Graphics mode. This results in the GPU
5223 ++ * being left in whatever state it was in during the *previous* boot, which
5224 ++ * causes spurious interrupts from the GPU, which in turn causes us to
5225 ++ * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly,
5226 ++ * this also completely breaks nouveau.
5227 ++ *
5228 ++ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
5229 ++ * clean state and fixes all these issues.
5230 ++ *
5231 ++ * When the machine is configured in Dedicated display mode, the issue
5232 ++ * doesn't occur. Fortunately the GPU advertises NoReset+ when in this
5233 ++ * mode, so we can detect that and avoid resetting it.
5234 ++ */
5235 ++static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
5236 ++{
5237 ++ void __iomem *map;
5238 ++ int ret;
5239 ++
5240 ++ if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
5241 ++ pdev->subsystem_device != 0x222e ||
5242 ++ !pdev->reset_fn)
5243 ++ return;
5244 ++
5245 ++ if (pci_enable_device_mem(pdev))
5246 ++ return;
5247 ++
5248 ++ /*
5249 ++ * Based on nvkm_device_ctor() in
5250 ++ * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
5251 ++ */
5252 ++ map = pci_iomap(pdev, 0, 0x23000);
5253 ++ if (!map) {
5254 ++ pci_err(pdev, "Can't map MMIO space\n");
5255 ++ goto out_disable;
5256 ++ }
5257 ++
5258 ++ /*
5259 ++ * Make sure the GPU looks like it's been POSTed before resetting
5260 ++ * it.
5261 ++ */
5262 ++ if (ioread32(map + 0x2240c) & 0x2) {
5263 ++ pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
5264 ++ ret = pci_reset_bus(pdev);
5265 ++ if (ret < 0)
5266 ++ pci_err(pdev, "Failed to reset GPU: %d\n", ret);
5267 ++ }
5268 ++
5269 ++ iounmap(map);
5270 ++out_disable:
5271 ++ pci_disable_device(pdev);
5272 ++}
5273 ++DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
5274 ++ PCI_CLASS_DISPLAY_VGA, 8,
5275 ++ quirk_reset_lenovo_thinkpad_p50_nvgpu);
5276 +diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
5277 +index e9ab90c19304..602af839421d 100644
5278 +--- a/drivers/remoteproc/qcom_q6v5.c
5279 ++++ b/drivers/remoteproc/qcom_q6v5.c
5280 +@@ -188,6 +188,14 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5281 + init_completion(&q6v5->stop_done);
5282 +
5283 + q6v5->wdog_irq = platform_get_irq_byname(pdev, "wdog");
5284 ++ if (q6v5->wdog_irq < 0) {
5285 ++ if (q6v5->wdog_irq != -EPROBE_DEFER)
5286 ++ dev_err(&pdev->dev,
5287 ++ "failed to retrieve wdog IRQ: %d\n",
5288 ++ q6v5->wdog_irq);
5289 ++ return q6v5->wdog_irq;
5290 ++ }
5291 ++
5292 + ret = devm_request_threaded_irq(&pdev->dev, q6v5->wdog_irq,
5293 + NULL, q6v5_wdog_interrupt,
5294 + IRQF_TRIGGER_RISING | IRQF_ONESHOT,
5295 +@@ -198,8 +206,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5296 + }
5297 +
5298 + q6v5->fatal_irq = platform_get_irq_byname(pdev, "fatal");
5299 +- if (q6v5->fatal_irq == -EPROBE_DEFER)
5300 +- return -EPROBE_DEFER;
5301 ++ if (q6v5->fatal_irq < 0) {
5302 ++ if (q6v5->fatal_irq != -EPROBE_DEFER)
5303 ++ dev_err(&pdev->dev,
5304 ++ "failed to retrieve fatal IRQ: %d\n",
5305 ++ q6v5->fatal_irq);
5306 ++ return q6v5->fatal_irq;
5307 ++ }
5308 +
5309 + ret = devm_request_threaded_irq(&pdev->dev, q6v5->fatal_irq,
5310 + NULL, q6v5_fatal_interrupt,
5311 +@@ -211,8 +224,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5312 + }
5313 +
5314 + q6v5->ready_irq = platform_get_irq_byname(pdev, "ready");
5315 +- if (q6v5->ready_irq == -EPROBE_DEFER)
5316 +- return -EPROBE_DEFER;
5317 ++ if (q6v5->ready_irq < 0) {
5318 ++ if (q6v5->ready_irq != -EPROBE_DEFER)
5319 ++ dev_err(&pdev->dev,
5320 ++ "failed to retrieve ready IRQ: %d\n",
5321 ++ q6v5->ready_irq);
5322 ++ return q6v5->ready_irq;
5323 ++ }
5324 +
5325 + ret = devm_request_threaded_irq(&pdev->dev, q6v5->ready_irq,
5326 + NULL, q6v5_ready_interrupt,
5327 +@@ -224,8 +242,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5328 + }
5329 +
5330 + q6v5->handover_irq = platform_get_irq_byname(pdev, "handover");
5331 +- if (q6v5->handover_irq == -EPROBE_DEFER)
5332 +- return -EPROBE_DEFER;
5333 ++ if (q6v5->handover_irq < 0) {
5334 ++ if (q6v5->handover_irq != -EPROBE_DEFER)
5335 ++ dev_err(&pdev->dev,
5336 ++ "failed to retrieve handover IRQ: %d\n",
5337 ++ q6v5->handover_irq);
5338 ++ return q6v5->handover_irq;
5339 ++ }
5340 +
5341 + ret = devm_request_threaded_irq(&pdev->dev, q6v5->handover_irq,
5342 + NULL, q6v5_handover_interrupt,
5343 +@@ -238,8 +261,13 @@ int qcom_q6v5_init(struct qcom_q6v5 *q6v5, struct platform_device *pdev,
5344 + disable_irq(q6v5->handover_irq);
5345 +
5346 + q6v5->stop_irq = platform_get_irq_byname(pdev, "stop-ack");
5347 +- if (q6v5->stop_irq == -EPROBE_DEFER)
5348 +- return -EPROBE_DEFER;
5349 ++ if (q6v5->stop_irq < 0) {
5350 ++ if (q6v5->stop_irq != -EPROBE_DEFER)
5351 ++ dev_err(&pdev->dev,
5352 ++ "failed to retrieve stop-ack IRQ: %d\n",
5353 ++ q6v5->stop_irq);
5354 ++ return q6v5->stop_irq;
5355 ++ }
5356 +
5357 + ret = devm_request_threaded_irq(&pdev->dev, q6v5->stop_irq,
5358 + NULL, q6v5_stop_interrupt,
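The five qcom_q6v5 hunks above repeat one pattern: platform_get_irq_byname() can return any negative errno, not just -EPROBE_DEFER, so each result is now checked for < 0 and propagated, logging only when the failure is not a probe deferral (deferrals are routine and would flood the log). A hypothetical helper capturing the shared shape:

    #include <linux/device.h>
    #include <linux/platform_device.h>

    /* Fetch a named IRQ; stay quiet on probe deferral, report real errors. */
    static int demo_get_irq_byname(struct platform_device *pdev,
                                   const char *name)
    {
            int irq = platform_get_irq_byname(pdev, name);

            if (irq < 0 && irq != -EPROBE_DEFER)
                    dev_err(&pdev->dev, "failed to retrieve %s IRQ: %d\n",
                            name, irq);
            return irq;     /* caller returns this unchanged when negative */
    }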
5359 +diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
5360 +index d7a4b9eca5d2..6a84b6372897 100644
5361 +--- a/drivers/remoteproc/qcom_q6v5_pil.c
5362 ++++ b/drivers/remoteproc/qcom_q6v5_pil.c
5363 +@@ -1132,6 +1132,9 @@ static int q6v5_probe(struct platform_device *pdev)
5364 + if (!desc)
5365 + return -EINVAL;
5366 +
5367 ++ if (desc->need_mem_protection && !qcom_scm_is_available())
5368 ++ return -EPROBE_DEFER;
5369 ++
5370 + rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
5371 + desc->hexagon_mba_image, sizeof(*qproc));
5372 + if (!rproc) {
5373 +diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
5374 +index a57b969b8973..3be54651698a 100644
5375 +--- a/drivers/s390/crypto/ap_bus.c
5376 ++++ b/drivers/s390/crypto/ap_bus.c
5377 +@@ -777,6 +777,8 @@ static int ap_device_probe(struct device *dev)
5378 + drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT;
5379 + if (!!devres != !!drvres)
5380 + return -ENODEV;
5381 ++ /* (re-)init queue's state machine */
5382 ++ ap_queue_reinit_state(to_ap_queue(dev));
5383 + }
5384 +
5385 + /* Add queue/card to list of active queues/cards */
5386 +@@ -809,6 +811,8 @@ static int ap_device_remove(struct device *dev)
5387 + struct ap_device *ap_dev = to_ap_dev(dev);
5388 + struct ap_driver *ap_drv = ap_dev->drv;
5389 +
5390 ++ if (is_queue_dev(dev))
5391 ++ ap_queue_remove(to_ap_queue(dev));
5392 + if (ap_drv->remove)
5393 + ap_drv->remove(ap_dev);
5394 +
5395 +@@ -1446,10 +1450,6 @@ static void ap_scan_bus(struct work_struct *unused)
5396 + aq->ap_dev.device.parent = &ac->ap_dev.device;
5397 + dev_set_name(&aq->ap_dev.device,
5398 + "%02x.%04x", id, dom);
5399 +- /* Start with a device reset */
5400 +- spin_lock_bh(&aq->lock);
5401 +- ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
5402 +- spin_unlock_bh(&aq->lock);
5403 + /* Register device */
5404 + rc = device_register(&aq->ap_dev.device);
5405 + if (rc) {
5406 +diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
5407 +index 5246cd8c16a6..7e85d238767b 100644
5408 +--- a/drivers/s390/crypto/ap_bus.h
5409 ++++ b/drivers/s390/crypto/ap_bus.h
5410 +@@ -253,6 +253,7 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
5411 + void ap_queue_remove(struct ap_queue *aq);
5412 + void ap_queue_suspend(struct ap_device *ap_dev);
5413 + void ap_queue_resume(struct ap_device *ap_dev);
5414 ++void ap_queue_reinit_state(struct ap_queue *aq);
5415 +
5416 + struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type,
5417 + int comp_device_type, unsigned int functions);
5418 +diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
5419 +index 66f7334bcb03..0aa4b3ccc948 100644
5420 +--- a/drivers/s390/crypto/ap_queue.c
5421 ++++ b/drivers/s390/crypto/ap_queue.c
5422 +@@ -718,5 +718,20 @@ void ap_queue_remove(struct ap_queue *aq)
5423 + {
5424 + ap_flush_queue(aq);
5425 + del_timer_sync(&aq->timeout);
5426 ++
5427 ++ /* reset with zero, also clears irq registration */
5428 ++ spin_lock_bh(&aq->lock);
5429 ++ ap_zapq(aq->qid);
5430 ++ aq->state = AP_STATE_BORKED;
5431 ++ spin_unlock_bh(&aq->lock);
5432 + }
5433 + EXPORT_SYMBOL(ap_queue_remove);
5434 ++
5435 ++void ap_queue_reinit_state(struct ap_queue *aq)
5436 ++{
5437 ++ spin_lock_bh(&aq->lock);
5438 ++ aq->state = AP_STATE_RESET_START;
5439 ++ ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
5440 ++ spin_unlock_bh(&aq->lock);
5441 ++}
5442 ++EXPORT_SYMBOL(ap_queue_reinit_state);
5443 +diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
5444 +index f4ae5fa30ec9..ff17a00273f7 100644
5445 +--- a/drivers/s390/crypto/zcrypt_cex2a.c
5446 ++++ b/drivers/s390/crypto/zcrypt_cex2a.c
5447 +@@ -198,7 +198,6 @@ static void zcrypt_cex2a_queue_remove(struct ap_device *ap_dev)
5448 + struct ap_queue *aq = to_ap_queue(&ap_dev->device);
5449 + struct zcrypt_queue *zq = aq->private;
5450 +
5451 +- ap_queue_remove(aq);
5452 + if (zq)
5453 + zcrypt_queue_unregister(zq);
5454 + }
5455 +diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
5456 +index 35d58dbbc4da..2a42e5962317 100644
5457 +--- a/drivers/s390/crypto/zcrypt_cex4.c
5458 ++++ b/drivers/s390/crypto/zcrypt_cex4.c
5459 +@@ -273,7 +273,6 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
5460 + struct ap_queue *aq = to_ap_queue(&ap_dev->device);
5461 + struct zcrypt_queue *zq = aq->private;
5462 +
5463 +- ap_queue_remove(aq);
5464 + if (zq)
5465 + zcrypt_queue_unregister(zq);
5466 + }
5467 +diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
5468 +index 94d9f7224aea..baa683c3f5d3 100644
5469 +--- a/drivers/s390/crypto/zcrypt_pcixcc.c
5470 ++++ b/drivers/s390/crypto/zcrypt_pcixcc.c
5471 +@@ -276,7 +276,6 @@ static void zcrypt_pcixcc_queue_remove(struct ap_device *ap_dev)
5472 + struct ap_queue *aq = to_ap_queue(&ap_dev->device);
5473 + struct zcrypt_queue *zq = aq->private;
5474 +
5475 +- ap_queue_remove(aq);
5476 + if (zq)
5477 + zcrypt_queue_unregister(zq);
5478 + }
5479 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
5480 +index 3c86e27f094d..aff073a5b52b 100644
5481 +--- a/drivers/s390/scsi/zfcp_fsf.c
5482 ++++ b/drivers/s390/scsi/zfcp_fsf.c
5483 +@@ -1594,6 +1594,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
5484 + {
5485 + struct zfcp_qdio *qdio = wka_port->adapter->qdio;
5486 + struct zfcp_fsf_req *req;
5487 ++ unsigned long req_id = 0;
5488 + int retval = -EIO;
5489 +
5490 + spin_lock_irq(&qdio->req_q_lock);
5491 +@@ -1616,6 +1617,8 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
5492 + hton24(req->qtcb->bottom.support.d_id, wka_port->d_id);
5493 + req->data = wka_port;
5494 +
5495 ++ req_id = req->req_id;
5496 ++
5497 + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
5498 + retval = zfcp_fsf_req_send(req);
5499 + if (retval)
5500 +@@ -1623,7 +1626,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
5501 + out:
5502 + spin_unlock_irq(&qdio->req_q_lock);
5503 + if (!retval)
5504 +- zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
5505 ++ zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req_id);
5506 + return retval;
5507 + }
5508 +
5509 +@@ -1649,6 +1652,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
5510 + {
5511 + struct zfcp_qdio *qdio = wka_port->adapter->qdio;
5512 + struct zfcp_fsf_req *req;
5513 ++ unsigned long req_id = 0;
5514 + int retval = -EIO;
5515 +
5516 + spin_lock_irq(&qdio->req_q_lock);
5517 +@@ -1671,6 +1675,8 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
5518 + req->data = wka_port;
5519 + req->qtcb->header.port_handle = wka_port->handle;
5520 +
5521 ++ req_id = req->req_id;
5522 ++
5523 + zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
5524 + retval = zfcp_fsf_req_send(req);
5525 + if (retval)
5526 +@@ -1678,7 +1684,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
5527 + out:
5528 + spin_unlock_irq(&qdio->req_q_lock);
5529 + if (!retval)
5530 +- zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
5531 ++ zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req_id);
5532 + return retval;
5533 + }
5534 +
5535 +diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
5536 +index ec54538f7ae1..67efdf25657f 100644
5537 +--- a/drivers/s390/virtio/virtio_ccw.c
5538 ++++ b/drivers/s390/virtio/virtio_ccw.c
5539 +@@ -132,6 +132,7 @@ struct airq_info {
5540 + struct airq_iv *aiv;
5541 + };
5542 + static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
5543 ++static DEFINE_MUTEX(airq_areas_lock);
5544 +
5545 + #define CCW_CMD_SET_VQ 0x13
5546 + #define CCW_CMD_VDEV_RESET 0x33
5547 +@@ -244,9 +245,11 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
5548 + unsigned long bit, flags;
5549 +
5550 + for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
5551 ++ mutex_lock(&airq_areas_lock);
5552 + if (!airq_areas[i])
5553 + airq_areas[i] = new_airq_info();
5554 + info = airq_areas[i];
5555 ++ mutex_unlock(&airq_areas_lock);
5556 + if (!info)
5557 + return 0;
5558 + write_lock_irqsave(&info->lock, flags);
5559 +diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
5560 +index 806ceabcabc3..bc37666f998e 100644
5561 +--- a/drivers/scsi/megaraid/megaraid_sas_base.c
5562 ++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
5563 +@@ -5218,7 +5218,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5564 + {
5565 + u32 max_sectors_1;
5566 + u32 max_sectors_2, tmp_sectors, msix_enable;
5567 +- u32 scratch_pad_2, scratch_pad_3, scratch_pad_4;
5568 ++ u32 scratch_pad_2, scratch_pad_3, scratch_pad_4, status_reg;
5569 + resource_size_t base_addr;
5570 + struct megasas_register_set __iomem *reg_set;
5571 + struct megasas_ctrl_info *ctrl_info = NULL;
5572 +@@ -5226,6 +5226,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
5573 + int i, j, loop, fw_msix_count = 0;
5574 + struct IOV_111 *iovPtr;
5575 + struct fusion_context *fusion;
5576 ++ bool do_adp_reset = true;
5577 +
5578 + fusion = instance->ctrl_context;
5579 +
5580 +@@ -5274,19 +5275,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
5581 + }
5582 +
5583 + if (megasas_transition_to_ready(instance, 0)) {
5584 +- atomic_set(&instance->fw_reset_no_pci_access, 1);
5585 +- instance->instancet->adp_reset
5586 +- (instance, instance->reg_set);
5587 +- atomic_set(&instance->fw_reset_no_pci_access, 0);
5588 +- dev_info(&instance->pdev->dev,
5589 +- "FW restarted successfully from %s!\n",
5590 +- __func__);
5591 ++ if (instance->adapter_type >= INVADER_SERIES) {
5592 ++ status_reg = instance->instancet->read_fw_status_reg(
5593 ++ instance->reg_set);
5594 ++ do_adp_reset = status_reg & MFI_RESET_ADAPTER;
5595 ++ }
5596 +
5597 +- /*waitting for about 30 second before retry*/
5598 +- ssleep(30);
5599 ++ if (do_adp_reset) {
5600 ++ atomic_set(&instance->fw_reset_no_pci_access, 1);
5601 ++ instance->instancet->adp_reset
5602 ++ (instance, instance->reg_set);
5603 ++ atomic_set(&instance->fw_reset_no_pci_access, 0);
5604 ++ dev_info(&instance->pdev->dev,
5605 ++ "FW restarted successfully from %s!\n",
5606 ++ __func__);
5607 +
5608 +- if (megasas_transition_to_ready(instance, 0))
5609 ++ /*waiting for about 30 seconds before retry*/
5610 ++ ssleep(30);
5611 ++
5612 ++ if (megasas_transition_to_ready(instance, 0))
5613 ++ goto fail_ready_state;
5614 ++ } else {
5615 + goto fail_ready_state;
5616 ++ }
5617 + }
5618 +
5619 + megasas_init_ctrl_params(instance);
5620 +@@ -5325,12 +5336,29 @@ static int megasas_init_fw(struct megasas_instance *instance)
5621 + instance->msix_vectors = (scratch_pad_2
5622 + & MR_MAX_REPLY_QUEUES_OFFSET) + 1;
5623 + fw_msix_count = instance->msix_vectors;
5624 +- } else { /* Invader series supports more than 8 MSI-x vectors*/
5625 ++ } else {
5626 + instance->msix_vectors = ((scratch_pad_2
5627 + & MR_MAX_REPLY_QUEUES_EXT_OFFSET)
5628 + >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
5629 +- if (instance->msix_vectors > 16)
5630 +- instance->msix_combined = true;
5631 ++
5632 ++ /*
5633 ++ * For Invader series, > 8 MSI-x vectors
5634 ++ * supported by FW/HW implies combined
5635 ++ * reply queue mode is enabled.
5636 ++ * For Ventura series, > 16 MSI-x vectors
5637 ++ * supported by FW/HW implies combined
5638 ++ * reply queue mode is enabled.
5639 ++ */
5640 ++ switch (instance->adapter_type) {
5641 ++ case INVADER_SERIES:
5642 ++ if (instance->msix_vectors > 8)
5643 ++ instance->msix_combined = true;
5644 ++ break;
5645 ++ case VENTURA_SERIES:
5646 ++ if (instance->msix_vectors > 16)
5647 ++ instance->msix_combined = true;
5648 ++ break;
5649 ++ }
5650 +
5651 + if (rdpq_enable)
5652 + instance->is_rdpq = (scratch_pad_2 & MR_RDPQ_MODE_OFFSET) ?
5653 +@@ -6028,13 +6056,13 @@ static int megasas_io_attach(struct megasas_instance *instance)
5654 + * @instance: Adapter soft state
5655 + * Description:
5656 + *
5657 +- * For Ventura, driver/FW will operate in 64bit DMA addresses.
5658 ++ * For Ventura, driver/FW will operate in 63bit DMA addresses.
5659 + *
5660 + * For invader-
5661 + * By default, driver/FW will operate in 32bit DMA addresses
5662 + * for consistent DMA mapping but if 32 bit consistent
5663 +- * DMA mask fails, driver will try with 64 bit consistent
5664 +- * mask provided FW is true 64bit DMA capable
5665 ++ * DMA mask fails, driver will try with 63 bit consistent
5666 ++ * mask, provided the FW is truly 63-bit DMA capable
5667 + *
5668 + * For older controllers(Thunderbolt and MFI based adapters)-
5669 + * driver/FW will operate in 32 bit consistent DMA addresses.
5670 +@@ -6047,15 +6075,15 @@ megasas_set_dma_mask(struct megasas_instance *instance)
5671 + u32 scratch_pad_2;
5672 +
5673 + pdev = instance->pdev;
5674 +- consistent_mask = (instance->adapter_type == VENTURA_SERIES) ?
5675 +- DMA_BIT_MASK(64) : DMA_BIT_MASK(32);
5676 ++ consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
5677 ++ DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
5678 +
5679 + if (IS_DMA64) {
5680 +- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
5681 ++ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
5682 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
5683 + goto fail_set_dma_mask;
5684 +
5685 +- if ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) &&
5686 ++ if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
5687 + (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
5688 + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
5689 + /*
5690 +@@ -6068,7 +6096,7 @@ megasas_set_dma_mask(struct megasas_instance *instance)
5691 + if (!(scratch_pad_2 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
5692 + goto fail_set_dma_mask;
5693 + else if (dma_set_mask_and_coherent(&pdev->dev,
5694 +- DMA_BIT_MASK(64)))
5695 ++ DMA_BIT_MASK(63)))
5696 + goto fail_set_dma_mask;
5697 + }
5698 + } else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
5699 +@@ -6080,8 +6108,8 @@ megasas_set_dma_mask(struct megasas_instance *instance)
5700 + instance->consistent_mask_64bit = true;
5701 +
5702 + dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
5703 +- ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "64" : "32"),
5704 +- (instance->consistent_mask_64bit ? "64" : "32"));
5705 ++ ((*pdev->dev.dma_mask == DMA_BIT_MASK(64)) ? "63" : "32"),
5706 ++ (instance->consistent_mask_64bit ? "63" : "32"));
5707 +
5708 + return 0;
5709 +
5710 +diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
5711 +index 1f1a05a90d3d..fc08e46a93ca 100644
5712 +--- a/drivers/scsi/qla2xxx/qla_gs.c
5713 ++++ b/drivers/scsi/qla2xxx/qla_gs.c
5714 +@@ -3360,15 +3360,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
5715 + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
5716 + sp->done = qla24xx_async_gpsc_sp_done;
5717 +
5718 +- rval = qla2x00_start_sp(sp);
5719 +- if (rval != QLA_SUCCESS)
5720 +- goto done_free_sp;
5721 +-
5722 + ql_dbg(ql_dbg_disc, vha, 0x205e,
5723 + "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
5724 + sp->name, fcport->port_name, sp->handle,
5725 + fcport->loop_id, fcport->d_id.b.domain,
5726 + fcport->d_id.b.area, fcport->d_id.b.al_pa);
5727 ++
5728 ++ rval = qla2x00_start_sp(sp);
5729 ++ if (rval != QLA_SUCCESS)
5730 ++ goto done_free_sp;
5731 + return rval;
5732 +
5733 + done_free_sp:
5734 +@@ -3729,13 +3729,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
5735 + sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
5736 + sp->done = qla2x00_async_gpnid_sp_done;
5737 +
5738 ++ ql_dbg(ql_dbg_disc, vha, 0x2067,
5739 ++ "Async-%s hdl=%x ID %3phC.\n", sp->name,
5740 ++ sp->handle, ct_req->req.port_id.port_id);
5741 ++
5742 + rval = qla2x00_start_sp(sp);
5743 + if (rval != QLA_SUCCESS)
5744 + goto done_free_sp;
5745 +
5746 +- ql_dbg(ql_dbg_disc, vha, 0x2067,
5747 +- "Async-%s hdl=%x ID %3phC.\n", sp->name,
5748 +- sp->handle, ct_req->req.port_id.port_id);
5749 + return rval;
5750 +
5751 + done_free_sp:
5752 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
5753 +index ddce32fe0513..39a8f4a671aa 100644
5754 +--- a/drivers/scsi/qla2xxx/qla_init.c
5755 ++++ b/drivers/scsi/qla2xxx/qla_init.c
5756 +@@ -247,6 +247,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
5757 +
5758 + }
5759 +
5760 ++ ql_dbg(ql_dbg_disc, vha, 0x2072,
5761 ++ "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
5762 ++ "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
5763 ++ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
5764 ++ fcport->login_retry);
5765 ++
5766 + rval = qla2x00_start_sp(sp);
5767 + if (rval != QLA_SUCCESS) {
5768 + fcport->flags |= FCF_LOGIN_NEEDED;
5769 +@@ -254,11 +260,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
5770 + goto done_free_sp;
5771 + }
5772 +
5773 +- ql_dbg(ql_dbg_disc, vha, 0x2072,
5774 +- "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
5775 +- "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
5776 +- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
5777 +- fcport->login_retry);
5778 + return rval;
5779 +
5780 + done_free_sp:
5781 +@@ -303,15 +304,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
5782 + qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
5783 +
5784 + sp->done = qla2x00_async_logout_sp_done;
5785 +- rval = qla2x00_start_sp(sp);
5786 +- if (rval != QLA_SUCCESS)
5787 +- goto done_free_sp;
5788 +
5789 + ql_dbg(ql_dbg_disc, vha, 0x2070,
5790 + "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
5791 + sp->handle, fcport->loop_id, fcport->d_id.b.domain,
5792 + fcport->d_id.b.area, fcport->d_id.b.al_pa,
5793 + fcport->port_name);
5794 ++
5795 ++ rval = qla2x00_start_sp(sp);
5796 ++ if (rval != QLA_SUCCESS)
5797 ++ goto done_free_sp;
5798 + return rval;
5799 +
5800 + done_free_sp:
5801 +@@ -489,13 +491,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
5802 + sp->done = qla2x00_async_adisc_sp_done;
5803 + if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
5804 + lio->u.logio.flags |= SRB_LOGIN_RETRIED;
5805 +- rval = qla2x00_start_sp(sp);
5806 +- if (rval != QLA_SUCCESS)
5807 +- goto done_free_sp;
5808 +
5809 + ql_dbg(ql_dbg_disc, vha, 0x206f,
5810 + "Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
5811 + sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
5812 ++
5813 ++ rval = qla2x00_start_sp(sp);
5814 ++ if (rval != QLA_SUCCESS)
5815 ++ goto done_free_sp;
5816 ++
5817 + return rval;
5818 +
5819 + done_free_sp:
5820 +@@ -1161,14 +1165,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
5821 +
5822 + sp->done = qla24xx_async_gpdb_sp_done;
5823 +
5824 +- rval = qla2x00_start_sp(sp);
5825 +- if (rval != QLA_SUCCESS)
5826 +- goto done_free_sp;
5827 +-
5828 + ql_dbg(ql_dbg_disc, vha, 0x20dc,
5829 + "Async-%s %8phC hndl %x opt %x\n",
5830 + sp->name, fcport->port_name, sp->handle, opt);
5831 +
5832 ++ rval = qla2x00_start_sp(sp);
5833 ++ if (rval != QLA_SUCCESS)
5834 ++ goto done_free_sp;
5835 + return rval;
5836 +
5837 + done_free_sp:
5838 +@@ -1698,15 +1701,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
5839 + tm_iocb->u.tmf.data = tag;
5840 + sp->done = qla2x00_tmf_sp_done;
5841 +
5842 +- rval = qla2x00_start_sp(sp);
5843 +- if (rval != QLA_SUCCESS)
5844 +- goto done_free_sp;
5845 +-
5846 + ql_dbg(ql_dbg_taskm, vha, 0x802f,
5847 + "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
5848 + sp->handle, fcport->loop_id, fcport->d_id.b.domain,
5849 + fcport->d_id.b.area, fcport->d_id.b.al_pa);
5850 +
5851 ++ rval = qla2x00_start_sp(sp);
5852 ++ if (rval != QLA_SUCCESS)
5853 ++ goto done_free_sp;
5854 + wait_for_completion(&tm_iocb->u.tmf.comp);
5855 +
5856 + rval = tm_iocb->u.tmf.data;
5857 +@@ -1790,14 +1792,14 @@ qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
5858 +
5859 + sp->done = qla24xx_abort_sp_done;
5860 +
5861 +- rval = qla2x00_start_sp(sp);
5862 +- if (rval != QLA_SUCCESS)
5863 +- goto done_free_sp;
5864 +-
5865 + ql_dbg(ql_dbg_async, vha, 0x507c,
5866 + "Abort command issued - hdl=%x, target_id=%x\n",
5867 + cmd_sp->handle, fcport->tgt_id);
5868 +
5869 ++ rval = qla2x00_start_sp(sp);
5870 ++ if (rval != QLA_SUCCESS)
5871 ++ goto done_free_sp;
5872 ++
5873 + if (wait) {
5874 + wait_for_completion(&abt_iocb->u.abt.comp);
5875 + rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
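
Every qla2xxx hunk above performs the same move: the ql_dbg() trace is emitted before qla2x00_start_sp() hands the srb to the firmware, because once the command is started its completion path may free the srb, making any later dereference a use-after-free. A user-space model of the hazard; the struct and function names are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct request { int handle; };

/* Models qla2x00_start_sp(): after this returns, the completion
 * path may free the request at any time. */
static int start_request(struct request *req)
{
	free(req);		/* completion ran "immediately" */
	return 0;
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return 1;
	req->handle = 42;

	/* Log BEFORE dispatching: req is still guaranteed alive here. */
	printf("dispatching request hdl=%x\n", req->handle);

	if (start_request(req))
		return 1;
	/* Reading req->handle here would race with the free above. */
	return 0;
}
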
5876 +diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
5877 +index 088772ebef9b..77838d8fd9bb 100644
5878 +--- a/drivers/spi/spi-gpio.c
5879 ++++ b/drivers/spi/spi-gpio.c
5880 +@@ -410,7 +410,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
5881 + return status;
5882 +
5883 + master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
5884 +- master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL;
5885 ++ master->mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;
5886 + master->flags = master_flags;
5887 + master->bus_num = pdev->id;
5888 + /* The master needs to think there is a chipselect even if not connected */
5889 +@@ -437,7 +437,6 @@ static int spi_gpio_probe(struct platform_device *pdev)
5890 + spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
5891 + }
5892 + spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
5893 +- spi_gpio->bitbang.flags = SPI_CS_HIGH;
5894 +
5895 + status = spi_bitbang_start(&spi_gpio->bitbang);
5896 + if (status)
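
Here SPI_CS_HIGH stops being forced through the bitbang flags and is instead advertised in master->mode_bits, the mask the SPI core consults when a device requests a mode. A rough model of that capability check; the flag values mirror the usual uapi constants, but the whole program is a sketch, not the kernel code:

#include <stdio.h>

#define SPI_CPHA    0x01
#define SPI_CPOL    0x02
#define SPI_CS_HIGH 0x04
#define SPI_3WIRE   0x10

/* Models spi_setup(): reject any mode bit the controller
 * did not advertise in mode_bits. */
static int spi_setup_check(unsigned int ctlr_mode_bits,
			   unsigned int dev_mode)
{
	unsigned int bad = dev_mode & ~ctlr_mode_bits;

	if (bad) {
		fprintf(stderr, "unsupported mode bits %#x\n", bad);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int mode_bits = SPI_3WIRE | SPI_CPHA | SPI_CPOL | SPI_CS_HIGH;

	/* With SPI_CS_HIGH advertised, an active-high chip select passes. */
	printf("cs-high ok: %d\n", spi_setup_check(mode_bits, SPI_CS_HIGH));
	return 0;
}
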
5897 +diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
5898 +index 649caae2b603..25798119426b 100644
5899 +--- a/drivers/staging/wilc1000/linux_wlan.c
5900 ++++ b/drivers/staging/wilc1000/linux_wlan.c
5901 +@@ -649,17 +649,17 @@ static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif)
5902 + goto fail_locks;
5903 + }
5904 +
5905 +- if (wl->gpio_irq && init_irq(dev)) {
5906 +- ret = -EIO;
5907 +- goto fail_locks;
5908 +- }
5909 +-
5910 + ret = wlan_initialize_threads(dev);
5911 + if (ret < 0) {
5912 + ret = -EIO;
5913 + goto fail_wilc_wlan;
5914 + }
5915 +
5916 ++ if (wl->gpio_irq && init_irq(dev)) {
5917 ++ ret = -EIO;
5918 ++ goto fail_threads;
5919 ++ }
5920 ++
5921 + if (!wl->dev_irq_num &&
5922 + wl->hif_func->enable_interrupt &&
5923 + wl->hif_func->enable_interrupt(wl)) {
5924 +@@ -715,7 +715,7 @@ fail_irq_enable:
5925 + fail_irq_init:
5926 + if (wl->dev_irq_num)
5927 + deinit_irq(dev);
5928 +-
5929 ++fail_threads:
5930 + wlan_deinitialize_threads(dev);
5931 + fail_wilc_wlan:
5932 + wilc_wlan_cleanup(dev);
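
The wilc1000 fix reorders bring-up so the IRQ is requested only after the threads it feeds exist, and adds a fail_threads label so the error path unwinds in exact reverse of the new init order. The idiom in isolation, with invented names:

#include <stdio.h>

static int init_threads(void) { puts("threads up"); return 0; }
static void deinit_threads(void) { puts("threads down"); }
static int init_irq(void) { puts("irq up"); return 0; }

/* Error labels unwind in the exact reverse of init order, so a
 * failure at any step tears down only what was already set up. */
static int wlan_init(void)
{
	int ret;

	ret = init_threads();
	if (ret)
		goto fail;

	ret = init_irq();	/* IRQ last: its handler needs the threads */
	if (ret)
		goto fail_threads;

	return 0;

fail_threads:
	deinit_threads();
fail:
	return ret;
}

int main(void)
{
	return wlan_init();
}
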
5933 +diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
5934 +index ce1321a5cb7b..854b2bcca7c1 100644
5935 +--- a/drivers/target/target_core_iblock.c
5936 ++++ b/drivers/target/target_core_iblock.c
5937 +@@ -514,8 +514,8 @@ iblock_execute_write_same(struct se_cmd *cmd)
5938 + }
5939 +
5940 + /* Always in 512 byte units for Linux/Block */
5941 +- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
5942 +- sectors -= 1;
5943 ++ block_lba += sg->length >> SECTOR_SHIFT;
5944 ++ sectors -= sg->length >> SECTOR_SHIFT;
5945 + }
5946 +
5947 + iblock_submit_bios(&list);
5948 +@@ -757,7 +757,7 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
5949 + }
5950 +
5951 + /* Always in 512 byte units for Linux/Block */
5952 +- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
5953 ++ block_lba += sg->length >> SECTOR_SHIFT;
5954 + sg_num--;
5955 + }
5956 +
5957 +diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
5958 +index 9cc3843404d4..cefc641145b3 100644
5959 +--- a/drivers/target/target_core_iblock.h
5960 ++++ b/drivers/target/target_core_iblock.h
5961 +@@ -9,7 +9,6 @@
5962 + #define IBLOCK_VERSION "4.0"
5963 +
5964 + #define IBLOCK_MAX_CDBS 16
5965 +-#define IBLOCK_LBA_SHIFT 9
5966 +
5967 + struct iblock_req {
5968 + refcount_t pending;
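
The WRITE SAME fix decrements sectors by the number of 512-byte sectors each scatterlist entry covers rather than by one per loop iteration; with a 4096-byte entry the old code advanced the LBA by eight sectors but consumed only one from the budget, overrunning the requested range. The arithmetic on its own (SECTOR_SHIFT is 9, as in the kernel; the sizes are made up):

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors */

int main(void)
{
	unsigned int sg_len = 4096;		/* one scatterlist entry */
	unsigned long long sectors = 64;	/* sectors left to write */
	unsigned long long lba = 0;

	while (sectors) {
		lba += sg_len >> SECTOR_SHIFT;		/* advance 8 sectors */
		sectors -= sg_len >> SECTOR_SHIFT;	/* old code: sectors -= 1 */
	}
	/* With "sectors -= 1" the loop would run 64 times and submit
	 * 8x the requested amount of data. */
	printf("final lba: %llu\n", lba);
	return 0;
}
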
5969 +diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
5970 +index fb20aa974ae1..819ae3b2bd7e 100644
5971 +--- a/drivers/usb/typec/tcpm.c
5972 ++++ b/drivers/usb/typec/tcpm.c
5973 +@@ -37,6 +37,7 @@
5974 + S(SRC_ATTACHED), \
5975 + S(SRC_STARTUP), \
5976 + S(SRC_SEND_CAPABILITIES), \
5977 ++ S(SRC_SEND_CAPABILITIES_TIMEOUT), \
5978 + S(SRC_NEGOTIATE_CAPABILITIES), \
5979 + S(SRC_TRANSITION_SUPPLY), \
5980 + S(SRC_READY), \
5981 +@@ -2987,10 +2988,34 @@ static void run_state_machine(struct tcpm_port *port)
5982 + /* port->hard_reset_count = 0; */
5983 + port->caps_count = 0;
5984 + port->pd_capable = true;
5985 +- tcpm_set_state_cond(port, hard_reset_state(port),
5986 ++ tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
5987 + PD_T_SEND_SOURCE_CAP);
5988 + }
5989 + break;
5990 ++ case SRC_SEND_CAPABILITIES_TIMEOUT:
5991 ++ /*
5992 ++ * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
5993 ++ *
5994 ++ * PD 2.0 sinks are supposed to accept src-capabilities with a
5995 ++ * 3.0 header and simply ignore any src PDOs which the sink does
5996 ++	 * not understand, such as PPS, but some 2.0 sinks instead ignore
5997 ++ * the entire PD_DATA_SOURCE_CAP message, causing contract
5998 ++ * negotiation to fail.
5999 ++ *
6000 ++ * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
6001 ++ * sending src-capabilities with a lower PD revision to
6002 ++ * make these broken sinks work.
6003 ++ */
6004 ++ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
6005 ++ tcpm_set_state(port, HARD_RESET_SEND, 0);
6006 ++ } else if (port->negotiated_rev > PD_REV20) {
6007 ++ port->negotiated_rev--;
6008 ++ port->hard_reset_count = 0;
6009 ++ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
6010 ++ } else {
6011 ++ tcpm_set_state(port, hard_reset_state(port), 0);
6012 ++ }
6013 ++ break;
6014 + case SRC_NEGOTIATE_CAPABILITIES:
6015 + ret = tcpm_pd_check_request(port);
6016 + if (ret < 0) {
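
The new SRC_SEND_CAPABILITIES_TIMEOUT state implements a fallback ladder: hard-reset up to PD_N_HARD_RESET_COUNT times, then drop the advertised PD revision and try again, and only then fall through to the generic hard-reset path. A compact model of that decision; note that the real driver bumps hard_reset_count in its hard-reset machinery rather than here, and the revision constants below are stand-ins:

#include <stdio.h>

#define PD_N_HARD_RESET_COUNT 2
#define PD_REV20 0
#define PD_REV30 1

struct port { int hard_reset_count; int negotiated_rev; };

/* One timeout event: hard-reset a few times, then drop to PD 2.0,
 * then give up via the normal hard-reset path. */
static const char *on_caps_timeout(struct port *p)
{
	if (p->hard_reset_count < PD_N_HARD_RESET_COUNT) {
		p->hard_reset_count++;	/* done elsewhere in the driver */
		return "HARD_RESET_SEND";
	} else if (p->negotiated_rev > PD_REV20) {
		p->negotiated_rev--;
		p->hard_reset_count = 0;
		return "SRC_SEND_CAPABILITIES";
	}
	return "hard_reset_state()";
}

int main(void)
{
	struct port p = { 0, PD_REV30 };

	for (int i = 0; i < 6; i++)
		printf("timeout %d -> %s (rev=%d)\n", i,
		       on_caps_timeout(&p), p.negotiated_rev);
	return 0;
}
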
6017 +diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
6018 +index 40589850eb33..a9be2d8e98df 100644
6019 +--- a/drivers/vhost/test.c
6020 ++++ b/drivers/vhost/test.c
6021 +@@ -23,6 +23,12 @@
6022 + * Using this limit prevents one virtqueue from starving others. */
6023 + #define VHOST_TEST_WEIGHT 0x80000
6024 +
6025 ++/* Max number of packets transferred before requeueing the job.
6026 ++ * Using this limit prevents one virtqueue from starving others with
6027 ++ * pkts.
6028 ++ */
6029 ++#define VHOST_TEST_PKT_WEIGHT 256
6030 ++
6031 + enum {
6032 + VHOST_TEST_VQ = 0,
6033 + VHOST_TEST_VQ_MAX = 1,
6034 +@@ -81,10 +87,8 @@ static void handle_vq(struct vhost_test *n)
6035 + }
6036 + vhost_add_used_and_signal(&n->dev, vq, head, 0);
6037 + total_len += len;
6038 +- if (unlikely(total_len >= VHOST_TEST_WEIGHT)) {
6039 +- vhost_poll_queue(&vq->poll);
6040 ++ if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
6041 + break;
6042 +- }
6043 + }
6044 +
6045 + mutex_unlock(&vq->mutex);
6046 +@@ -116,7 +120,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
6047 + dev = &n->dev;
6048 + vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
6049 + n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
6050 +- vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX);
6051 ++ vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
6052 ++ VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
6053 +
6054 + f->private_data = n;
6055 +
6056 +diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
6057 +index 0752f8dc47b1..98b6eb902df9 100644
6058 +--- a/drivers/vhost/vhost.c
6059 ++++ b/drivers/vhost/vhost.c
6060 +@@ -2073,7 +2073,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
6061 + /* If this is an input descriptor, increment that count. */
6062 + if (access == VHOST_ACCESS_WO) {
6063 + *in_num += ret;
6064 +- if (unlikely(log)) {
6065 ++ if (unlikely(log && ret)) {
6066 + log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
6067 + log[*log_num].len = vhost32_to_cpu(vq, desc.len);
6068 + ++*log_num;
6069 +@@ -2216,7 +2216,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
6070 + /* If this is an input descriptor,
6071 + * increment that count. */
6072 + *in_num += ret;
6073 +- if (unlikely(log)) {
6074 ++ if (unlikely(log && ret)) {
6075 + log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
6076 + log[*log_num].len = vhost32_to_cpu(vq, desc.len);
6077 + ++*log_num;
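
Two separate hardenings land here: vhost/test switches to the shared vhost_exceeds_weight() accounting so one virtqueue cannot monopolize the worker, and the core now logs a descriptor only when it actually produced iovecs (log && ret), keeping the log array in step with in_num. A sketch of the byte-weight guard alone, with arbitrary limits:

#include <stdio.h>

#define BYTE_WEIGHT 0x80000	/* bytes handled before requeueing */

static int exceeds_weight(long long total_len)
{
	return total_len >= BYTE_WEIGHT;
}

int main(void)
{
	long long total_len = 0;
	int len = 4096, handled = 0;

	for (;;) {
		/* ... pop one descriptor of 'len' bytes and process it ... */
		total_len += len;
		handled++;
		if (exceeds_weight(total_len)) {
			/* requeue the poll work so other vqs get a turn */
			break;
		}
	}
	printf("handled %d buffers before yielding\n", handled);
	return 0;
}
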
6078 +diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
6079 +index 9bfa66592aa7..c71e534ca7ef 100644
6080 +--- a/fs/btrfs/compression.c
6081 ++++ b/fs/btrfs/compression.c
6082 +@@ -42,6 +42,22 @@ const char* btrfs_compress_type2str(enum btrfs_compression_type type)
6083 + return NULL;
6084 + }
6085 +
6086 ++bool btrfs_compress_is_valid_type(const char *str, size_t len)
6087 ++{
6088 ++ int i;
6089 ++
6090 ++ for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
6091 ++ size_t comp_len = strlen(btrfs_compress_types[i]);
6092 ++
6093 ++ if (len < comp_len)
6094 ++ continue;
6095 ++
6096 ++ if (!strncmp(btrfs_compress_types[i], str, comp_len))
6097 ++ return true;
6098 ++ }
6099 ++ return false;
6100 ++}
6101 ++
6102 + static int btrfs_decompress_bio(struct compressed_bio *cb);
6103 +
6104 + static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
6105 +diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
6106 +index ddda9b80bf20..f97d90a1fa53 100644
6107 +--- a/fs/btrfs/compression.h
6108 ++++ b/fs/btrfs/compression.h
6109 +@@ -127,6 +127,7 @@ extern const struct btrfs_compress_op btrfs_lzo_compress;
6110 + extern const struct btrfs_compress_op btrfs_zstd_compress;
6111 +
6112 + const char* btrfs_compress_type2str(enum btrfs_compression_type type);
6113 ++bool btrfs_compress_is_valid_type(const char *str, size_t len);
6114 +
6115 + int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
6116 +
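
btrfs_compress_is_valid_type() is a prefix match: the len < comp_len guard skips table entries longer than the input, and strncmp() over comp_len bytes means a value such as "zlib:9" (type plus level) also validates. The same loop as a stand-alone program; the table below assumes the usual btrfs_compress_types[] contents:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static const char *const compress_types[] = { "", "zlib", "lzo", "zstd" };
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static bool compress_is_valid_type(const char *str, size_t len)
{
	/* start at 1: entry 0 is the empty "no compression" name */
	for (size_t i = 1; i < ARRAY_SIZE(compress_types); i++) {
		size_t comp_len = strlen(compress_types[i]);

		if (len < comp_len)
			continue;
		if (!strncmp(compress_types[i], str, comp_len))
			return true;
	}
	return false;
}

int main(void)
{
	printf("%d\n", compress_is_valid_type("zlib:9", 6));	/* 1 */
	printf("%d\n", compress_is_valid_type("gzip", 4));	/* 0 */
	return 0;
}
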
6117 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
6118 +index 82682da5a40d..4644f9b629a5 100644
6119 +--- a/fs/btrfs/ctree.h
6120 ++++ b/fs/btrfs/ctree.h
6121 +@@ -3200,6 +3200,9 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
6122 + struct btrfs_trans_handle *trans, int mode,
6123 + u64 start, u64 num_bytes, u64 min_size,
6124 + loff_t actual_len, u64 *alloc_hint);
6125 ++int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
6126 ++ u64 start, u64 end, int *page_started, unsigned long *nr_written,
6127 ++ struct writeback_control *wbc);
6128 + extern const struct dentry_operations btrfs_dentry_operations;
6129 + #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
6130 + void btrfs_test_inode_set_ops(struct inode *inode);
6131 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
6132 +index 0cc800d22a08..88c939f7aad9 100644
6133 +--- a/fs/btrfs/extent-tree.c
6134 ++++ b/fs/btrfs/extent-tree.c
6135 +@@ -10478,22 +10478,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
6136 + }
6137 + spin_unlock(&block_group->lock);
6138 +
6139 +- if (remove_em) {
6140 +- struct extent_map_tree *em_tree;
6141 +-
6142 +- em_tree = &fs_info->mapping_tree.map_tree;
6143 +- write_lock(&em_tree->lock);
6144 +- /*
6145 +- * The em might be in the pending_chunks list, so make sure the
6146 +- * chunk mutex is locked, since remove_extent_mapping() will
6147 +- * delete us from that list.
6148 +- */
6149 +- remove_extent_mapping(em_tree, em);
6150 +- write_unlock(&em_tree->lock);
6151 +- /* once for the tree */
6152 +- free_extent_map(em);
6153 +- }
6154 +-
6155 + mutex_unlock(&fs_info->chunk_mutex);
6156 +
6157 + ret = remove_block_group_free_space(trans, block_group);
6158 +@@ -10510,6 +10494,24 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
6159 + goto out;
6160 +
6161 + ret = btrfs_del_item(trans, root, path);
6162 ++ if (ret)
6163 ++ goto out;
6164 ++
6165 ++ if (remove_em) {
6166 ++ struct extent_map_tree *em_tree;
6167 ++
6168 ++ em_tree = &fs_info->mapping_tree.map_tree;
6169 ++ write_lock(&em_tree->lock);
6170 ++ /*
6171 ++ * The em might be in the pending_chunks list, so make sure the
6172 ++ * chunk mutex is locked, since remove_extent_mapping() will
6173 ++ * delete us from that list.
6174 ++ */
6175 ++ remove_extent_mapping(em_tree, em);
6176 ++ write_unlock(&em_tree->lock);
6177 ++ /* once for the tree */
6178 ++ free_extent_map(em);
6179 ++ }
6180 + out:
6181 + btrfs_free_path(path);
6182 + return ret;
6183 +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
6184 +index 90b0a6eff535..cb598eb4f3bd 100644
6185 +--- a/fs/btrfs/extent_io.c
6186 ++++ b/fs/btrfs/extent_io.c
6187 +@@ -3199,7 +3199,7 @@ static void update_nr_written(struct writeback_control *wbc,
6188 + /*
6189 + * helper for __extent_writepage, doing all of the delayed allocation setup.
6190 + *
6191 +- * This returns 1 if our fill_delalloc function did all the work required
6192 ++ * This returns 1 if the btrfs_run_delalloc_range function did all the work required
6193 + * to write the page (copy into inline extent). In this case the IO has
6194 + * been started and the page is already unlocked.
6195 + *
6196 +@@ -3220,7 +3220,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
6197 + int ret;
6198 + int page_started = 0;
6199 +
6200 +- if (epd->extent_locked || !tree->ops || !tree->ops->fill_delalloc)
6201 ++ if (epd->extent_locked)
6202 + return 0;
6203 +
6204 + while (delalloc_end < page_end) {
6205 +@@ -3233,18 +3233,16 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
6206 + delalloc_start = delalloc_end + 1;
6207 + continue;
6208 + }
6209 +- ret = tree->ops->fill_delalloc(inode, page,
6210 +- delalloc_start,
6211 +- delalloc_end,
6212 +- &page_started,
6213 +- nr_written, wbc);
6214 ++ ret = btrfs_run_delalloc_range(inode, page, delalloc_start,
6215 ++ delalloc_end, &page_started, nr_written, wbc);
6216 + /* File system has been set read-only */
6217 + if (ret) {
6218 + SetPageError(page);
6219 +- /* fill_delalloc should be return < 0 for error
6220 +- * but just in case, we use > 0 here meaning the
6221 +- * IO is started, so we don't want to return > 0
6222 +- * unless things are going well.
6223 ++ /*
6224 ++ * btrfs_run_delalloc_range should return < 0 for error
6225 ++ * but just in case, we use > 0 here meaning the IO is
6226 ++ * started, so we don't want to return > 0 unless
6227 ++ * things are going well.
6228 + */
6229 + ret = ret < 0 ? ret : -EIO;
6230 + goto done;
6231 +diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
6232 +index b4d03e677e1d..ed27becd963c 100644
6233 +--- a/fs/btrfs/extent_io.h
6234 ++++ b/fs/btrfs/extent_io.h
6235 +@@ -106,11 +106,6 @@ struct extent_io_ops {
6236 + /*
6237 + * Optional hooks, called if the pointer is not NULL
6238 + */
6239 +- int (*fill_delalloc)(void *private_data, struct page *locked_page,
6240 +- u64 start, u64 end, int *page_started,
6241 +- unsigned long *nr_written,
6242 +- struct writeback_control *wbc);
6243 +-
6244 + int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
6245 + void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
6246 + struct extent_state *state, int uptodate);
6247 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
6248 +index 355ff08e9d44..98c535ae038d 100644
6249 +--- a/fs/btrfs/inode.c
6250 ++++ b/fs/btrfs/inode.c
6251 +@@ -110,17 +110,17 @@ static void __endio_write_update_ordered(struct inode *inode,
6252 + * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
6253 + * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
6254 + * to be released, which we want to happen only when finishing the ordered
6255 +- * extent (btrfs_finish_ordered_io()). Also note that the caller of the
6256 +- * fill_delalloc() callback already does proper cleanup for the first page of
6257 +- * the range, that is, it invokes the callback writepage_end_io_hook() for the
6258 +- * range of the first page.
6259 ++ * extent (btrfs_finish_ordered_io()).
6260 + */
6261 + static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
6262 +- const u64 offset,
6263 +- const u64 bytes)
6264 ++ struct page *locked_page,
6265 ++ u64 offset, u64 bytes)
6266 + {
6267 + unsigned long index = offset >> PAGE_SHIFT;
6268 + unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
6269 ++ u64 page_start = page_offset(locked_page);
6270 ++ u64 page_end = page_start + PAGE_SIZE - 1;
6271 ++
6272 + struct page *page;
6273 +
6274 + while (index <= end_index) {
6275 +@@ -131,8 +131,18 @@ static inline void btrfs_cleanup_ordered_extents(struct inode *inode,
6276 + ClearPagePrivate2(page);
6277 + put_page(page);
6278 + }
6279 +- return __endio_write_update_ordered(inode, offset + PAGE_SIZE,
6280 +- bytes - PAGE_SIZE, false);
6281 ++
6282 ++ /*
6283 ++ * In case this page belongs to the delalloc range being instantiated
6284 ++ * then skip it, since the first page of a range is going to be
6285 ++ * properly cleaned up by the caller of run_delalloc_range
6286 ++ */
6287 ++ if (page_start >= offset && page_end <= (offset + bytes - 1)) {
6288 ++ offset += PAGE_SIZE;
6289 ++ bytes -= PAGE_SIZE;
6290 ++ }
6291 ++
6292 ++ return __endio_write_update_ordered(inode, offset, bytes, false);
6293 + }
6294 +
6295 + static int btrfs_dirty_inode(struct inode *inode);
6296 +@@ -1599,12 +1609,12 @@ static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
6297 + }
6298 +
6299 + /*
6300 +- * extent_io.c call back to do delayed allocation processing
6301 ++ * Function to process delayed allocation (create CoW) for ranges which are
6302 ++ * being touched for the first time.
6303 + */
6304 +-static int run_delalloc_range(void *private_data, struct page *locked_page,
6305 +- u64 start, u64 end, int *page_started,
6306 +- unsigned long *nr_written,
6307 +- struct writeback_control *wbc)
6308 ++int btrfs_run_delalloc_range(void *private_data, struct page *locked_page,
6309 ++ u64 start, u64 end, int *page_started, unsigned long *nr_written,
6310 ++ struct writeback_control *wbc)
6311 + {
6312 + struct inode *inode = private_data;
6313 + int ret;
6314 +@@ -1629,7 +1639,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
6315 + write_flags);
6316 + }
6317 + if (ret)
6318 +- btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
6319 ++ btrfs_cleanup_ordered_extents(inode, locked_page, start,
6320 ++ end - start + 1);
6321 + return ret;
6322 + }
6323 +
6324 +@@ -10598,7 +10609,6 @@ static const struct extent_io_ops btrfs_extent_io_ops = {
6325 + .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
6326 +
6327 + /* optional callbacks */
6328 +- .fill_delalloc = run_delalloc_range,
6329 + .writepage_end_io_hook = btrfs_writepage_end_io_hook,
6330 + .writepage_start_hook = btrfs_writepage_start_hook,
6331 + .set_bit_hook = btrfs_set_bit_hook,
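
btrfs_cleanup_ordered_extents() now receives the locked page so it can leave that page's slice of the failed range alone; the caller of btrfs_run_delalloc_range() already cleans the first page, so finishing its ordered bytes twice would double-release reserved metadata. The window adjustment in isolation (sizes hardcoded for the example):

#include <stdio.h>

#define PAGE_SIZE 4096ULL

int main(void)
{
	unsigned long long offset = 8192, bytes = 16384;	/* failed range */
	unsigned long long page_start = 8192;			/* locked page */
	unsigned long long page_end = page_start + PAGE_SIZE - 1;

	/* If the locked page sits inside the range, the caller already
	 * cleans it up, so shrink the range we finish ourselves. */
	if (page_start >= offset && page_end <= offset + bytes - 1) {
		offset += PAGE_SIZE;
		bytes -= PAGE_SIZE;
	}
	printf("cleanup ordered range: offset=%llu bytes=%llu\n",
	       offset, bytes);
	return 0;
}
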
6332 +diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
6333 +index 61d22a56c0ba..6980a0e13f18 100644
6334 +--- a/fs/btrfs/props.c
6335 ++++ b/fs/btrfs/props.c
6336 +@@ -366,11 +366,7 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
6337 +
6338 + static int prop_compression_validate(const char *value, size_t len)
6339 + {
6340 +- if (!strncmp("lzo", value, 3))
6341 +- return 0;
6342 +- else if (!strncmp("zlib", value, 4))
6343 +- return 0;
6344 +- else if (!strncmp("zstd", value, 4))
6345 ++ if (btrfs_compress_is_valid_type(value, len))
6346 + return 0;
6347 +
6348 + return -EINVAL;
6349 +diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
6350 +index 3be1456b5116..916c39770467 100644
6351 +--- a/fs/btrfs/scrub.c
6352 ++++ b/fs/btrfs/scrub.c
6353 +@@ -322,6 +322,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
6354 + struct rb_node *parent = NULL;
6355 + struct full_stripe_lock *entry;
6356 + struct full_stripe_lock *ret;
6357 ++ unsigned int nofs_flag;
6358 +
6359 + lockdep_assert_held(&locks_root->lock);
6360 +
6361 +@@ -339,8 +340,17 @@ static struct full_stripe_lock *insert_full_stripe_lock(
6362 + }
6363 + }
6364 +
6365 +- /* Insert new lock */
6366 ++ /*
6367 ++ * Insert new lock.
6368 ++ *
6369 ++ * We must use GFP_NOFS because the scrub task might be waiting for a
6370 ++ * worker task executing this function and in turn a transaction commit
6371 ++	 * might be waiting for the scrub task to pause (which needs to wait for all
6372 ++ * the worker tasks to complete before pausing).
6373 ++ */
6374 ++ nofs_flag = memalloc_nofs_save();
6375 + ret = kmalloc(sizeof(*ret), GFP_KERNEL);
6376 ++ memalloc_nofs_restore(nofs_flag);
6377 + if (!ret)
6378 + return ERR_PTR(-ENOMEM);
6379 + ret->logical = fstripe_logical;
6380 +@@ -568,12 +578,11 @@ static void scrub_put_ctx(struct scrub_ctx *sctx)
6381 + scrub_free_ctx(sctx);
6382 + }
6383 +
6384 +-static noinline_for_stack
6385 +-struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
6386 ++static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
6387 ++ struct btrfs_fs_info *fs_info, int is_dev_replace)
6388 + {
6389 + struct scrub_ctx *sctx;
6390 + int i;
6391 +- struct btrfs_fs_info *fs_info = dev->fs_info;
6392 +
6393 + sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
6394 + if (!sctx)
6395 +@@ -582,7 +591,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
6396 + sctx->is_dev_replace = is_dev_replace;
6397 + sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
6398 + sctx->curr = -1;
6399 +- sctx->fs_info = dev->fs_info;
6400 ++ sctx->fs_info = fs_info;
6401 ++ INIT_LIST_HEAD(&sctx->csum_list);
6402 + for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
6403 + struct scrub_bio *sbio;
6404 +
6405 +@@ -607,7 +617,6 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
6406 + atomic_set(&sctx->workers_pending, 0);
6407 + atomic_set(&sctx->cancel_req, 0);
6408 + sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
6409 +- INIT_LIST_HEAD(&sctx->csum_list);
6410 +
6411 + spin_lock_init(&sctx->list_lock);
6412 + spin_lock_init(&sctx->stat_lock);
6413 +@@ -1622,8 +1631,19 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
6414 + mutex_lock(&sctx->wr_lock);
6415 + again:
6416 + if (!sctx->wr_curr_bio) {
6417 ++ unsigned int nofs_flag;
6418 ++
6419 ++ /*
6420 ++ * We must use GFP_NOFS because the scrub task might be waiting
6421 ++ * for a worker task executing this function and in turn a
6422 ++		 * transaction commit might be waiting for the scrub task to pause
6423 ++ * (which needs to wait for all the worker tasks to complete
6424 ++ * before pausing).
6425 ++ */
6426 ++ nofs_flag = memalloc_nofs_save();
6427 + sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
6428 + GFP_KERNEL);
6429 ++ memalloc_nofs_restore(nofs_flag);
6430 + if (!sctx->wr_curr_bio) {
6431 + mutex_unlock(&sctx->wr_lock);
6432 + return -ENOMEM;
6433 +@@ -3022,8 +3042,7 @@ out:
6434 + static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
6435 + struct map_lookup *map,
6436 + struct btrfs_device *scrub_dev,
6437 +- int num, u64 base, u64 length,
6438 +- int is_dev_replace)
6439 ++ int num, u64 base, u64 length)
6440 + {
6441 + struct btrfs_path *path, *ppath;
6442 + struct btrfs_fs_info *fs_info = sctx->fs_info;
6443 +@@ -3299,7 +3318,7 @@ again:
6444 + extent_physical = extent_logical - logical + physical;
6445 + extent_dev = scrub_dev;
6446 + extent_mirror_num = mirror_num;
6447 +- if (is_dev_replace)
6448 ++ if (sctx->is_dev_replace)
6449 + scrub_remap_extent(fs_info, extent_logical,
6450 + extent_len, &extent_physical,
6451 + &extent_dev,
6452 +@@ -3397,8 +3416,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
6453 + struct btrfs_device *scrub_dev,
6454 + u64 chunk_offset, u64 length,
6455 + u64 dev_offset,
6456 +- struct btrfs_block_group_cache *cache,
6457 +- int is_dev_replace)
6458 ++ struct btrfs_block_group_cache *cache)
6459 + {
6460 + struct btrfs_fs_info *fs_info = sctx->fs_info;
6461 + struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
6462 +@@ -3435,8 +3453,7 @@ static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
6463 + if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
6464 + map->stripes[i].physical == dev_offset) {
6465 + ret = scrub_stripe(sctx, map, scrub_dev, i,
6466 +- chunk_offset, length,
6467 +- is_dev_replace);
6468 ++ chunk_offset, length);
6469 + if (ret)
6470 + goto out;
6471 + }
6472 +@@ -3449,8 +3466,7 @@ out:
6473 +
6474 + static noinline_for_stack
6475 + int scrub_enumerate_chunks(struct scrub_ctx *sctx,
6476 +- struct btrfs_device *scrub_dev, u64 start, u64 end,
6477 +- int is_dev_replace)
6478 ++ struct btrfs_device *scrub_dev, u64 start, u64 end)
6479 + {
6480 + struct btrfs_dev_extent *dev_extent = NULL;
6481 + struct btrfs_path *path;
6482 +@@ -3544,7 +3560,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
6483 + */
6484 + scrub_pause_on(fs_info);
6485 + ret = btrfs_inc_block_group_ro(cache);
6486 +- if (!ret && is_dev_replace) {
6487 ++ if (!ret && sctx->is_dev_replace) {
6488 + /*
6489 + * If we are doing a device replace wait for any tasks
6490 + * that started delalloc right before we set the block
6491 +@@ -3609,7 +3625,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
6492 + dev_replace->item_needs_writeback = 1;
6493 + btrfs_dev_replace_write_unlock(&fs_info->dev_replace);
6494 + ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
6495 +- found_key.offset, cache, is_dev_replace);
6496 ++ found_key.offset, cache);
6497 +
6498 + /*
6499 + * flush, submit all pending read and write bios, afterwards
6500 +@@ -3670,7 +3686,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
6501 + btrfs_put_block_group(cache);
6502 + if (ret)
6503 + break;
6504 +- if (is_dev_replace &&
6505 ++ if (sctx->is_dev_replace &&
6506 + atomic64_read(&dev_replace->num_write_errors) > 0) {
6507 + ret = -EIO;
6508 + break;
6509 +@@ -3762,16 +3778,6 @@ fail_scrub_workers:
6510 + return -ENOMEM;
6511 + }
6512 +
6513 +-static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
6514 +-{
6515 +- if (--fs_info->scrub_workers_refcnt == 0) {
6516 +- btrfs_destroy_workqueue(fs_info->scrub_workers);
6517 +- btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
6518 +- btrfs_destroy_workqueue(fs_info->scrub_parity_workers);
6519 +- }
6520 +- WARN_ON(fs_info->scrub_workers_refcnt < 0);
6521 +-}
6522 +-
6523 + int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6524 + u64 end, struct btrfs_scrub_progress *progress,
6525 + int readonly, int is_dev_replace)
6526 +@@ -3779,6 +3785,10 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6527 + struct scrub_ctx *sctx;
6528 + int ret;
6529 + struct btrfs_device *dev;
6530 ++ unsigned int nofs_flag;
6531 ++ struct btrfs_workqueue *scrub_workers = NULL;
6532 ++ struct btrfs_workqueue *scrub_wr_comp = NULL;
6533 ++ struct btrfs_workqueue *scrub_parity = NULL;
6534 +
6535 + if (btrfs_fs_closing(fs_info))
6536 + return -EINVAL;
6537 +@@ -3820,13 +3830,18 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6538 + return -EINVAL;
6539 + }
6540 +
6541 ++ /* Allocate outside of device_list_mutex */
6542 ++ sctx = scrub_setup_ctx(fs_info, is_dev_replace);
6543 ++ if (IS_ERR(sctx))
6544 ++ return PTR_ERR(sctx);
6545 +
6546 + mutex_lock(&fs_info->fs_devices->device_list_mutex);
6547 + dev = btrfs_find_device(fs_info, devid, NULL, NULL);
6548 + if (!dev || (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) &&
6549 + !is_dev_replace)) {
6550 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6551 +- return -ENODEV;
6552 ++ ret = -ENODEV;
6553 ++ goto out_free_ctx;
6554 + }
6555 +
6556 + if (!is_dev_replace && !readonly &&
6557 +@@ -3834,7 +3849,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6558 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6559 + btrfs_err_in_rcu(fs_info, "scrub: device %s is not writable",
6560 + rcu_str_deref(dev->name));
6561 +- return -EROFS;
6562 ++ ret = -EROFS;
6563 ++ goto out_free_ctx;
6564 + }
6565 +
6566 + mutex_lock(&fs_info->scrub_lock);
6567 +@@ -3842,7 +3858,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6568 + test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &dev->dev_state)) {
6569 + mutex_unlock(&fs_info->scrub_lock);
6570 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6571 +- return -EIO;
6572 ++ ret = -EIO;
6573 ++ goto out_free_ctx;
6574 + }
6575 +
6576 + btrfs_dev_replace_read_lock(&fs_info->dev_replace);
6577 +@@ -3852,7 +3869,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6578 + btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
6579 + mutex_unlock(&fs_info->scrub_lock);
6580 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6581 +- return -EINPROGRESS;
6582 ++ ret = -EINPROGRESS;
6583 ++ goto out_free_ctx;
6584 + }
6585 + btrfs_dev_replace_read_unlock(&fs_info->dev_replace);
6586 +
6587 +@@ -3860,16 +3878,9 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6588 + if (ret) {
6589 + mutex_unlock(&fs_info->scrub_lock);
6590 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6591 +- return ret;
6592 ++ goto out_free_ctx;
6593 + }
6594 +
6595 +- sctx = scrub_setup_ctx(dev, is_dev_replace);
6596 +- if (IS_ERR(sctx)) {
6597 +- mutex_unlock(&fs_info->scrub_lock);
6598 +- mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6599 +- scrub_workers_put(fs_info);
6600 +- return PTR_ERR(sctx);
6601 +- }
6602 + sctx->readonly = readonly;
6603 + dev->scrub_ctx = sctx;
6604 + mutex_unlock(&fs_info->fs_devices->device_list_mutex);
6605 +@@ -3882,6 +3893,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6606 + atomic_inc(&fs_info->scrubs_running);
6607 + mutex_unlock(&fs_info->scrub_lock);
6608 +
6609 ++ /*
6610 ++ * In order to avoid deadlock with reclaim when there is a transaction
6611 ++ * trying to pause scrub, make sure we use GFP_NOFS for all the
6612 ++ * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
6613 ++ * invoked by our callees. The pausing request is done when the
6614 ++ * transaction commit starts, and it blocks the transaction until scrub
6615 ++	 * is paused (done at specific points in scrub_stripe() or right above,
6616 ++	 * before incrementing fs_info->scrubs_running).
6617 ++ */
6618 ++ nofs_flag = memalloc_nofs_save();
6619 + if (!is_dev_replace) {
6620 + /*
6621 + * by holding device list mutex, we can
6622 +@@ -3893,8 +3914,8 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6623 + }
6624 +
6625 + if (!ret)
6626 +- ret = scrub_enumerate_chunks(sctx, dev, start, end,
6627 +- is_dev_replace);
6628 ++ ret = scrub_enumerate_chunks(sctx, dev, start, end);
6629 ++ memalloc_nofs_restore(nofs_flag);
6630 +
6631 + wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
6632 + atomic_dec(&fs_info->scrubs_running);
6633 +@@ -3907,11 +3928,23 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
6634 +
6635 + mutex_lock(&fs_info->scrub_lock);
6636 + dev->scrub_ctx = NULL;
6637 +- scrub_workers_put(fs_info);
6638 ++ if (--fs_info->scrub_workers_refcnt == 0) {
6639 ++ scrub_workers = fs_info->scrub_workers;
6640 ++ scrub_wr_comp = fs_info->scrub_wr_completion_workers;
6641 ++ scrub_parity = fs_info->scrub_parity_workers;
6642 ++ }
6643 + mutex_unlock(&fs_info->scrub_lock);
6644 +
6645 ++ btrfs_destroy_workqueue(scrub_workers);
6646 ++ btrfs_destroy_workqueue(scrub_wr_comp);
6647 ++ btrfs_destroy_workqueue(scrub_parity);
6648 + scrub_put_ctx(sctx);
6649 +
6650 ++ return ret;
6651 ++
6652 ++out_free_ctx:
6653 ++ scrub_free_ctx(sctx);
6654 ++
6655 + return ret;
6656 + }
6657 +
6658 +diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
6659 +index 6e008bd5c8cd..a8297e7489d9 100644
6660 +--- a/fs/btrfs/volumes.c
6661 ++++ b/fs/btrfs/volumes.c
6662 +@@ -7411,6 +7411,7 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
6663 + struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
6664 + struct extent_map *em;
6665 + struct map_lookup *map;
6666 ++ struct btrfs_device *dev;
6667 + u64 stripe_len;
6668 + bool found = false;
6669 + int ret = 0;
6670 +@@ -7460,6 +7461,34 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
6671 + physical_offset, devid);
6672 + ret = -EUCLEAN;
6673 + }
6674 ++
6675 ++	/* Make sure no dev extent is beyond device boundary */
6676 ++ dev = btrfs_find_device(fs_info, devid, NULL, NULL);
6677 ++ if (!dev) {
6678 ++ btrfs_err(fs_info, "failed to find devid %llu", devid);
6679 ++ ret = -EUCLEAN;
6680 ++ goto out;
6681 ++ }
6682 ++
6683 ++ /* It's possible this device is a dummy for seed device */
6684 ++ if (dev->disk_total_bytes == 0) {
6685 ++ dev = find_device(fs_info->fs_devices->seed, devid, NULL);
6686 ++ if (!dev) {
6687 ++ btrfs_err(fs_info, "failed to find seed devid %llu",
6688 ++ devid);
6689 ++ ret = -EUCLEAN;
6690 ++ goto out;
6691 ++ }
6692 ++ }
6693 ++
6694 ++ if (physical_offset + physical_len > dev->disk_total_bytes) {
6695 ++ btrfs_err(fs_info,
6696 ++"dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
6697 ++ devid, physical_offset, physical_len,
6698 ++ dev->disk_total_bytes);
6699 ++ ret = -EUCLEAN;
6700 ++ goto out;
6701 ++ }
6702 + out:
6703 + free_extent_map(em);
6704 + return ret;
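
The added verification rejects any dev extent whose end would pass the device's recorded size, after first re-resolving dummy seed devices that report disk_total_bytes == 0. The comparison as a stand-alone helper; the names and the EUCLEAN-style return value are illustrative:

#include <stdio.h>
#include <stdint.h>

/* Returns 0 when the extent fits, -1 (EUCLEAN-style) when it runs
 * past the end of the device. */
static int check_dev_extent(uint64_t physical_offset, uint64_t physical_len,
			    uint64_t disk_total_bytes)
{
	/* A dummy seed device reports size 0; the real code retries the
	 * lookup in the seed device list before checking. */
	if (physical_offset + physical_len > disk_total_bytes)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_dev_extent(0, 1 << 20, 10ULL << 30));	/*  0 */
	printf("%d\n", check_dev_extent(10ULL << 30, 1, 10ULL << 30));	/* -1 */
	return 0;
}
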
6705 +diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
6706 +index 11f19432a74c..665a86f83f4b 100644
6707 +--- a/fs/ceph/inode.c
6708 ++++ b/fs/ceph/inode.c
6709 +@@ -528,13 +528,16 @@ static void ceph_i_callback(struct rcu_head *head)
6710 + kmem_cache_free(ceph_inode_cachep, ci);
6711 + }
6712 +
6713 +-void ceph_destroy_inode(struct inode *inode)
6714 ++void ceph_evict_inode(struct inode *inode)
6715 + {
6716 + struct ceph_inode_info *ci = ceph_inode(inode);
6717 + struct ceph_inode_frag *frag;
6718 + struct rb_node *n;
6719 +
6720 +- dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
6721 ++ dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
6722 ++
6723 ++ truncate_inode_pages_final(&inode->i_data);
6724 ++ clear_inode(inode);
6725 +
6726 + ceph_fscache_unregister_inode_cookie(ci);
6727 +
6728 +diff --git a/fs/ceph/super.c b/fs/ceph/super.c
6729 +index c5cf46e43f2e..02528e11bf33 100644
6730 +--- a/fs/ceph/super.c
6731 ++++ b/fs/ceph/super.c
6732 +@@ -827,9 +827,9 @@ static int ceph_remount(struct super_block *sb, int *flags, char *data)
6733 +
6734 + static const struct super_operations ceph_super_ops = {
6735 + .alloc_inode = ceph_alloc_inode,
6736 +- .destroy_inode = ceph_destroy_inode,
6737 + .write_inode = ceph_write_inode,
6738 + .drop_inode = ceph_drop_inode,
6739 ++ .evict_inode = ceph_evict_inode,
6740 + .sync_fs = ceph_sync_fs,
6741 + .put_super = ceph_put_super,
6742 + .remount_fs = ceph_remount,
6743 +diff --git a/fs/ceph/super.h b/fs/ceph/super.h
6744 +index 018019309790..6e968e48e5e4 100644
6745 +--- a/fs/ceph/super.h
6746 ++++ b/fs/ceph/super.h
6747 +@@ -854,7 +854,7 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
6748 + extern const struct inode_operations ceph_file_iops;
6749 +
6750 + extern struct inode *ceph_alloc_inode(struct super_block *sb);
6751 +-extern void ceph_destroy_inode(struct inode *inode);
6752 ++extern void ceph_evict_inode(struct inode *inode);
6753 + extern int ceph_drop_inode(struct inode *inode);
6754 +
6755 + extern struct inode *ceph_get_inode(struct super_block *sb,
6756 +diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
6757 +index 9731d0d891e7..aba2b48d4da1 100644
6758 +--- a/fs/cifs/cifs_fs_sb.h
6759 ++++ b/fs/cifs/cifs_fs_sb.h
6760 +@@ -72,5 +72,10 @@ struct cifs_sb_info {
6761 + struct delayed_work prune_tlinks;
6762 + struct rcu_head rcu;
6763 + char *prepath;
6764 ++ /*
6765 ++ * Indicate whether serverino option was turned off later
6766 ++ * (cifs_autodisable_serverino) in order to match new mounts.
6767 ++ */
6768 ++ bool mnt_cifs_serverino_autodisabled;
6769 + };
6770 + #endif /* _CIFS_FS_SB_H */
6771 +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
6772 +index fb32f3d6925e..64e3888f30e6 100644
6773 +--- a/fs/cifs/cifsfs.c
6774 ++++ b/fs/cifs/cifsfs.c
6775 +@@ -292,6 +292,7 @@ cifs_alloc_inode(struct super_block *sb)
6776 + cifs_inode->uniqueid = 0;
6777 + cifs_inode->createtime = 0;
6778 + cifs_inode->epoch = 0;
6779 ++ spin_lock_init(&cifs_inode->open_file_lock);
6780 + generate_random_uuid(cifs_inode->lease_key);
6781 +
6782 + /*
6783 +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
6784 +index 6f227cc781e5..57af9bac0045 100644
6785 +--- a/fs/cifs/cifsglob.h
6786 ++++ b/fs/cifs/cifsglob.h
6787 +@@ -1287,6 +1287,7 @@ struct cifsInodeInfo {
6788 + struct rw_semaphore lock_sem; /* protect the fields above */
6789 + /* BB add in lists for dirty pages i.e. write caching info for oplock */
6790 + struct list_head openFileList;
6791 ++ spinlock_t open_file_lock; /* protects openFileList */
6792 + __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
6793 + unsigned int oplock; /* oplock/lease level we have */
6794 + unsigned int epoch; /* used to track lease state changes */
6795 +@@ -1563,6 +1564,25 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param,
6796 + kfree(param);
6797 + }
6798 +
6799 ++static inline bool is_interrupt_error(int error)
6800 ++{
6801 ++ switch (error) {
6802 ++ case -EINTR:
6803 ++ case -ERESTARTSYS:
6804 ++ case -ERESTARTNOHAND:
6805 ++ case -ERESTARTNOINTR:
6806 ++ return true;
6807 ++ }
6808 ++ return false;
6809 ++}
6810 ++
6811 ++static inline bool is_retryable_error(int error)
6812 ++{
6813 ++ if (is_interrupt_error(error) || error == -EAGAIN)
6814 ++ return true;
6815 ++ return false;
6816 ++}
6817 ++
6818 + #define MID_FREE 0
6819 + #define MID_REQUEST_ALLOCATED 1
6820 + #define MID_REQUEST_SUBMITTED 2
6821 +@@ -1668,10 +1688,14 @@ require use of the stronger protocol */
6822 + * tcp_ses_lock protects:
6823 + * list operations on tcp and SMB session lists
6824 + * tcon->open_file_lock protects the list of open files hanging off the tcon
6825 ++ * inode->open_file_lock protects the openFileList hanging off the inode
6826 + * cfile->file_info_lock protects counters and fields in cifs file struct
6827 + * f_owner.lock protects certain per file struct operations
6828 + * mapping->page_lock protects certain per page operations
6829 + *
6830 ++ * Note that the cifs_tcon.open_file_lock should be taken before,
6831 ++ * not after, the cifsInodeInfo.open_file_lock
6832 ++ *
6833 + * Semaphores
6834 + * ----------
6835 + * sesSem operations on smb session
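
The two new helpers split failures into signal-driven interrupts (which must not be reported as writeback failure; callers below convert them to -ERESTARTSYS) and retryable errors (the interrupts plus -EAGAIN, safe to redirty and retry). The classification compiles on its own; the ERESTART* codes are kernel-internal, so they are defined locally here with the values used by include/linux/errno.h:

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

/* Kernel-internal restart codes, not present in userspace errno.h. */
#define ERESTARTSYS    512
#define ERESTARTNOINTR 513
#define ERESTARTNOHAND 514

static bool is_interrupt_error(int error)
{
	switch (error) {
	case -EINTR:
	case -ERESTARTSYS:
	case -ERESTARTNOHAND:
	case -ERESTARTNOINTR:
		return true;
	}
	return false;
}

static bool is_retryable_error(int error)
{
	return is_interrupt_error(error) || error == -EAGAIN;
}

int main(void)
{
	printf("EINTR:  retryable=%d\n", is_retryable_error(-EINTR));
	printf("EAGAIN: interrupt=%d retryable=%d\n",
	       is_interrupt_error(-EAGAIN), is_retryable_error(-EAGAIN));
	printf("EIO:    retryable=%d\n", is_retryable_error(-EIO));
	return 0;
}
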
6836 +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
6837 +index 269471c8f42b..86a54b809c48 100644
6838 +--- a/fs/cifs/cifssmb.c
6839 ++++ b/fs/cifs/cifssmb.c
6840 +@@ -2033,16 +2033,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
6841 +
6842 + wdata2->cfile = find_writable_file(CIFS_I(inode), false);
6843 + if (!wdata2->cfile) {
6844 +- cifs_dbg(VFS, "No writable handles for inode\n");
6845 ++ cifs_dbg(VFS, "No writable handle to retry writepages\n");
6846 + rc = -EBADF;
6847 +- break;
6848 ++ } else {
6849 ++ wdata2->pid = wdata2->cfile->pid;
6850 ++ rc = server->ops->async_writev(wdata2,
6851 ++ cifs_writedata_release);
6852 + }
6853 +- wdata2->pid = wdata2->cfile->pid;
6854 +- rc = server->ops->async_writev(wdata2, cifs_writedata_release);
6855 +
6856 + for (j = 0; j < nr_pages; j++) {
6857 + unlock_page(wdata2->pages[j]);
6858 +- if (rc != 0 && rc != -EAGAIN) {
6859 ++ if (rc != 0 && !is_retryable_error(rc)) {
6860 + SetPageError(wdata2->pages[j]);
6861 + end_page_writeback(wdata2->pages[j]);
6862 + put_page(wdata2->pages[j]);
6863 +@@ -2051,8 +2052,9 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
6864 +
6865 + if (rc) {
6866 + kref_put(&wdata2->refcount, cifs_writedata_release);
6867 +- if (rc == -EAGAIN)
6868 ++ if (is_retryable_error(rc))
6869 + continue;
6870 ++ i += nr_pages;
6871 + break;
6872 + }
6873 +
6874 +@@ -2060,7 +2062,15 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
6875 + i += nr_pages;
6876 + } while (i < wdata->nr_pages);
6877 +
6878 +- mapping_set_error(inode->i_mapping, rc);
6879 ++ /* cleanup remaining pages from the original wdata */
6880 ++ for (; i < wdata->nr_pages; i++) {
6881 ++ SetPageError(wdata->pages[i]);
6882 ++ end_page_writeback(wdata->pages[i]);
6883 ++ put_page(wdata->pages[i]);
6884 ++ }
6885 ++
6886 ++ if (rc != 0 && !is_retryable_error(rc))
6887 ++ mapping_set_error(inode->i_mapping, rc);
6888 + kref_put(&wdata->refcount, cifs_writedata_release);
6889 + }
6890 +
6891 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
6892 +index c53a2e86ed54..208430bb66fc 100644
6893 +--- a/fs/cifs/connect.c
6894 ++++ b/fs/cifs/connect.c
6895 +@@ -3247,12 +3247,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
6896 + {
6897 + struct cifs_sb_info *old = CIFS_SB(sb);
6898 + struct cifs_sb_info *new = mnt_data->cifs_sb;
6899 ++ unsigned int oldflags = old->mnt_cifs_flags & CIFS_MOUNT_MASK;
6900 ++ unsigned int newflags = new->mnt_cifs_flags & CIFS_MOUNT_MASK;
6901 +
6902 + if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK))
6903 + return 0;
6904 +
6905 +- if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) !=
6906 +- (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
6907 ++ if (old->mnt_cifs_serverino_autodisabled)
6908 ++ newflags &= ~CIFS_MOUNT_SERVER_INUM;
6909 ++
6910 ++ if (oldflags != newflags)
6911 + return 0;
6912 +
6913 + /*
6914 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
6915 +index 23cee91ed442..8703b5f26f45 100644
6916 +--- a/fs/cifs/file.c
6917 ++++ b/fs/cifs/file.c
6918 +@@ -336,10 +336,12 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
6919 + list_add(&cfile->tlist, &tcon->openFileList);
6920 +
6921 + /* if readable file instance put first in list*/
6922 ++ spin_lock(&cinode->open_file_lock);
6923 + if (file->f_mode & FMODE_READ)
6924 + list_add(&cfile->flist, &cinode->openFileList);
6925 + else
6926 + list_add_tail(&cfile->flist, &cinode->openFileList);
6927 ++ spin_unlock(&cinode->open_file_lock);
6928 + spin_unlock(&tcon->open_file_lock);
6929 +
6930 + if (fid->purge_cache)
6931 +@@ -411,7 +413,9 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
6932 + cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
6933 +
6934 + /* remove it from the lists */
6935 ++ spin_lock(&cifsi->open_file_lock);
6936 + list_del(&cifs_file->flist);
6937 ++ spin_unlock(&cifsi->open_file_lock);
6938 + list_del(&cifs_file->tlist);
6939 +
6940 + if (list_empty(&cifsi->openFileList)) {
6941 +@@ -749,7 +753,8 @@ reopen_success:
6942 +
6943 + if (can_flush) {
6944 + rc = filemap_write_and_wait(inode->i_mapping);
6945 +- mapping_set_error(inode->i_mapping, rc);
6946 ++ if (!is_interrupt_error(rc))
6947 ++ mapping_set_error(inode->i_mapping, rc);
6948 +
6949 + if (tcon->unix_ext)
6950 + rc = cifs_get_inode_info_unix(&inode, full_path,
6951 +@@ -1928,10 +1933,10 @@ refind_writable:
6952 + if (!rc)
6953 + return inv_file;
6954 + else {
6955 +- spin_lock(&tcon->open_file_lock);
6956 ++ spin_lock(&cifs_inode->open_file_lock);
6957 + list_move_tail(&inv_file->flist,
6958 + &cifs_inode->openFileList);
6959 +- spin_unlock(&tcon->open_file_lock);
6960 ++ spin_unlock(&cifs_inode->open_file_lock);
6961 + cifsFileInfo_put(inv_file);
6962 + ++refind;
6963 + inv_file = NULL;
6964 +@@ -2137,6 +2142,7 @@ static int cifs_writepages(struct address_space *mapping,
6965 + pgoff_t end, index;
6966 + struct cifs_writedata *wdata;
6967 + int rc = 0;
6968 ++ int saved_rc = 0;
6969 +
6970 + /*
6971 + * If wsize is smaller than the page cache size, default to writing
6972 +@@ -2163,8 +2169,10 @@ retry:
6973 +
6974 + rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
6975 + &wsize, &credits);
6976 +- if (rc)
6977 ++ if (rc != 0) {
6978 ++ done = true;
6979 + break;
6980 ++ }
6981 +
6982 + tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;
6983 +
6984 +@@ -2172,6 +2180,7 @@ retry:
6985 + &found_pages);
6986 + if (!wdata) {
6987 + rc = -ENOMEM;
6988 ++ done = true;
6989 + add_credits_and_wake_if(server, credits, 0);
6990 + break;
6991 + }
6992 +@@ -2200,7 +2209,7 @@ retry:
6993 + if (rc != 0) {
6994 + add_credits_and_wake_if(server, wdata->credits, 0);
6995 + for (i = 0; i < nr_pages; ++i) {
6996 +- if (rc == -EAGAIN)
6997 ++ if (is_retryable_error(rc))
6998 + redirty_page_for_writepage(wbc,
6999 + wdata->pages[i]);
7000 + else
7001 +@@ -2208,7 +2217,7 @@ retry:
7002 + end_page_writeback(wdata->pages[i]);
7003 + put_page(wdata->pages[i]);
7004 + }
7005 +- if (rc != -EAGAIN)
7006 ++ if (!is_retryable_error(rc))
7007 + mapping_set_error(mapping, rc);
7008 + }
7009 + kref_put(&wdata->refcount, cifs_writedata_release);
7010 +@@ -2218,6 +2227,15 @@ retry:
7011 + continue;
7012 + }
7013 +
7014 ++ /* Return immediately if we received a signal during writing */
7015 ++ if (is_interrupt_error(rc)) {
7016 ++ done = true;
7017 ++ break;
7018 ++ }
7019 ++
7020 ++ if (rc != 0 && saved_rc == 0)
7021 ++ saved_rc = rc;
7022 ++
7023 + wbc->nr_to_write -= nr_pages;
7024 + if (wbc->nr_to_write <= 0)
7025 + done = true;
7026 +@@ -2235,6 +2253,9 @@ retry:
7027 + goto retry;
7028 + }
7029 +
7030 ++ if (saved_rc != 0)
7031 ++ rc = saved_rc;
7032 ++
7033 + if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
7034 + mapping->writeback_index = index;
7035 +
7036 +@@ -2266,8 +2287,8 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
7037 + set_page_writeback(page);
7038 + retry_write:
7039 + rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
7040 +- if (rc == -EAGAIN) {
7041 +- if (wbc->sync_mode == WB_SYNC_ALL)
7042 ++ if (is_retryable_error(rc)) {
7043 ++ if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN)
7044 + goto retry_write;
7045 + redirty_page_for_writepage(wbc, page);
7046 + } else if (rc != 0) {
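
cifs_writepages now bails out as soon as a signal is seen and keeps the first hard error in saved_rc so that a later successful batch cannot overwrite it. The bookkeeping reduced to a toy loop:

#include <stdio.h>

int main(void)
{
	int batches[] = { 0, -5 /* EIO */, 0, 0 };
	int rc = 0, saved_rc = 0;

	for (unsigned i = 0; i < sizeof(batches) / sizeof(batches[0]); i++) {
		rc = batches[i];
		/* keep only the FIRST failure; later successes reset rc */
		if (rc != 0 && saved_rc == 0)
			saved_rc = rc;
	}
	if (saved_rc != 0)
		rc = saved_rc;
	printf("returned rc=%d\n", rc);	/* -5, not the final 0 */
	return 0;
}
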
7047 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
7048 +index 1fadd314ae7f..53f3d08898af 100644
7049 +--- a/fs/cifs/inode.c
7050 ++++ b/fs/cifs/inode.c
7051 +@@ -2261,6 +2261,11 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
7052 + * the flush returns error?
7053 + */
7054 + rc = filemap_write_and_wait(inode->i_mapping);
7055 ++ if (is_interrupt_error(rc)) {
7056 ++ rc = -ERESTARTSYS;
7057 ++ goto out;
7058 ++ }
7059 ++
7060 + mapping_set_error(inode->i_mapping, rc);
7061 + rc = 0;
7062 +
7063 +@@ -2404,6 +2409,11 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
7064 + * the flush returns error?
7065 + */
7066 + rc = filemap_write_and_wait(inode->i_mapping);
7067 ++ if (is_interrupt_error(rc)) {
7068 ++ rc = -ERESTARTSYS;
7069 ++ goto cifs_setattr_exit;
7070 ++ }
7071 ++
7072 + mapping_set_error(inode->i_mapping, rc);
7073 + rc = 0;
7074 +
7075 +diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
7076 +index facc94e159a1..e45f8e321371 100644
7077 +--- a/fs/cifs/misc.c
7078 ++++ b/fs/cifs/misc.c
7079 +@@ -523,6 +523,7 @@ cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
7080 + {
7081 + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
7082 + cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
7083 ++ cifs_sb->mnt_cifs_serverino_autodisabled = true;
7084 + cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s. This server doesn't seem to support them properly. Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n",
7085 + cifs_sb_master_tcon(cifs_sb)->treeName);
7086 + }
7087 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
7088 +index 2bc47eb6215e..cbe633f1840a 100644
7089 +--- a/fs/cifs/smb2pdu.c
7090 ++++ b/fs/cifs/smb2pdu.c
7091 +@@ -712,6 +712,7 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
7092 + } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
7093 + /* ops set to 3.0 by default, so update */
7094 + ses->server->ops = &smb21_operations;
7095 ++ ses->server->vals = &smb21_values;
7096 + }
7097 + } else if (le16_to_cpu(rsp->DialectRevision) !=
7098 + ses->server->vals->protocol_id) {
7099 +diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
7100 +index 5fdb9a509a97..1959931e14c1 100644
7101 +--- a/fs/cifs/smbdirect.c
7102 ++++ b/fs/cifs/smbdirect.c
7103 +@@ -2090,7 +2090,8 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
7104 + * rqst: the data to write
7105 + * return value: 0 on successful write, otherwise error code
7106 + */
7107 +-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7108 ++int smbd_send(struct TCP_Server_Info *server,
7109 ++ int num_rqst, struct smb_rqst *rqst_array)
7110 + {
7111 + struct smbd_connection *info = server->smbd_conn;
7112 + struct kvec vec;
7113 +@@ -2102,6 +2103,8 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7114 + info->max_send_size - sizeof(struct smbd_data_transfer);
7115 + struct kvec *iov;
7116 + int rc;
7117 ++ struct smb_rqst *rqst;
7118 ++ int rqst_idx;
7119 +
7120 + info->smbd_send_pending++;
7121 + if (info->transport_status != SMBD_CONNECTED) {
7122 +@@ -2109,47 +2112,41 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7123 + goto done;
7124 + }
7125 +
7126 +- /*
7127 +- * Skip the RFC1002 length defined in MS-SMB2 section 2.1
7128 +- * It is used only for TCP transport in the iov[0]
7129 +- * In future we may want to add a transport layer under protocol
7130 +- * layer so this will only be issued to TCP transport
7131 +- */
7132 +-
7133 +- if (rqst->rq_iov[0].iov_len != 4) {
7134 +- log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
7135 +- return -EINVAL;
7136 +- }
7137 +-
7138 + /*
7139 + * Add in the page array if there is one. The caller needs to set
7140 + * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
7141 + * ends at page boundary
7142 + */
7143 +- buflen = smb_rqst_len(server, rqst);
7144 ++ remaining_data_length = 0;
7145 ++ for (i = 0; i < num_rqst; i++)
7146 ++ remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
7147 +
7148 +- if (buflen + sizeof(struct smbd_data_transfer) >
7149 ++ if (remaining_data_length + sizeof(struct smbd_data_transfer) >
7150 + info->max_fragmented_send_size) {
7151 + log_write(ERR, "payload size %d > max size %d\n",
7152 +- buflen, info->max_fragmented_send_size);
7153 ++ remaining_data_length, info->max_fragmented_send_size);
7154 + rc = -EINVAL;
7155 + goto done;
7156 + }
7157 +
7158 +- iov = &rqst->rq_iov[1];
7159 ++ rqst_idx = 0;
7160 ++
7161 ++next_rqst:
7162 ++ rqst = &rqst_array[rqst_idx];
7163 ++ iov = rqst->rq_iov;
7164 +
7165 +- cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
7166 +- for (i = 0; i < rqst->rq_nvec-1; i++)
7167 ++ cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
7168 ++ rqst_idx, smb_rqst_len(server, rqst));
7169 ++ for (i = 0; i < rqst->rq_nvec; i++)
7170 + dump_smb(iov[i].iov_base, iov[i].iov_len);
7171 +
7172 +- remaining_data_length = buflen;
7173 +
7174 +- log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
7175 +- "rq_tailsz=%d buflen=%d\n",
7176 +- rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
7177 +- rqst->rq_tailsz, buflen);
7178 ++ log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
7179 ++ "rq_tailsz=%d buflen=%lu\n",
7180 ++ rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
7181 ++ rqst->rq_tailsz, smb_rqst_len(server, rqst));
7182 +
7183 +- start = i = iov[0].iov_len ? 0 : 1;
7184 ++ start = i = 0;
7185 + buflen = 0;
7186 + while (true) {
7187 + buflen += iov[i].iov_len;
7188 +@@ -2197,14 +2194,14 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7189 + goto done;
7190 + }
7191 + i++;
7192 +- if (i == rqst->rq_nvec-1)
7193 ++ if (i == rqst->rq_nvec)
7194 + break;
7195 + }
7196 + start = i;
7197 + buflen = 0;
7198 + } else {
7199 + i++;
7200 +- if (i == rqst->rq_nvec-1) {
7201 ++ if (i == rqst->rq_nvec) {
7202 + /* send out all remaining vecs */
7203 + remaining_data_length -= buflen;
7204 + log_write(INFO,
7205 +@@ -2248,6 +2245,10 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
7206 + }
7207 + }
7208 +
7209 ++ rqst_idx++;
7210 ++ if (rqst_idx < num_rqst)
7211 ++ goto next_rqst;
7212 ++
7213 + done:
7214 + /*
7215 + * As an optimization, we don't wait for individual I/O to finish
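
smbd_send() now accepts an array of requests: one pass sums their lengths against max_fragmented_send_size, then rqst_idx walks the array sending each request's complete iov (the removed check shows the old code expected a 4-byte RFC1002 length in iov[0] and skipped it, which only exists on the TCP transport). The two-pass shape as a sketch with invented types:

#include <stdio.h>
#include <stddef.h>

struct rqst { size_t len; };

static int send_all(struct rqst *rqst_array, int num_rqst, size_t max_send)
{
	size_t remaining = 0;

	/* Pass 1: total payload must fit the fragmented-send limit. */
	for (int i = 0; i < num_rqst; i++)
		remaining += rqst_array[i].len;
	if (remaining > max_send)
		return -1;

	/* Pass 2: send each request, tracking what is still to come. */
	for (int idx = 0; idx < num_rqst; idx++) {
		remaining -= rqst_array[idx].len;
		printf("send rqst %d, %zu bytes, %zu remaining\n",
		       idx, rqst_array[idx].len, remaining);
	}
	return 0;
}

int main(void)
{
	struct rqst reqs[] = { { 100 }, { 200 } };

	return send_all(reqs, 2, 4096);
}
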
7216 +diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
7217 +index a11096254f29..b5c240ff2191 100644
7218 +--- a/fs/cifs/smbdirect.h
7219 ++++ b/fs/cifs/smbdirect.h
7220 +@@ -292,7 +292,8 @@ void smbd_destroy(struct smbd_connection *info);
7221 +
7222 + /* Interface for carrying upper layer I/O through send/recv */
7223 + int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
7224 +-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
7225 ++int smbd_send(struct TCP_Server_Info *server,
7226 ++ int num_rqst, struct smb_rqst *rqst);
7227 +
7228 + enum mr_state {
7229 + MR_READY,
7230 +@@ -332,7 +333,7 @@ static inline void *smbd_get_connection(
7231 + static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
7232 + static inline void smbd_destroy(struct smbd_connection *info) {}
7233 + static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
7234 +-static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
7235 ++static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
7236 + #endif
7237 +
7238 + #endif
7239 +diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
7240 +index f2938bd95c40..fe77f41bff9f 100644
7241 +--- a/fs/cifs/transport.c
7242 ++++ b/fs/cifs/transport.c
7243 +@@ -287,7 +287,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
7244 + __be32 rfc1002_marker;
7245 +
7246 + if (cifs_rdma_enabled(server) && server->smbd_conn) {
7247 +- rc = smbd_send(server, rqst);
7248 ++ rc = smbd_send(server, num_rqst, rqst);
7249 + goto smbd_done;
7250 + }
7251 + if (ssocket == NULL)
7252 +diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
7253 +index 913061c0de1b..e8e27cdc2f67 100644
7254 +--- a/fs/ext4/block_validity.c
7255 ++++ b/fs/ext4/block_validity.c
7256 +@@ -137,6 +137,49 @@ static void debug_print_tree(struct ext4_sb_info *sbi)
7257 + printk(KERN_CONT "\n");
7258 + }
7259 +
7260 ++static int ext4_protect_reserved_inode(struct super_block *sb, u32 ino)
7261 ++{
7262 ++ struct inode *inode;
7263 ++ struct ext4_sb_info *sbi = EXT4_SB(sb);
7264 ++ struct ext4_map_blocks map;
7265 ++ u32 i = 0, num;
7266 ++ int err = 0, n;
7267 ++
7268 ++ if ((ino < EXT4_ROOT_INO) ||
7269 ++ (ino > le32_to_cpu(sbi->s_es->s_inodes_count)))
7270 ++ return -EINVAL;
7271 ++ inode = ext4_iget(sb, ino, EXT4_IGET_SPECIAL);
7272 ++ if (IS_ERR(inode))
7273 ++ return PTR_ERR(inode);
7274 ++ num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
7275 ++ while (i < num) {
7276 ++ map.m_lblk = i;
7277 ++ map.m_len = num - i;
7278 ++ n = ext4_map_blocks(NULL, inode, &map, 0);
7279 ++ if (n < 0) {
7280 ++ err = n;
7281 ++ break;
7282 ++ }
7283 ++ if (n == 0) {
7284 ++ i++;
7285 ++ } else {
7286 ++ if (!ext4_data_block_valid(sbi, map.m_pblk, n)) {
7287 ++ ext4_error(sb, "blocks %llu-%llu from inode %u "
7288 ++ "overlap system zone", map.m_pblk,
7289 ++ map.m_pblk + map.m_len - 1, ino);
7290 ++ err = -EFSCORRUPTED;
7291 ++ break;
7292 ++ }
7293 ++ err = add_system_zone(sbi, map.m_pblk, n);
7294 ++ if (err < 0)
7295 ++ break;
7296 ++ i += n;
7297 ++ }
7298 ++ }
7299 ++ iput(inode);
7300 ++ return err;
7301 ++}
7302 ++
7303 + int ext4_setup_system_zone(struct super_block *sb)
7304 + {
7305 + ext4_group_t ngroups = ext4_get_groups_count(sb);
7306 +@@ -171,6 +214,12 @@ int ext4_setup_system_zone(struct super_block *sb)
7307 + if (ret)
7308 + return ret;
7309 + }
7310 ++ if (ext4_has_feature_journal(sb) && sbi->s_es->s_journal_inum) {
7311 ++ ret = ext4_protect_reserved_inode(sb,
7312 ++ le32_to_cpu(sbi->s_es->s_journal_inum));
7313 ++ if (ret)
7314 ++ return ret;
7315 ++ }
7316 +
7317 + if (test_opt(sb, DEBUG))
7318 + debug_print_tree(sbi);
7319 +@@ -227,6 +276,11 @@ int ext4_check_blockref(const char *function, unsigned int line,
7320 + __le32 *bref = p;
7321 + unsigned int blk;
7322 +
7323 ++ if (ext4_has_feature_journal(inode->i_sb) &&
7324 ++ (inode->i_ino ==
7325 ++ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
7326 ++ return 0;
7327 ++
7328 + while (bref < p+max) {
7329 + blk = le32_to_cpu(*bref++);
7330 + if (blk &&
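ext4_protect_reserved_inode(), added above, walks every logical block of the journal inode and registers the mapped extents in the system zone, so later validity checks reject file blocks that collide with the journal. A compilable sketch of the same walk, with a toy map_blocks() standing in for ext4_map_blocks() (the extent layout and names here are invented for illustration):

#include <stdio.h>

/* Toy extent map: logical blocks 0-1 are a hole, 2-5 map to
 * physical blocks 100-103. Returns blocks mapped, 0 for a hole,
 * <0 on error -- the same contract the kernel helper relies on. */
static int map_blocks(unsigned lblk, unsigned max, unsigned *pblk, unsigned *n)
{
    if (lblk < 2)
        return 0;                       /* hole */
    if (lblk < 6) {
        unsigned len = 6 - lblk;

        *pblk = 100 + (lblk - 2);
        *n = len < max ? len : max;
        return (int)*n;
    }
    return -1;
}

int main(void)
{
    unsigned i = 0, num = 6, pblk, n;
    int r;

    /* Same loop shape as ext4_protect_reserved_inode(): a hole
     * advances one logical block, a mapped extent advances by its
     * whole length, and an error aborts the walk. */
    while (i < num) {
        r = map_blocks(i, num - i, &pblk, &n);
        if (r < 0)
            return 1;
        if (r == 0) {
            i++;
        } else {
            printf("lblk %u-%u -> pblk %u-%u\n",
                   i, i + n - 1, pblk, pblk + n - 1);
            i += n;
        }
    }
    return 0;
}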
7331 +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
7332 +index 45aea792d22a..00bf0b67aae8 100644
7333 +--- a/fs/ext4/extents.c
7334 ++++ b/fs/ext4/extents.c
7335 +@@ -518,10 +518,14 @@ __read_extent_tree_block(const char *function, unsigned int line,
7336 + }
7337 + if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
7338 + return bh;
7339 +- err = __ext4_ext_check(function, line, inode,
7340 +- ext_block_hdr(bh), depth, pblk);
7341 +- if (err)
7342 +- goto errout;
7343 ++ if (!ext4_has_feature_journal(inode->i_sb) ||
7344 ++ (inode->i_ino !=
7345 ++ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
7346 ++ err = __ext4_ext_check(function, line, inode,
7347 ++ ext_block_hdr(bh), depth, pblk);
7348 ++ if (err)
7349 ++ goto errout;
7350 ++ }
7351 + set_buffer_verified(bh);
7352 + /*
7353 + * If this is a leaf block, cache all of its entries
7354 +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
7355 +index e65559bf7728..cff6277f7a9f 100644
7356 +--- a/fs/ext4/inode.c
7357 ++++ b/fs/ext4/inode.c
7358 +@@ -399,6 +399,10 @@ static int __check_block_validity(struct inode *inode, const char *func,
7359 + unsigned int line,
7360 + struct ext4_map_blocks *map)
7361 + {
7362 ++ if (ext4_has_feature_journal(inode->i_sb) &&
7363 ++ (inode->i_ino ==
7364 ++ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum)))
7365 ++ return 0;
7366 + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
7367 + map->m_len)) {
7368 + ext4_error_inode(inode, func, line, map->m_pblk,
7369 +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
7370 +index 75fe92eaa681..1624618c2bc7 100644
7371 +--- a/fs/nfs/delegation.c
7372 ++++ b/fs/nfs/delegation.c
7373 +@@ -153,7 +153,7 @@ again:
7374 + /* Block nfs4_proc_unlck */
7375 + mutex_lock(&sp->so_delegreturn_mutex);
7376 + seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
7377 +- err = nfs4_open_delegation_recall(ctx, state, stateid, type);
7378 ++ err = nfs4_open_delegation_recall(ctx, state, stateid);
7379 + if (!err)
7380 + err = nfs_delegation_claim_locks(ctx, state, stateid);
7381 + if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
7382 +diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
7383 +index bb1ef8c37af4..c95477823fa6 100644
7384 +--- a/fs/nfs/delegation.h
7385 ++++ b/fs/nfs/delegation.h
7386 +@@ -61,7 +61,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
7387 +
7388 + /* NFSv4 delegation-related procedures */
7389 + int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
7390 +-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
7391 ++int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
7392 + int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
7393 + bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
7394 + bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
7395 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
7396 +index 31ae3bd5d9d2..621e3cf90f4e 100644
7397 +--- a/fs/nfs/nfs4proc.c
7398 ++++ b/fs/nfs/nfs4proc.c
7399 +@@ -2113,12 +2113,10 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
7400 + case -NFS4ERR_BAD_HIGH_SLOT:
7401 + case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
7402 + case -NFS4ERR_DEADSESSION:
7403 +- set_bit(NFS_DELEGATED_STATE, &state->flags);
7404 + nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
7405 + return -EAGAIN;
7406 + case -NFS4ERR_STALE_CLIENTID:
7407 + case -NFS4ERR_STALE_STATEID:
7408 +- set_bit(NFS_DELEGATED_STATE, &state->flags);
7409 + /* Don't recall a delegation if it was lost */
7410 + nfs4_schedule_lease_recovery(server->nfs_client);
7411 + return -EAGAIN;
7412 +@@ -2139,7 +2137,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
7413 + return -EAGAIN;
7414 + case -NFS4ERR_DELAY:
7415 + case -NFS4ERR_GRACE:
7416 +- set_bit(NFS_DELEGATED_STATE, &state->flags);
7417 + ssleep(1);
7418 + return -EAGAIN;
7419 + case -ENOMEM:
7420 +@@ -2155,8 +2152,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
7421 + }
7422 +
7423 + int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
7424 +- struct nfs4_state *state, const nfs4_stateid *stateid,
7425 +- fmode_t type)
7426 ++ struct nfs4_state *state, const nfs4_stateid *stateid)
7427 + {
7428 + struct nfs_server *server = NFS_SERVER(state->inode);
7429 + struct nfs4_opendata *opendata;
7430 +@@ -2167,20 +2163,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
7431 + if (IS_ERR(opendata))
7432 + return PTR_ERR(opendata);
7433 + nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
7434 +- nfs_state_clear_delegation(state);
7435 +- switch (type & (FMODE_READ|FMODE_WRITE)) {
7436 +- case FMODE_READ|FMODE_WRITE:
7437 +- case FMODE_WRITE:
7438 ++ if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
7439 + err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
7440 + if (err)
7441 +- break;
7442 ++ goto out;
7443 ++ }
7444 ++ if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
7445 + err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
7446 + if (err)
7447 +- break;
7448 +- /* Fall through */
7449 +- case FMODE_READ:
7450 ++ goto out;
7451 ++ }
7452 ++ if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
7453 + err = nfs4_open_recover_helper(opendata, FMODE_READ);
7454 ++ if (err)
7455 ++ goto out;
7456 + }
7457 ++ nfs_state_clear_delegation(state);
7458 ++out:
7459 + nfs4_opendata_put(opendata);
7460 + return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
7461 + }
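The recall rework above stops keying recovery off the delegation's fmode and instead re-opens each open mode the state does not already hold, clearing the delegation only once every needed OPEN succeeded. A simplified model of that control flow; the flag names mirror the kernel's, everything else is a toy:

#include <stdio.h>

#define NFS_O_RDWR_STATE    (1u << 0)
#define NFS_O_WRONLY_STATE  (1u << 1)
#define NFS_O_RDONLY_STATE  (1u << 2)

static int recover(const char *mode)
{
    printf("re-open for %s\n", mode);
    return 0;                   /* pretend the OPEN succeeded */
}

/* Re-open each mode the state lacks; clear the delegation only
 * after every needed recovery succeeded, so a failure leaves the
 * delegation intact for a retry. */
static int recall(unsigned flags, int *delegated)
{
    int err;

    if (!(flags & NFS_O_RDWR_STATE)) {
        err = recover("read+write");
        if (err)
            return err;
    }
    if (!(flags & NFS_O_WRONLY_STATE)) {
        err = recover("write");
        if (err)
            return err;
    }
    if (!(flags & NFS_O_RDONLY_STATE)) {
        err = recover("read");
        if (err)
            return err;
    }
    *delegated = 0;             /* nfs_state_clear_delegation() */
    return 0;
}

int main(void)
{
    int delegated = 1;

    recall(NFS_O_RDONLY_STATE, &delegated);  /* read already held */
    printf("delegated=%d\n", delegated);
    return 0;
}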
7462 +diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
7463 +index 8cf2218b46a7..6f90d91a8733 100644
7464 +--- a/fs/pstore/inode.c
7465 ++++ b/fs/pstore/inode.c
7466 +@@ -330,10 +330,6 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
7467 + goto fail;
7468 + inode->i_mode = S_IFREG | 0444;
7469 + inode->i_fop = &pstore_file_operations;
7470 +- private = kzalloc(sizeof(*private), GFP_KERNEL);
7471 +- if (!private)
7472 +- goto fail_alloc;
7473 +- private->record = record;
7474 +
7475 + switch (record->type) {
7476 + case PSTORE_TYPE_DMESG:
7477 +@@ -383,12 +379,16 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
7478 + break;
7479 + }
7480 +
7481 ++ private = kzalloc(sizeof(*private), GFP_KERNEL);
7482 ++ if (!private)
7483 ++ goto fail_inode;
7484 ++
7485 + dentry = d_alloc_name(root, name);
7486 + if (!dentry)
7487 + goto fail_private;
7488 +
7489 ++ private->record = record;
7490 + inode->i_size = private->total_size = size;
7491 +-
7492 + inode->i_private = private;
7493 +
7494 + if (record->time.tv_sec)
7495 +@@ -404,7 +404,7 @@ int pstore_mkfile(struct dentry *root, struct pstore_record *record)
7496 +
7497 + fail_private:
7498 + free_pstore_private(private);
7499 +-fail_alloc:
7500 ++fail_inode:
7501 + iput(inode);
7502 +
7503 + fail:
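The pstore_mkfile() fix reorders the work: the record-type switch, which can bail out early, now runs before the private structure is allocated, so early exits leak nothing and the later failure label frees exactly what exists. A sketch of the pattern under toy types (the fail_inode/fail_private labels are collapsed into comments):

#include <stdio.h>
#include <stdlib.h>

struct record { int type; };
struct priv { struct record *record; };

static int mkfile(struct record *rec)
{
    struct priv *private;

    if (rec->type != 0)         /* the switch() in pstore_mkfile */
        return -1;              /* nothing allocated yet: just return */

    private = calloc(1, sizeof(*private));
    if (!private)
        return -1;              /* goto fail_inode in the patch */
    private->record = rec;

    /* ... d_alloc_name() etc. would follow; only from here on
     * would a failure need free(private) (the fail_private label) */
    free(private);
    return 0;
}

int main(void)
{
    struct record r = { 0 };

    return mkfile(&r) ? 1 : 0;
}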
7504 +diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
7505 +index f9c6e0e3aec7..fa117e11458a 100644
7506 +--- a/include/drm/drm_device.h
7507 ++++ b/include/drm/drm_device.h
7508 +@@ -174,7 +174,13 @@ struct drm_device {
7509 + * races and imprecision over longer time periods, hence exposing a
7510 + * hardware vblank counter is always recommended.
7511 + *
7512 +- * If non-zeor, &drm_crtc_funcs.get_vblank_counter must be set.
7513 ++ * This is the statically configured device wide maximum. The driver
7514 ++ * can instead choose to use a runtime configurable per-crtc value
7515 ++ * &drm_vblank_crtc.max_vblank_count, in which case @max_vblank_count
7516 ++ * must be left at zero. See drm_crtc_set_max_vblank_count() on how
7517 ++ * to use the per-crtc value.
7518 ++ *
7519 ++ * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
7520 + */
7521 + u32 max_vblank_count; /**< size of vblank counter register */
7522 +
7523 +diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
7524 +index d25a9603ab57..e9c676381fd4 100644
7525 +--- a/include/drm/drm_vblank.h
7526 ++++ b/include/drm/drm_vblank.h
7527 +@@ -128,6 +128,26 @@ struct drm_vblank_crtc {
7528 + * @last: Protected by &drm_device.vbl_lock, used for wraparound handling.
7529 + */
7530 + u32 last;
7531 ++ /**
7532 ++ * @max_vblank_count:
7533 ++ *
7534 ++ * Maximum value of the vblank registers for this crtc. This value +1
7535 ++ * will result in a wrap-around of the vblank register. It is used
7536 ++ * by the vblank core to handle wrap-arounds.
7537 ++ *
7538 ++ * If set to zero the vblank core will try to guess the elapsed vblanks
7539 ++ * between times when the vblank interrupt is disabled through
7540 ++ * high-precision timestamps. That approach is suffering from small
7541 ++ * races and imprecision over longer time periods, hence exposing a
7542 ++ * hardware vblank counter is always recommended.
7543 ++ *
7544 ++ * This is the runtime configurable per-crtc maximum set through
7545 ++ * drm_crtc_set_max_vblank_count(). If this is used the driver
7546 ++ * must leave the device wide &drm_device.max_vblank_count at zero.
7547 ++ *
7548 ++ * If non-zero, &drm_crtc_funcs.get_vblank_counter must be set.
7549 ++ */
7550 ++ u32 max_vblank_count;
7551 + /**
7552 + * @inmodeset: Tracks whether the vblank is disabled due to a modeset.
7553 + * For legacy driver bit 2 additionally tracks whether an additional
7554 +@@ -206,4 +226,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
7555 + void drm_calc_timestamping_constants(struct drm_crtc *crtc,
7556 + const struct drm_display_mode *mode);
7557 + wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
7558 ++void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
7559 ++ u32 max_vblank_count);
7560 + #endif
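The new per-crtc max_vblank_count lets a driver declare at what value its hardware counter wraps; the core then computes elapsed vblanks modulo max + 1. A sketch of that arithmetic, assuming (as the masking trick requires) a maximum of the form 2^n - 1:

#include <stdio.h>
#include <stdint.h>

/* Elapsed count between two reads of a counter that wraps at
 * max_count + 1 -- the arithmetic the vblank core applies with a
 * per-crtc max_vblank_count. */
static uint32_t vblank_elapsed(uint32_t last, uint32_t now, uint32_t max_count)
{
    return (now - last) & max_count;
}

int main(void)
{
    uint32_t max = 0xffffff;    /* e.g. a 24-bit hardware register */

    printf("%u\n", vblank_elapsed(0xfffffe, 0x000001, max)); /* 3 */
    return 0;
}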
7561 +diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
7562 +index bef2e36c01b4..91f9f95ad506 100644
7563 +--- a/include/linux/device-mapper.h
7564 ++++ b/include/linux/device-mapper.h
7565 +@@ -62,7 +62,8 @@ typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
7566 + struct request *rq,
7567 + union map_info *map_context,
7568 + struct request **clone);
7569 +-typedef void (*dm_release_clone_request_fn) (struct request *clone);
7570 ++typedef void (*dm_release_clone_request_fn) (struct request *clone,
7571 ++ union map_info *map_context);
7572 +
7573 + /*
7574 + * Returns:
7575 +diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
7576 +index acc4279ad5e3..412098b24f58 100644
7577 +--- a/include/linux/gpio/consumer.h
7578 ++++ b/include/linux/gpio/consumer.h
7579 +@@ -222,7 +222,7 @@ static inline void gpiod_put(struct gpio_desc *desc)
7580 + might_sleep();
7581 +
7582 + /* GPIO can never have been requested */
7583 +- WARN_ON(1);
7584 ++ WARN_ON(desc);
7585 + }
7586 +
7587 + static inline void gpiod_put_array(struct gpio_descs *descs)
7588 +@@ -230,7 +230,7 @@ static inline void gpiod_put_array(struct gpio_descs *descs)
7589 + might_sleep();
7590 +
7591 + /* GPIO can never have been requested */
7592 +- WARN_ON(1);
7593 ++ WARN_ON(descs);
7594 + }
7595 +
7596 + static inline struct gpio_desc *__must_check
7597 +@@ -283,7 +283,7 @@ static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
7598 + might_sleep();
7599 +
7600 + /* GPIO can never have been requested */
7601 +- WARN_ON(1);
7602 ++ WARN_ON(desc);
7603 + }
7604 +
7605 + static inline void devm_gpiod_put_array(struct device *dev,
7606 +@@ -292,32 +292,32 @@ static inline void devm_gpiod_put_array(struct device *dev,
7607 + might_sleep();
7608 +
7609 + /* GPIO can never have been requested */
7610 +- WARN_ON(1);
7611 ++ WARN_ON(descs);
7612 + }
7613 +
7614 +
7615 + static inline int gpiod_get_direction(const struct gpio_desc *desc)
7616 + {
7617 + /* GPIO can never have been requested */
7618 +- WARN_ON(1);
7619 ++ WARN_ON(desc);
7620 + return -ENOSYS;
7621 + }
7622 + static inline int gpiod_direction_input(struct gpio_desc *desc)
7623 + {
7624 + /* GPIO can never have been requested */
7625 +- WARN_ON(1);
7626 ++ WARN_ON(desc);
7627 + return -ENOSYS;
7628 + }
7629 + static inline int gpiod_direction_output(struct gpio_desc *desc, int value)
7630 + {
7631 + /* GPIO can never have been requested */
7632 +- WARN_ON(1);
7633 ++ WARN_ON(desc);
7634 + return -ENOSYS;
7635 + }
7636 + static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
7637 + {
7638 + /* GPIO can never have been requested */
7639 +- WARN_ON(1);
7640 ++ WARN_ON(desc);
7641 + return -ENOSYS;
7642 + }
7643 +
7644 +@@ -325,7 +325,7 @@ static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value)
7645 + static inline int gpiod_get_value(const struct gpio_desc *desc)
7646 + {
7647 + /* GPIO can never have been requested */
7648 +- WARN_ON(1);
7649 ++ WARN_ON(desc);
7650 + return 0;
7651 + }
7652 + static inline int gpiod_get_array_value(unsigned int array_size,
7653 +@@ -333,25 +333,25 @@ static inline int gpiod_get_array_value(unsigned int array_size,
7654 + int *value_array)
7655 + {
7656 + /* GPIO can never have been requested */
7657 +- WARN_ON(1);
7658 ++ WARN_ON(desc_array);
7659 + return 0;
7660 + }
7661 + static inline void gpiod_set_value(struct gpio_desc *desc, int value)
7662 + {
7663 + /* GPIO can never have been requested */
7664 +- WARN_ON(1);
7665 ++ WARN_ON(desc);
7666 + }
7667 + static inline void gpiod_set_array_value(unsigned int array_size,
7668 + struct gpio_desc **desc_array,
7669 + int *value_array)
7670 + {
7671 + /* GPIO can never have been requested */
7672 +- WARN_ON(1);
7673 ++ WARN_ON(desc_array);
7674 + }
7675 + static inline int gpiod_get_raw_value(const struct gpio_desc *desc)
7676 + {
7677 + /* GPIO can never have been requested */
7678 +- WARN_ON(1);
7679 ++ WARN_ON(desc);
7680 + return 0;
7681 + }
7682 + static inline int gpiod_get_raw_array_value(unsigned int array_size,
7683 +@@ -359,27 +359,27 @@ static inline int gpiod_get_raw_array_value(unsigned int array_size,
7684 + int *value_array)
7685 + {
7686 + /* GPIO can never have been requested */
7687 +- WARN_ON(1);
7688 ++ WARN_ON(desc_array);
7689 + return 0;
7690 + }
7691 + static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value)
7692 + {
7693 + /* GPIO can never have been requested */
7694 +- WARN_ON(1);
7695 ++ WARN_ON(desc);
7696 + }
7697 + static inline int gpiod_set_raw_array_value(unsigned int array_size,
7698 + struct gpio_desc **desc_array,
7699 + int *value_array)
7700 + {
7701 + /* GPIO can never have been requested */
7702 +- WARN_ON(1);
7703 ++ WARN_ON(desc_array);
7704 + return 0;
7705 + }
7706 +
7707 + static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc)
7708 + {
7709 + /* GPIO can never have been requested */
7710 +- WARN_ON(1);
7711 ++ WARN_ON(desc);
7712 + return 0;
7713 + }
7714 + static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
7715 +@@ -387,25 +387,25 @@ static inline int gpiod_get_array_value_cansleep(unsigned int array_size,
7716 + int *value_array)
7717 + {
7718 + /* GPIO can never have been requested */
7719 +- WARN_ON(1);
7720 ++ WARN_ON(desc_array);
7721 + return 0;
7722 + }
7723 + static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
7724 + {
7725 + /* GPIO can never have been requested */
7726 +- WARN_ON(1);
7727 ++ WARN_ON(desc);
7728 + }
7729 + static inline void gpiod_set_array_value_cansleep(unsigned int array_size,
7730 + struct gpio_desc **desc_array,
7731 + int *value_array)
7732 + {
7733 + /* GPIO can never have been requested */
7734 +- WARN_ON(1);
7735 ++ WARN_ON(desc_array);
7736 + }
7737 + static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc)
7738 + {
7739 + /* GPIO can never have been requested */
7740 +- WARN_ON(1);
7741 ++ WARN_ON(desc);
7742 + return 0;
7743 + }
7744 + static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
7745 +@@ -413,55 +413,55 @@ static inline int gpiod_get_raw_array_value_cansleep(unsigned int array_size,
7746 + int *value_array)
7747 + {
7748 + /* GPIO can never have been requested */
7749 +- WARN_ON(1);
7750 ++ WARN_ON(desc_array);
7751 + return 0;
7752 + }
7753 + static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc,
7754 + int value)
7755 + {
7756 + /* GPIO can never have been requested */
7757 +- WARN_ON(1);
7758 ++ WARN_ON(desc);
7759 + }
7760 + static inline int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
7761 + struct gpio_desc **desc_array,
7762 + int *value_array)
7763 + {
7764 + /* GPIO can never have been requested */
7765 +- WARN_ON(1);
7766 ++ WARN_ON(desc_array);
7767 + return 0;
7768 + }
7769 +
7770 + static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
7771 + {
7772 + /* GPIO can never have been requested */
7773 +- WARN_ON(1);
7774 ++ WARN_ON(desc);
7775 + return -ENOSYS;
7776 + }
7777 +
7778 + static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
7779 + {
7780 + /* GPIO can never have been requested */
7781 +- WARN_ON(1);
7782 ++ WARN_ON(desc);
7783 + return -ENOSYS;
7784 + }
7785 +
7786 + static inline int gpiod_is_active_low(const struct gpio_desc *desc)
7787 + {
7788 + /* GPIO can never have been requested */
7789 +- WARN_ON(1);
7790 ++ WARN_ON(desc);
7791 + return 0;
7792 + }
7793 + static inline int gpiod_cansleep(const struct gpio_desc *desc)
7794 + {
7795 + /* GPIO can never have been requested */
7796 +- WARN_ON(1);
7797 ++ WARN_ON(desc);
7798 + return 0;
7799 + }
7800 +
7801 + static inline int gpiod_to_irq(const struct gpio_desc *desc)
7802 + {
7803 + /* GPIO can never have been requested */
7804 +- WARN_ON(1);
7805 ++ WARN_ON(desc);
7806 + return -EINVAL;
7807 + }
7808 +
7809 +@@ -469,7 +469,7 @@ static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
7810 + const char *name)
7811 + {
7812 + /* GPIO can never have been requested */
7813 +- WARN_ON(1);
7814 ++ WARN_ON(desc);
7815 + return -EINVAL;
7816 + }
7817 +
7818 +@@ -481,7 +481,7 @@ static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
7819 + static inline int desc_to_gpio(const struct gpio_desc *desc)
7820 + {
7821 + /* GPIO can never have been requested */
7822 +- WARN_ON(1);
7823 ++ WARN_ON(desc);
7824 + return -EINVAL;
7825 + }
7826 +
7827 +diff --git a/include/media/cec.h b/include/media/cec.h
7828 +index dc4b412e8fa1..59bf280e9715 100644
7829 +--- a/include/media/cec.h
7830 ++++ b/include/media/cec.h
7831 +@@ -333,67 +333,6 @@ void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
7832 + u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
7833 + unsigned int *offset);
7834 +
7835 +-/**
7836 +- * cec_set_edid_phys_addr() - find and set the physical address
7837 +- *
7838 +- * @edid: pointer to the EDID data
7839 +- * @size: size in bytes of the EDID data
7840 +- * @phys_addr: the new physical address
7841 +- *
7842 +- * This function finds the location of the physical address in the EDID
7843 +- * and fills in the given physical address and updates the checksum
7844 +- * at the end of the EDID block. It does nothing if the EDID doesn't
7845 +- * contain a physical address.
7846 +- */
7847 +-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
7848 +-
7849 +-/**
7850 +- * cec_phys_addr_for_input() - calculate the PA for an input
7851 +- *
7852 +- * @phys_addr: the physical address of the parent
7853 +- * @input: the number of the input port, must be between 1 and 15
7854 +- *
7855 +- * This function calculates a new physical address based on the input
7856 +- * port number. For example:
7857 +- *
7858 +- * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
7859 +- *
7860 +- * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
7861 +- *
7862 +- * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
7863 +- *
7864 +- * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
7865 +- *
7866 +- * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
7867 +- */
7868 +-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
7869 +-
7870 +-/**
7871 +- * cec_phys_addr_validate() - validate a physical address from an EDID
7872 +- *
7873 +- * @phys_addr: the physical address to validate
7874 +- * @parent: if not %NULL, then this is filled with the parents PA.
7875 +- * @port: if not %NULL, then this is filled with the input port.
7876 +- *
7877 +- * This validates a physical address as read from an EDID. If the
7878 +- * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
7879 +- * then it will return -EINVAL.
7880 +- *
7881 +- * The parent PA is passed into %parent and the input port is passed into
7882 +- * %port. For example:
7883 +- *
7884 +- * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
7885 +- *
7886 +- * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
7887 +- *
7888 +- * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
7889 +- *
7890 +- * PA = f.f.f.f: has parent f.f.f.f and input port 0.
7891 +- *
7892 +- * Return: 0 if the PA is valid, -EINVAL if not.
7893 +- */
7894 +-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
7895 +-
7896 + #else
7897 +
7898 + static inline int cec_register_adapter(struct cec_adapter *adap,
7899 +@@ -428,25 +367,6 @@ static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
7900 + return CEC_PHYS_ADDR_INVALID;
7901 + }
7902 +
7903 +-static inline void cec_set_edid_phys_addr(u8 *edid, unsigned int size,
7904 +- u16 phys_addr)
7905 +-{
7906 +-}
7907 +-
7908 +-static inline u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
7909 +-{
7910 +- return CEC_PHYS_ADDR_INVALID;
7911 +-}
7912 +-
7913 +-static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
7914 +-{
7915 +- if (parent)
7916 +- *parent = phys_addr;
7917 +- if (port)
7918 +- *port = 0;
7919 +- return 0;
7920 +-}
7921 +-
7922 + #endif
7923 +
7924 + /**
7925 +diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
7926 +index 17cb27df1b81..4e7732d3908c 100644
7927 +--- a/include/media/v4l2-dv-timings.h
7928 ++++ b/include/media/v4l2-dv-timings.h
7929 +@@ -234,4 +234,10 @@ v4l2_hdmi_rx_colorimetry(const struct hdmi_avi_infoframe *avi,
7930 + const struct hdmi_vendor_infoframe *hdmi,
7931 + unsigned int height);
7932 +
7933 ++u16 v4l2_get_edid_phys_addr(const u8 *edid, unsigned int size,
7934 ++ unsigned int *offset);
7935 ++void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
7936 ++u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input);
7937 ++int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
7938 ++
7939 + #endif
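These declarations move the EDID physical-address helpers from the CEC core into V4L2 (the cec.h hunk above deletes the originals). The removed kernel-doc pins down the input-PA math: each level of the address is one nibble, and a new input claims the first zero nibble. A toy re-implementation of that rule -- phys_addr_for_input() here is illustrative, not the v4l2_phys_addr_for_input() symbol itself:

#include <stdio.h>
#include <stdint.h>

#define PHYS_ADDR_INVALID 0xffff

/* 0.0.0.0 + input 2 -> 2.0.0.0, 3.2.1.0 + 5 -> 3.2.1.5, and a PA
 * already four levels deep maxes out to f.f.f.f, matching the
 * examples in the removed kernel-doc. */
static uint16_t phys_addr_for_input(uint16_t pa, uint8_t input)
{
    int shift;

    for (shift = 12; shift >= 0; shift -= 4)
        if (!((pa >> shift) & 0xf))
            return pa | ((uint16_t)input << shift);
    return PHYS_ADDR_INVALID;
}

int main(void)
{
    printf("%04x\n", phys_addr_for_input(0x0000, 2)); /* 2000 */
    printf("%04x\n", phys_addr_for_input(0x3210, 5)); /* 3215 */
    printf("%04x\n", phys_addr_for_input(0x3213, 5)); /* ffff */
    return 0;
}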
7940 +diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
7941 +index 67e0a990144a..468deae5d603 100644
7942 +--- a/include/net/cfg80211.h
7943 ++++ b/include/net/cfg80211.h
7944 +@@ -6562,6 +6562,21 @@ int cfg80211_external_auth_request(struct net_device *netdev,
7945 + struct cfg80211_external_auth_params *params,
7946 + gfp_t gfp);
7947 +
7948 ++/**
7949 ++ * cfg80211_iftype_allowed - check whether the interface can be allowed
7950 ++ * @wiphy: the wiphy
7951 ++ * @iftype: interface type
7952 ++ * @is_4addr: use_4addr flag, must be '0' when check_swif is '1'
7953 ++ * @check_swif: check iftype against software interfaces
7954 ++ *
7955 ++ * Check whether the interface is allowed to operate; additionally, this API
7956 ++ * can be used to check iftype against the software interfaces when
7957 ++ * check_swif is '1'.
7958 ++ */
7959 ++bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
7960 ++ bool is_4addr, u8 check_swif);
7961 ++
7962 ++
7963 + /* Logging, debugging and troubleshooting/diagnostic helpers. */
7964 +
7965 + /* wiphy_printk helpers, similar to dev_printk */
7966 +diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
7967 +index 7b8c9e19bad1..0f3cb13db8e9 100644
7968 +--- a/include/uapi/linux/keyctl.h
7969 ++++ b/include/uapi/linux/keyctl.h
7970 +@@ -65,7 +65,12 @@
7971 +
7972 + /* keyctl structures */
7973 + struct keyctl_dh_params {
7974 +- __s32 private;
7975 ++ union {
7976 ++#ifndef __cplusplus
7977 ++ __s32 private;
7978 ++#endif
7979 ++ __s32 priv;
7980 ++ };
7981 + __s32 prime;
7982 + __s32 base;
7983 + };
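The uapi change keeps the old field name for C while giving C++, where 'private' is a reserved word, an alternative spelling at the same offset; both names alias one storage location, so the ABI is unchanged. A compilable copy of the trick under a toy struct name (needs C11 for the anonymous union):

#include <stdio.h>

struct dh_params {
    union {
#ifndef __cplusplus
        int private;
#endif
        int priv;
    };
    int prime;
    int base;
};

int main(void)
{
    struct dh_params p = { .priv = 42 };

    printf("%d\n", p.private);  /* 42: both names, one field */
    return 0;
}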
7984 +diff --git a/kernel/module.c b/kernel/module.c
7985 +index 3fda10c549a2..0d86fc73d63d 100644
7986 +--- a/kernel/module.c
7987 ++++ b/kernel/module.c
7988 +@@ -76,14 +76,9 @@
7989 +
7990 + /*
7991 + * Modules' sections will be aligned on page boundaries
7992 +- * to ensure complete separation of code and data, but
7993 +- * only when CONFIG_STRICT_MODULE_RWX=y
7994 ++ * to ensure complete separation of code and data
7995 + */
7996 +-#ifdef CONFIG_STRICT_MODULE_RWX
7997 + # define debug_align(X) ALIGN(X, PAGE_SIZE)
7998 +-#else
7999 +-# define debug_align(X) (X)
8000 +-#endif
8001 +
8002 + /* If this is set, the section belongs in the init part of the module */
8003 + #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
8004 +@@ -1699,6 +1694,8 @@ static int add_usage_links(struct module *mod)
8005 + return ret;
8006 + }
8007 +
8008 ++static void module_remove_modinfo_attrs(struct module *mod, int end);
8009 ++
8010 + static int module_add_modinfo_attrs(struct module *mod)
8011 + {
8012 + struct module_attribute *attr;
8013 +@@ -1713,24 +1710,34 @@ static int module_add_modinfo_attrs(struct module *mod)
8014 + return -ENOMEM;
8015 +
8016 + temp_attr = mod->modinfo_attrs;
8017 +- for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
8018 ++ for (i = 0; (attr = modinfo_attrs[i]); i++) {
8019 + if (!attr->test || attr->test(mod)) {
8020 + memcpy(temp_attr, attr, sizeof(*temp_attr));
8021 + sysfs_attr_init(&temp_attr->attr);
8022 + error = sysfs_create_file(&mod->mkobj.kobj,
8023 + &temp_attr->attr);
8024 ++ if (error)
8025 ++ goto error_out;
8026 + ++temp_attr;
8027 + }
8028 + }
8029 ++
8030 ++ return 0;
8031 ++
8032 ++error_out:
8033 ++ if (i > 0)
8034 ++ module_remove_modinfo_attrs(mod, --i);
8035 + return error;
8036 + }
8037 +
8038 +-static void module_remove_modinfo_attrs(struct module *mod)
8039 ++static void module_remove_modinfo_attrs(struct module *mod, int end)
8040 + {
8041 + struct module_attribute *attr;
8042 + int i;
8043 +
8044 + for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
8045 ++ if (end >= 0 && i > end)
8046 ++ break;
8047 + /* pick a field to test for end of list */
8048 + if (!attr->attr.name)
8049 + break;
8050 +@@ -1818,7 +1825,7 @@ static int mod_sysfs_setup(struct module *mod,
8051 + return 0;
8052 +
8053 + out_unreg_modinfo_attrs:
8054 +- module_remove_modinfo_attrs(mod);
8055 ++ module_remove_modinfo_attrs(mod, -1);
8056 + out_unreg_param:
8057 + module_param_sysfs_remove(mod);
8058 + out_unreg_holders:
8059 +@@ -1854,7 +1861,7 @@ static void mod_sysfs_fini(struct module *mod)
8060 + {
8061 + }
8062 +
8063 +-static void module_remove_modinfo_attrs(struct module *mod)
8064 ++static void module_remove_modinfo_attrs(struct module *mod, int end)
8065 + {
8066 + }
8067 +
8068 +@@ -1870,7 +1877,7 @@ static void init_param_lock(struct module *mod)
8069 + static void mod_sysfs_teardown(struct module *mod)
8070 + {
8071 + del_usage_links(mod);
8072 +- module_remove_modinfo_attrs(mod);
8073 ++ module_remove_modinfo_attrs(mod, -1);
8074 + module_param_sysfs_remove(mod);
8075 + kobject_put(mod->mkobj.drivers_dir);
8076 + kobject_put(mod->holders_dir);
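module_add_modinfo_attrs() previously stopped the loop on the first sysfs_create_file() failure but left the files already created in place; the fix unwinds them via module_remove_modinfo_attrs(mod, --i). The general create-then-unwind shape, reduced to a toy:

#include <stdio.h>

#define N 4

static int create(int i) { return i == 2 ? -1 : 0; } /* third one fails */
static void destroy(int i) { printf("remove %d\n", i); }

/* On failure, tear down exactly the entries [0, i) that were
 * created, instead of leaking them. */
static int add_all(void)
{
    int i, err;

    for (i = 0; i < N; i++) {
        err = create(i);
        if (err)
            goto error_out;
    }
    return 0;

error_out:
    while (i-- > 0)
        destroy(i);
    return err;
}

int main(void)
{
    printf("add_all() = %d\n", add_all());
    return 0;
}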
8077 +diff --git a/kernel/resource.c b/kernel/resource.c
8078 +index 30e1bc68503b..bce773cc5e41 100644
8079 +--- a/kernel/resource.c
8080 ++++ b/kernel/resource.c
8081 +@@ -318,24 +318,27 @@ int release_resource(struct resource *old)
8082 +
8083 + EXPORT_SYMBOL(release_resource);
8084 +
8085 +-/*
8086 +- * Finds the lowest iomem resource existing within [res->start.res->end).
8087 +- * The caller must specify res->start, res->end, res->flags, and optionally
8088 +- * desc. If found, returns 0, res is overwritten, if not found, returns -1.
8089 +- * This function walks the whole tree and not just first level children until
8090 +- * and unless first_level_children_only is true.
8091 ++/**
8092 ++ * Finds the lowest iomem resource that covers part of [start..end]. The
8093 ++ * caller must specify start, end, flags, and desc (which may be
8094 ++ * IORES_DESC_NONE).
8095 ++ *
8096 ++ * If a resource is found, returns 0 and *res is overwritten with the part
8097 ++ * of the resource that's within [start..end]; if none is found, returns
8098 ++ * -ENODEV. Returns -EINVAL for invalid parameters.
8099 ++ *
8100 ++ * This function walks the whole tree and not just first level children
8101 ++ * unless @first_level_children_only is true.
8102 + */
8103 +-static int find_next_iomem_res(struct resource *res, unsigned long desc,
8104 +- bool first_level_children_only)
8105 ++static int find_next_iomem_res(resource_size_t start, resource_size_t end,
8106 ++ unsigned long flags, unsigned long desc,
8107 ++ bool first_level_children_only,
8108 ++ struct resource *res)
8109 + {
8110 +- resource_size_t start, end;
8111 + struct resource *p;
8112 + bool sibling_only = false;
8113 +
8114 + BUG_ON(!res);
8115 +-
8116 +- start = res->start;
8117 +- end = res->end;
8118 + BUG_ON(start >= end);
8119 +
8120 + if (first_level_children_only)
8121 +@@ -344,7 +347,7 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
8122 + read_lock(&resource_lock);
8123 +
8124 + for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
8125 +- if ((p->flags & res->flags) != res->flags)
8126 ++ if ((p->flags & flags) != flags)
8127 + continue;
8128 + if ((desc != IORES_DESC_NONE) && (desc != p->desc))
8129 + continue;
8130 +@@ -352,39 +355,38 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
8131 + p = NULL;
8132 + break;
8133 + }
8134 +- if ((p->end >= start) && (p->start < end))
8135 ++ if ((p->end >= start) && (p->start <= end))
8136 + break;
8137 + }
8138 +
8139 ++ if (p) {
8140 ++ /* copy data */
8141 ++ res->start = max(start, p->start);
8142 ++ res->end = min(end, p->end);
8143 ++ res->flags = p->flags;
8144 ++ res->desc = p->desc;
8145 ++ }
8146 ++
8147 + read_unlock(&resource_lock);
8148 +- if (!p)
8149 +- return -1;
8150 +- /* copy data */
8151 +- if (res->start < p->start)
8152 +- res->start = p->start;
8153 +- if (res->end > p->end)
8154 +- res->end = p->end;
8155 +- res->flags = p->flags;
8156 +- res->desc = p->desc;
8157 +- return 0;
8158 ++ return p ? 0 : -ENODEV;
8159 + }
8160 +
8161 +-static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
8162 +- bool first_level_children_only,
8163 +- void *arg,
8164 ++static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
8165 ++ unsigned long flags, unsigned long desc,
8166 ++ bool first_level_children_only, void *arg,
8167 + int (*func)(struct resource *, void *))
8168 + {
8169 +- u64 orig_end = res->end;
8170 ++ struct resource res;
8171 + int ret = -1;
8172 +
8173 +- while ((res->start < res->end) &&
8174 +- !find_next_iomem_res(res, desc, first_level_children_only)) {
8175 +- ret = (*func)(res, arg);
8176 ++ while (start < end &&
8177 ++ !find_next_iomem_res(start, end, flags, desc,
8178 ++ first_level_children_only, &res)) {
8179 ++ ret = (*func)(&res, arg);
8180 + if (ret)
8181 + break;
8182 +
8183 +- res->start = res->end + 1;
8184 +- res->end = orig_end;
8185 ++ start = res.end + 1;
8186 + }
8187 +
8188 + return ret;
8189 +@@ -407,13 +409,7 @@ static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
8190 + int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
8191 + u64 end, void *arg, int (*func)(struct resource *, void *))
8192 + {
8193 +- struct resource res;
8194 +-
8195 +- res.start = start;
8196 +- res.end = end;
8197 +- res.flags = flags;
8198 +-
8199 +- return __walk_iomem_res_desc(&res, desc, false, arg, func);
8200 ++ return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func);
8201 + }
8202 + EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
8203 +
8204 +@@ -427,13 +423,9 @@ EXPORT_SYMBOL_GPL(walk_iomem_res_desc);
8205 + int walk_system_ram_res(u64 start, u64 end, void *arg,
8206 + int (*func)(struct resource *, void *))
8207 + {
8208 +- struct resource res;
8209 ++ unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
8210 +
8211 +- res.start = start;
8212 +- res.end = end;
8213 +- res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
8214 +-
8215 +- return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
8216 ++ return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
8217 + arg, func);
8218 + }
8219 +
8220 +@@ -444,13 +436,9 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
8221 + int walk_mem_res(u64 start, u64 end, void *arg,
8222 + int (*func)(struct resource *, void *))
8223 + {
8224 +- struct resource res;
8225 ++ unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
8226 +
8227 +- res.start = start;
8228 +- res.end = end;
8229 +- res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
8230 +-
8231 +- return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
8232 ++ return __walk_iomem_res_desc(start, end, flags, IORES_DESC_NONE, true,
8233 + arg, func);
8234 + }
8235 +
8236 +@@ -464,25 +452,25 @@ int walk_mem_res(u64 start, u64 end, void *arg,
8237 + int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
8238 + void *arg, int (*func)(unsigned long, unsigned long, void *))
8239 + {
8240 ++ resource_size_t start, end;
8241 ++ unsigned long flags;
8242 + struct resource res;
8243 + unsigned long pfn, end_pfn;
8244 +- u64 orig_end;
8245 + int ret = -1;
8246 +
8247 +- res.start = (u64) start_pfn << PAGE_SHIFT;
8248 +- res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
8249 +- res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
8250 +- orig_end = res.end;
8251 +- while ((res.start < res.end) &&
8252 +- (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
8253 ++ start = (u64) start_pfn << PAGE_SHIFT;
8254 ++ end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
8255 ++ flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
8256 ++ while (start < end &&
8257 ++ !find_next_iomem_res(start, end, flags, IORES_DESC_NONE,
8258 ++ true, &res)) {
8259 + pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
8260 + end_pfn = (res.end + 1) >> PAGE_SHIFT;
8261 + if (end_pfn > pfn)
8262 + ret = (*func)(pfn, end_pfn - pfn, arg);
8263 + if (ret)
8264 + break;
8265 +- res.start = res.end + 1;
8266 +- res.end = orig_end;
8267 ++ start = res.end + 1;
8268 + }
8269 + return ret;
8270 + }
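The resource.c rework turns find_next_iomem_res() into a pure query -- the caller passes the window and receives the clipped intersection -- so the walkers keep a plain cursor instead of mutating a struct resource between iterations, and failure is a real errno (-ENODEV) rather than -1. A self-contained model of the walk over a hard-coded toy tree:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct res { uint64_t start, end; };   /* inclusive range */

static const struct res tree[] = { { 0x1000, 0x1fff }, { 0x3000, 0x3fff } };

static int find_next(uint64_t start, uint64_t end, struct res *out)
{
    size_t i;

    for (i = 0; i < sizeof(tree) / sizeof(tree[0]); i++) {
        if (tree[i].end >= start && tree[i].start <= end) {
            /* clip to the queried window, as the reworked helper does */
            out->start = tree[i].start > start ? tree[i].start : start;
            out->end = tree[i].end < end ? tree[i].end : end;
            return 0;
        }
    }
    return -19;                        /* stands in for -ENODEV */
}

int main(void)
{
    struct res r;
    uint64_t start = 0x0, end = 0x5000;

    /* Same walk shape as __walk_iomem_res_desc() after the rework:
     * the iterator advances its own cursor past each hit. */
    while (start < end && !find_next(start, end, &r)) {
        printf("%#llx-%#llx\n", (unsigned long long)r.start,
               (unsigned long long)r.end);
        start = r.end + 1;
    }
    return 0;
}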
8271 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
8272 +index 75f322603d44..49ed38914669 100644
8273 +--- a/kernel/sched/fair.c
8274 ++++ b/kernel/sched/fair.c
8275 +@@ -4420,6 +4420,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
8276 + if (likely(cfs_rq->runtime_remaining > 0))
8277 + return;
8278 +
8279 ++ if (cfs_rq->throttled)
8280 ++ return;
8281 + /*
8282 + * if we're unable to extend our runtime we resched so that the active
8283 + * hierarchy can be throttled
8284 +@@ -4615,6 +4617,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
8285 + if (!cfs_rq_throttled(cfs_rq))
8286 + goto next;
8287 +
8288 ++ /* By the above check, this should never be true */
8289 ++ SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
8290 ++
8291 + runtime = -cfs_rq->runtime_remaining + 1;
8292 + if (runtime > remaining)
8293 + runtime = remaining;
8294 +diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
8295 +index 443edcddac8a..c2708e1f0c69 100644
8296 +--- a/kernel/time/timekeeping.c
8297 ++++ b/kernel/time/timekeeping.c
8298 +@@ -823,7 +823,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
8299 +
8300 + } while (read_seqcount_retry(&tk_core.seq, seq));
8301 +
8302 +- return base + nsecs;
8303 ++ return ktime_add_ns(base, nsecs);
8304 + }
8305 + EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset);
8306 +
8307 +diff --git a/mm/migrate.c b/mm/migrate.c
8308 +index b2ea7d1e6f24..0c48191a9036 100644
8309 +--- a/mm/migrate.c
8310 ++++ b/mm/migrate.c
8311 +@@ -2328,16 +2328,13 @@ next:
8312 + */
8313 + static void migrate_vma_collect(struct migrate_vma *migrate)
8314 + {
8315 +- struct mm_walk mm_walk;
8316 +-
8317 +- mm_walk.pmd_entry = migrate_vma_collect_pmd;
8318 +- mm_walk.pte_entry = NULL;
8319 +- mm_walk.pte_hole = migrate_vma_collect_hole;
8320 +- mm_walk.hugetlb_entry = NULL;
8321 +- mm_walk.test_walk = NULL;
8322 +- mm_walk.vma = migrate->vma;
8323 +- mm_walk.mm = migrate->vma->vm_mm;
8324 +- mm_walk.private = migrate;
8325 ++ struct mm_walk mm_walk = {
8326 ++ .pmd_entry = migrate_vma_collect_pmd,
8327 ++ .pte_hole = migrate_vma_collect_hole,
8328 ++ .vma = migrate->vma,
8329 ++ .mm = migrate->vma->vm_mm,
8330 ++ .private = migrate,
8331 ++ };
8332 +
8333 + mmu_notifier_invalidate_range_start(mm_walk.mm,
8334 + migrate->start,
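The migrate_vma_collect() cleanup swaps nine field assignments for a designated initializer; members left unnamed are guaranteed zero, which is what the NULLed-out callbacks relied on. In miniature:

#include <stdio.h>

struct mm_walk_toy {
    int (*pmd_entry)(void);
    int (*pte_entry)(void);
    void *private;
};

static int collect_pmd(void) { return 0; }

int main(void)
{
    /* Members left unnamed in a designated initializer are
     * zero-filled, so unset callbacks are reliably NULL without
     * assigning each one by hand. */
    struct mm_walk_toy walk = {
        .pmd_entry = collect_pmd,
    };

    printf("pte_entry NULL: %d, private NULL: %d\n",
           walk.pte_entry == NULL, walk.private == NULL);
    return 0;
}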
8335 +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
8336 +index 0b7b36fa0d5c..36f244125d24 100644
8337 +--- a/net/batman-adv/bat_iv_ogm.c
8338 ++++ b/net/batman-adv/bat_iv_ogm.c
8339 +@@ -463,17 +463,23 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
8340 + * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached
8341 + * @buff_pos: current position in the skb
8342 + * @packet_len: total length of the skb
8343 +- * @tvlv_len: tvlv length of the previously considered OGM
8344 ++ * @ogm_packet: potential OGM in buffer
8345 + *
8346 + * Return: true if there is enough space for another OGM, false otherwise.
8347 + */
8348 +-static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
8349 +- __be16 tvlv_len)
8350 ++static bool
8351 ++batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
8352 ++ const struct batadv_ogm_packet *ogm_packet)
8353 + {
8354 + int next_buff_pos = 0;
8355 +
8356 +- next_buff_pos += buff_pos + BATADV_OGM_HLEN;
8357 +- next_buff_pos += ntohs(tvlv_len);
8358 ++ /* check if there is enough space for the header */
8359 ++ next_buff_pos += buff_pos + sizeof(*ogm_packet);
8360 ++ if (next_buff_pos > packet_len)
8361 ++ return false;
8362 ++
8363 ++ /* check if there is enough space for the optional TVLV */
8364 ++ next_buff_pos += ntohs(ogm_packet->tvlv_len);
8365 +
8366 + return (next_buff_pos <= packet_len) &&
8367 + (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES);
8368 +@@ -501,7 +507,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
8369 +
8370 + /* adjust all flags and log packets */
8371 + while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
8372 +- batadv_ogm_packet->tvlv_len)) {
8373 ++ batadv_ogm_packet)) {
8374 + /* we might have aggregated direct link packets with an
8375 + * ordinary base packet
8376 + */
8377 +@@ -1852,7 +1858,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,
8378 +
8379 + /* unpack the aggregated packets and process them one by one */
8380 + while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb),
8381 +- ogm_packet->tvlv_len)) {
8382 ++ ogm_packet)) {
8383 + batadv_iv_ogm_process(skb, ogm_offset, if_incoming);
8384 +
8385 + ogm_offset += BATADV_OGM_HLEN;
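The batman-adv fix splits the aggregation bounds check in two: first prove the fixed OGM header fits in the buffer, and only then read its tvlv_len field to check the variable part -- previously tvlv_len was trusted before the header itself had been validated. The same two-step check over a toy packet struct:

#include <stdio.h>
#include <stddef.h>

struct ogm { unsigned char flags; unsigned short tvlv_len; };

static int aggr_packet(size_t pos, size_t packet_len, const struct ogm *p)
{
    size_t next = pos + sizeof(*p);

    if (next > packet_len)
        return 0;               /* header itself would overrun */
    next += p->tvlv_len;        /* only now is this field trusted */
    return next <= packet_len;
}

int main(void)
{
    struct ogm p = { 0, 4 };

    printf("%d\n", aggr_packet(0, sizeof(p) + 4, &p));  /* 1 */
    printf("%d\n", aggr_packet(0, sizeof(p) - 1, &p));  /* 0 */
    return 0;
}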
8386 +diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
8387 +index 0d9459b69bdb..c32820963b8e 100644
8388 +--- a/net/batman-adv/netlink.c
8389 ++++ b/net/batman-adv/netlink.c
8390 +@@ -118,7 +118,7 @@ batadv_netlink_get_ifindex(const struct nlmsghdr *nlh, int attrtype)
8391 + {
8392 + struct nlattr *attr = nlmsg_find_attr(nlh, GENL_HDRLEN, attrtype);
8393 +
8394 +- return attr ? nla_get_u32(attr) : 0;
8395 ++ return (attr && nla_len(attr) == sizeof(u32)) ? nla_get_u32(attr) : 0;
8396 + }
8397 +
8398 + /**
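batadv_netlink_get_ifindex() now verifies the attribute payload really is four bytes before nla_get_u32() reads it, so a malformed short attribute can no longer cause an out-of-bounds read. The shape of that check with a stand-in attribute type:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct attr { const void *data; size_t len; };

/* Read a u32 attribute only when its payload really is four bytes;
 * a shorter attribute would otherwise pull in adjacent memory. */
static uint32_t get_u32_checked(const struct attr *a)
{
    uint32_t v = 0;

    if (a && a->len == sizeof(v))
        memcpy(&v, a->data, sizeof(v));
    return v;
}

int main(void)
{
    uint32_t raw = 7;
    struct attr ok = { &raw, sizeof(raw) };
    struct attr bad = { &raw, 2 };

    printf("%u %u\n", get_u32_checked(&ok), get_u32_checked(&bad));
    return 0;
}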
8399 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
8400 +index c59638574cf8..f101a6460b44 100644
8401 +--- a/net/mac80211/util.c
8402 ++++ b/net/mac80211/util.c
8403 +@@ -3527,9 +3527,7 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
8404 + }
8405 +
8406 + /* Always allow software iftypes */
8407 +- if (local->hw.wiphy->software_iftypes & BIT(iftype) ||
8408 +- (iftype == NL80211_IFTYPE_AP_VLAN &&
8409 +- local->hw.wiphy->flags & WIPHY_FLAG_4ADDR_AP)) {
8410 ++ if (cfg80211_iftype_allowed(local->hw.wiphy, iftype, 0, 1)) {
8411 + if (radar_detect)
8412 + return -EINVAL;
8413 + return 0;
8414 +@@ -3564,7 +3562,8 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata,
8415 +
8416 + if (sdata_iter == sdata ||
8417 + !ieee80211_sdata_running(sdata_iter) ||
8418 +- local->hw.wiphy->software_iftypes & BIT(wdev_iter->iftype))
8419 ++ cfg80211_iftype_allowed(local->hw.wiphy,
8420 ++ wdev_iter->iftype, 0, 1))
8421 + continue;
8422 +
8423 + params.iftype_num[wdev_iter->iftype]++;
8424 +diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
8425 +index 9c7da811d130..98f193fd5315 100644
8426 +--- a/net/vmw_vsock/hyperv_transport.c
8427 ++++ b/net/vmw_vsock/hyperv_transport.c
8428 +@@ -320,6 +320,11 @@ static void hvs_close_connection(struct vmbus_channel *chan)
8429 + lock_sock(sk);
8430 + hvs_do_close_lock_held(vsock_sk(sk), true);
8431 + release_sock(sk);
8432 ++
8433 ++ /* Release the refcnt for the channel that's opened in
8434 ++ * hvs_open_connection().
8435 ++ */
8436 ++ sock_put(sk);
8437 + }
8438 +
8439 + static void hvs_open_connection(struct vmbus_channel *chan)
8440 +@@ -388,6 +393,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
8441 + }
8442 +
8443 + set_per_channel_state(chan, conn_from_host ? new : sk);
8444 ++
8445 ++ /* This reference will be dropped by hvs_close_connection(). */
8446 ++ sock_hold(conn_from_host ? new : sk);
8447 + vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
8448 +
8449 + /* Set the pending send size to max packet size to always get
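The hv_sock fix pairs a sock_hold() at channel open with a sock_put() in the rescind callback, guaranteeing the socket outlives every path that can still invoke hvs_close_connection(). Reference pairing in miniature (a toy refcount, not struct sock):

#include <stdio.h>

struct sock { int refcnt; };

static void sock_hold(struct sock *sk) { sk->refcnt++; }
static void sock_put(struct sock *sk)
{
    if (--sk->refcnt == 0)
        printf("freed\n");
}

int main(void)
{
    struct sock sk = { .refcnt = 1 };   /* creator's reference */

    sock_hold(&sk);     /* hvs_open_connection(): callback's ref */
    sock_put(&sk);      /* creator drops its ref: sk survives */
    sock_put(&sk);      /* hvs_close_connection(): now freed */
    return 0;
}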
8450 +diff --git a/net/wireless/core.c b/net/wireless/core.c
8451 +index 2a46ec3cb72c..68660781aa51 100644
8452 +--- a/net/wireless/core.c
8453 ++++ b/net/wireless/core.c
8454 +@@ -1335,10 +1335,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
8455 + }
8456 + break;
8457 + case NETDEV_PRE_UP:
8458 +- if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)) &&
8459 +- !(wdev->iftype == NL80211_IFTYPE_AP_VLAN &&
8460 +- rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP &&
8461 +- wdev->use_4addr))
8462 ++ if (!cfg80211_iftype_allowed(wdev->wiphy, wdev->iftype,
8463 ++ wdev->use_4addr, 0))
8464 + return notifier_from_errno(-EOPNOTSUPP);
8465 +
8466 + if (rfkill_blocked(rdev->rfkill))
8467 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
8468 +index 8e2f03ab4cc9..2a85bff6a8f3 100644
8469 +--- a/net/wireless/nl80211.c
8470 ++++ b/net/wireless/nl80211.c
8471 +@@ -3210,9 +3210,7 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
8472 + return err;
8473 + }
8474 +
8475 +- if (!(rdev->wiphy.interface_modes & (1 << type)) &&
8476 +- !(type == NL80211_IFTYPE_AP_VLAN && params.use_4addr &&
8477 +- rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP))
8478 ++ if (!cfg80211_iftype_allowed(&rdev->wiphy, type, params.use_4addr, 0))
8479 + return -EOPNOTSUPP;
8480 +
8481 + err = nl80211_parse_mon_options(rdev, type, info, &params);
8482 +diff --git a/net/wireless/util.c b/net/wireless/util.c
8483 +index d57e2f679a3e..c14e8f6e5e19 100644
8484 +--- a/net/wireless/util.c
8485 ++++ b/net/wireless/util.c
8486 +@@ -1670,7 +1670,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
8487 + for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
8488 + num_interfaces += params->iftype_num[iftype];
8489 + if (params->iftype_num[iftype] > 0 &&
8490 +- !(wiphy->software_iftypes & BIT(iftype)))
8491 ++ !cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
8492 + used_iftypes |= BIT(iftype);
8493 + }
8494 +
8495 +@@ -1692,7 +1692,7 @@ int cfg80211_iter_combinations(struct wiphy *wiphy,
8496 + return -ENOMEM;
8497 +
8498 + for (iftype = 0; iftype < NUM_NL80211_IFTYPES; iftype++) {
8499 +- if (wiphy->software_iftypes & BIT(iftype))
8500 ++ if (cfg80211_iftype_allowed(wiphy, iftype, 0, 1))
8501 + continue;
8502 + for (j = 0; j < c->n_limits; j++) {
8503 + all_iftypes |= limits[j].types;
8504 +@@ -1895,3 +1895,26 @@ EXPORT_SYMBOL(rfc1042_header);
8505 + const unsigned char bridge_tunnel_header[] __aligned(2) =
8506 + { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
8507 + EXPORT_SYMBOL(bridge_tunnel_header);
8508 ++
8509 ++bool cfg80211_iftype_allowed(struct wiphy *wiphy, enum nl80211_iftype iftype,
8510 ++ bool is_4addr, u8 check_swif)
8511 ++
8512 ++{
8513 ++ bool is_vlan = iftype == NL80211_IFTYPE_AP_VLAN;
8514 ++
8515 ++ switch (check_swif) {
8516 ++ case 0:
8517 ++ if (is_vlan && is_4addr)
8518 ++ return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
8519 ++ return wiphy->interface_modes & BIT(iftype);
8520 ++ case 1:
8521 ++ if (!(wiphy->software_iftypes & BIT(iftype)) && is_vlan)
8522 ++ return wiphy->flags & WIPHY_FLAG_4ADDR_AP;
8523 ++ return wiphy->software_iftypes & BIT(iftype);
8524 ++ default:
8525 ++ break;
8526 ++ }
8527 ++
8528 ++ return false;
8529 ++}
8530 ++EXPORT_SYMBOL(cfg80211_iftype_allowed);
8531 +diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
8532 +index c4a9ddb174bc..5aa75a0a1ced 100755
8533 +--- a/scripts/decode_stacktrace.sh
8534 ++++ b/scripts/decode_stacktrace.sh
8535 +@@ -78,7 +78,7 @@ parse_symbol() {
8536 + fi
8537 +
8538 + # Strip out the base of the path
8539 +- code=${code//^$basepath/""}
8540 ++ code=${code#$basepath/}
8541 +
8542 + # In the case of inlines, move everything to same line
8543 + code=${code//$'\n'/' '}
8544 +diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
8545 +index 088ea2ac8570..612f737cee83 100644
8546 +--- a/security/apparmor/policy_unpack.c
8547 ++++ b/security/apparmor/policy_unpack.c
8548 +@@ -223,16 +223,21 @@ static void *kvmemdup(const void *src, size_t len)
8549 + static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
8550 + {
8551 + size_t size = 0;
8552 ++ void *pos = e->pos;
8553 +
8554 + if (!inbounds(e, sizeof(u16)))
8555 +- return 0;
8556 ++ goto fail;
8557 + size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
8558 + e->pos += sizeof(__le16);
8559 + if (!inbounds(e, size))
8560 +- return 0;
8561 ++ goto fail;
8562 + *chunk = e->pos;
8563 + e->pos += size;
8564 + return size;
8565 ++
8566 ++fail:
8567 ++ e->pos = pos;
8568 ++ return 0;
8569 + }
8570 +
8571 + /* unpack control byte */
8572 +@@ -294,49 +299,66 @@ fail:
8573 +
8574 + static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
8575 + {
8576 ++ void *pos = e->pos;
8577 ++
8578 + if (unpack_nameX(e, AA_U32, name)) {
8579 + if (!inbounds(e, sizeof(u32)))
8580 +- return 0;
8581 ++ goto fail;
8582 + if (data)
8583 + *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
8584 + e->pos += sizeof(u32);
8585 + return 1;
8586 + }
8587 ++
8588 ++fail:
8589 ++ e->pos = pos;
8590 + return 0;
8591 + }
8592 +
8593 + static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
8594 + {
8595 ++ void *pos = e->pos;
8596 ++
8597 + if (unpack_nameX(e, AA_U64, name)) {
8598 + if (!inbounds(e, sizeof(u64)))
8599 +- return 0;
8600 ++ goto fail;
8601 + if (data)
8602 + *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
8603 + e->pos += sizeof(u64);
8604 + return 1;
8605 + }
8606 ++
8607 ++fail:
8608 ++ e->pos = pos;
8609 + return 0;
8610 + }
8611 +
8612 + static size_t unpack_array(struct aa_ext *e, const char *name)
8613 + {
8614 ++ void *pos = e->pos;
8615 ++
8616 + if (unpack_nameX(e, AA_ARRAY, name)) {
8617 + int size;
8618 + if (!inbounds(e, sizeof(u16)))
8619 +- return 0;
8620 ++ goto fail;
8621 + size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
8622 + e->pos += sizeof(u16);
8623 + return size;
8624 + }
8625 ++
8626 ++fail:
8627 ++ e->pos = pos;
8628 + return 0;
8629 + }
8630 +
8631 + static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
8632 + {
8633 ++ void *pos = e->pos;
8634 ++
8635 + if (unpack_nameX(e, AA_BLOB, name)) {
8636 + u32 size;
8637 + if (!inbounds(e, sizeof(u32)))
8638 +- return 0;
8639 ++ goto fail;
8640 + size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
8641 + e->pos += sizeof(u32);
8642 + if (inbounds(e, (size_t) size)) {
8643 +@@ -345,6 +367,9 @@ static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
8644 + return size;
8645 + }
8646 + }
8647 ++
8648 ++fail:
8649 ++ e->pos = pos;
8650 + return 0;
8651 + }
8652 +
8653 +@@ -361,9 +386,10 @@ static int unpack_str(struct aa_ext *e, const char **string, const char *name)
8654 + if (src_str[size - 1] != 0)
8655 + goto fail;
8656 + *string = src_str;
8657 ++
8658 ++ return size;
8659 + }
8660 + }
8661 +- return size;
8662 +
8663 + fail:
8664 + e->pos = pos;
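All of the policy_unpack.c hunks enforce one invariant: an unpacker that fails must rewind e->pos to where it started, because helpers like unpack_u16_chunk() advance the cursor while reading a length prefix and a later bounds check can still fail; left mid-element, the cursor would make the caller's next probe misparse. A compilable model of the rewind:

#include <stdio.h>
#include <stddef.h>

struct aa_ext_toy { const unsigned char *pos, *end; };

static int inbounds(const struct aa_ext_toy *e, size_t n)
{
    return (size_t)(e->end - e->pos) >= n;
}

static size_t unpack_chunk(struct aa_ext_toy *e, const unsigned char **chunk)
{
    const unsigned char *pos = e->pos;
    size_t size;

    if (!inbounds(e, 2))
        goto fail;
    size = e->pos[0] | ((size_t)e->pos[1] << 8);    /* le16 length */
    e->pos += 2;                    /* cursor has moved... */
    if (!inbounds(e, size))
        goto fail;                  /* ...so failure must rewind */
    *chunk = e->pos;
    e->pos += size;
    return size;

fail:
    e->pos = pos;
    return 0;
}

int main(void)
{
    unsigned char buf[] = { 0x03, 0x00, 'a', 'b' }; /* claims 3, has 2 */
    struct aa_ext_toy e = { buf, buf + sizeof(buf) };
    const unsigned char *chunk = NULL;
    size_t size = unpack_chunk(&e, &chunk);

    printf("size=%zu rewound=%d\n", size, e.pos == buf);
    return 0;
}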
8665 +diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
8666 +index b9a6b66aeb0e..d8ba3a6d5042 100644
8667 +--- a/sound/pci/hda/hda_auto_parser.c
8668 ++++ b/sound/pci/hda/hda_auto_parser.c
8669 +@@ -828,6 +828,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
8670 + while (id >= 0) {
8671 + const struct hda_fixup *fix = codec->fixup_list + id;
8672 +
8673 ++ if (++depth > 10)
8674 ++ break;
8675 + if (fix->chained_before)
8676 + apply_fixup(codec, fix->chain_id, action, depth + 1);
8677 +
8678 +@@ -867,8 +869,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
8679 + }
8680 + if (!fix->chained || fix->chained_before)
8681 + break;
8682 +- if (++depth > 10)
8683 +- break;
8684 + id = fix->chain_id;
8685 + }
8686 + }
8687 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
8688 +index a6233775e779..82b0dc9f528f 100644
8689 +--- a/sound/pci/hda/hda_codec.c
8690 ++++ b/sound/pci/hda/hda_codec.c
8691 +@@ -2947,15 +2947,19 @@ static int hda_codec_runtime_resume(struct device *dev)
8692 + #ifdef CONFIG_PM_SLEEP
8693 + static int hda_codec_force_resume(struct device *dev)
8694 + {
8695 ++ struct hda_codec *codec = dev_to_hda_codec(dev);
8696 ++ bool forced_resume = !codec->relaxed_resume;
8697 + int ret;
8698 +
8699 + /* The get/put pair below enforces the runtime resume even if the
8700 + * device hasn't been used at suspend time. This trick is needed to
8701 + * update the jack state change during the sleep.
8702 + */
8703 +- pm_runtime_get_noresume(dev);
8704 ++ if (forced_resume)
8705 ++ pm_runtime_get_noresume(dev);
8706 + ret = pm_runtime_force_resume(dev);
8707 +- pm_runtime_put(dev);
8708 ++ if (forced_resume)
8709 ++ pm_runtime_put(dev);
8710 + return ret;
8711 + }
8712 +
8713 +diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
8714 +index acacc1900265..2003403ce1c8 100644
8715 +--- a/sound/pci/hda/hda_codec.h
8716 ++++ b/sound/pci/hda/hda_codec.h
8717 +@@ -261,6 +261,8 @@ struct hda_codec {
8718 + unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
8719 + unsigned int force_pin_prefix:1; /* Add location prefix */
8720 + unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
8721 ++ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
8722 ++
8723 + #ifdef CONFIG_PM
8724 + unsigned long power_on_acct;
8725 + unsigned long power_off_acct;
8726 +diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
8727 +index bb2bd33b00ec..2609161707a4 100644
8728 +--- a/sound/pci/hda/hda_generic.c
8729 ++++ b/sound/pci/hda/hda_generic.c
8730 +@@ -5991,7 +5991,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
8731 + if (spec->init_hook)
8732 + spec->init_hook(codec);
8733 +
8734 +- snd_hda_apply_verbs(codec);
8735 ++ if (!spec->skip_verbs)
8736 ++ snd_hda_apply_verbs(codec);
8737 +
8738 + init_multi_out(codec);
8739 + init_extra_out(codec);
8740 +diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
8741 +index ce9c293717b9..8933c0f64cc4 100644
8742 +--- a/sound/pci/hda/hda_generic.h
8743 ++++ b/sound/pci/hda/hda_generic.h
8744 +@@ -247,6 +247,7 @@ struct hda_gen_spec {
8745 + unsigned int indep_hp_enabled:1; /* independent HP enabled */
8746 + unsigned int have_aamix_ctl:1;
8747 + unsigned int hp_mic_jack_modes:1;
8748 ++ unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
8749 +
8750 + /* additional mute flags (only effective with auto_mute_via_amp=1) */
8751 + u64 mute_bits;
8752 +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
8753 +index 7a3e34b120b3..c3e3d80ff720 100644
8754 +--- a/sound/pci/hda/hda_intel.c
8755 ++++ b/sound/pci/hda/hda_intel.c
8756 +@@ -329,13 +329,11 @@ enum {
8757 +
8758 + #define AZX_DCAPS_INTEL_SKYLAKE \
8759 + (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
8760 ++ AZX_DCAPS_SYNC_WRITE |\
8761 + AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
8762 + AZX_DCAPS_I915_POWERWELL)
8763 +
8764 +-#define AZX_DCAPS_INTEL_BROXTON \
8765 +- (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\
8766 +- AZX_DCAPS_SEPARATE_STREAM_TAG | AZX_DCAPS_I915_COMPONENT |\
8767 +- AZX_DCAPS_I915_POWERWELL)
8768 ++#define AZX_DCAPS_INTEL_BROXTON AZX_DCAPS_INTEL_SKYLAKE
8769 +
8770 + /* quirks for ATI SB / AMD Hudson */
8771 + #define AZX_DCAPS_PRESET_ATI_SB \
8772 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
8773 +index 35931a18418f..e4fbfb5557ab 100644
8774 +--- a/sound/pci/hda/patch_hdmi.c
8775 ++++ b/sound/pci/hda/patch_hdmi.c
8776 +@@ -2293,8 +2293,10 @@ static void generic_hdmi_free(struct hda_codec *codec)
8777 + struct hdmi_spec *spec = codec->spec;
8778 + int pin_idx, pcm_idx;
8779 +
8780 +- if (codec_has_acomp(codec))
8781 ++ if (codec_has_acomp(codec)) {
8782 + snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
8783 ++ codec->relaxed_resume = 0;
8784 ++ }
8785 +
8786 + for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
8787 + struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
8788 +@@ -2550,6 +2552,8 @@ static void register_i915_notifier(struct hda_codec *codec)
8789 + spec->drm_audio_ops.pin_eld_notify = intel_pin_eld_notify;
8790 + snd_hdac_acomp_register_notifier(&codec->bus->core,
8791 + &spec->drm_audio_ops);
8792 ++ /* no need for forcible resume for jack check thanks to notifier */
8793 ++ codec->relaxed_resume = 1;
8794 + }
8795 +
8796 + /* setup_stream ops override for HSW+ */
8797 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
8798 +index 9b5caf099bfb..7f74ebee8c2d 100644
8799 +--- a/sound/pci/hda/patch_realtek.c
8800 ++++ b/sound/pci/hda/patch_realtek.c
8801 +@@ -836,9 +836,11 @@ static int alc_init(struct hda_codec *codec)
8802 + if (spec->init_hook)
8803 + spec->init_hook(codec);
8804 +
8805 ++ spec->gen.skip_verbs = 1; /* applied below */
8806 + snd_hda_gen_init(codec);
8807 + alc_fix_pll(codec);
8808 + alc_auto_init_amp(codec, spec->init_amp);
8809 ++ snd_hda_apply_verbs(codec); /* apply verbs here, after the codec's own init */
8810 +
8811 + snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
8812 +
8813 +@@ -5673,6 +5675,7 @@ enum {
8814 + ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
8815 + ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
8816 + ALC299_FIXUP_PREDATOR_SPK,
8817 ++ ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
8818 + };
8819 +
8820 + static const struct hda_fixup alc269_fixups[] = {
8821 +@@ -6701,6 +6704,16 @@ static const struct hda_fixup alc269_fixups[] = {
8822 + { }
8823 + }
8824 + },
8825 ++ [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
8826 ++ .type = HDA_FIXUP_PINS,
8827 ++ .v.pins = (const struct hda_pintbl[]) {
8828 ++ { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
8829 ++ { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
8830 ++ { }
8831 ++ },
8832 ++ .chained = true,
8833 ++ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
8834 ++ },
8835 + };
8836 +
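
The two values in the new ASUS fixup are 32-bit HDA pin "default configuration" words: 0x411111f0 is the conventional "no physical connection" value that disables the speaker pin, while 0x04a11150 describes an external black 1/8" mic jack with the no-presence-detect bit set. A small decoder sketch; the field layout follows the HD Audio specification, but the helper itself is illustrative, not kernel code:

/* Decoder sketch for HDA pin default-config words. */
#include <stdio.h>

static void decode_pincfg(unsigned int cfg)
{
	printf("conn=%u loc=0x%02x dev=0x%x jack=0x%x color=0x%x "
	       "misc=0x%x assoc=0x%x seq=0x%x\n",
	       cfg >> 30,		/* 1 = no physical connection */
	       (cfg >> 24) & 0x3f,	/* location */
	       (cfg >> 20) & 0xf,	/* 0xa = Mic In */
	       (cfg >> 16) & 0xf,	/* 0x1 = 1/8" jack */
	       (cfg >> 12) & 0xf,	/* 0x1 = black */
	       (cfg >> 8) & 0xf,	/* bit 0 = no presence detect */
	       (cfg >> 4) & 0xf,	/* default association */
	       cfg & 0xf);		/* sequence within association */
}

int main(void)
{
	decode_pincfg(0x411111f0);	/* NID 0x14: disabled speaker pin */
	decode_pincfg(0x04a11150);	/* NID 0x19: external headset mic */
	return 0;
}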
8837 + static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8838 +@@ -6843,6 +6856,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8839 + SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
8840 + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
8841 + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
8842 ++ SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
8843 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
8844 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
8845 + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8846 +@@ -6859,6 +6873,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8847 + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
8848 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
8849 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
8850 ++ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
8851 + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
8852 + SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
8853 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
8854 +@@ -6936,6 +6951,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8855 + SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
8856 + SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
8857 + SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
8858 ++ SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
8859 + SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
8860 + SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
8861 + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
8862 +@@ -8798,6 +8814,7 @@ static int patch_alc680(struct hda_codec *codec)
8863 + static const struct hda_device_id snd_hda_id_realtek[] = {
8864 + HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
8865 + HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
8866 ++ HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
8867 + HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
8868 + HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
8869 + HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
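
The quirk-table hunks above key each fixup on the PCI subsystem vendor/device ID of the machine, so a single codec driver can carry per-model overrides. A simplified sketch of how such a terminator-ended table is matched; the struct and macro here are stand-ins for the real SND_PCI_QUIRK machinery:

/* Illustrative lookup over a quirk-style table keyed on PCI subsystem
 * IDs; names are simplified stand-ins. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct quirk {
	uint16_t subvendor;
	uint16_t subdevice;
	const char *name;
	int fixup_id;
};

#define QUIRK(v, d, n, f) { .subvendor = (v), .subdevice = (d), \
			    .name = (n), .fixup_id = (f) }

static const struct quirk table[] = {
	QUIRK(0x1043, 0x17d1, "ASUS UX431FL", 1 /* INTSPK_HEADSET_MIC */),
	QUIRK(0x17aa, 0x3151, "ThinkCentre Station", 2 /* HEADSET_MIC */),
	{ 0 }	/* terminator, as in the kernel tables */
};

static const struct quirk *lookup(uint16_t sv, uint16_t sd)
{
	for (const struct quirk *q = table; q->subvendor; q++)
		if (q->subvendor == sv && q->subdevice == sd)
			return q;
	return NULL;
}

int main(void)
{
	const struct quirk *q = lookup(0x1043, 0x17d1);

	if (q)
		printf("matched %s -> fixup %d\n", q->name, q->fixup_id);
	return 0;
}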
8870 +diff --git a/tools/testing/selftests/net/fib_rule_tests.sh b/tools/testing/selftests/net/fib_rule_tests.sh
8871 +index 1ba069967fa2..ba2d9fab28d0 100755
8872 +--- a/tools/testing/selftests/net/fib_rule_tests.sh
8873 ++++ b/tools/testing/selftests/net/fib_rule_tests.sh
8874 +@@ -15,6 +15,7 @@ GW_IP6=2001:db8:1::2
8875 + SRC_IP6=2001:db8:1::3
8876 +
8877 + DEV_ADDR=198.51.100.1
8878 ++DEV_ADDR6=2001:db8:1::1
8879 + DEV=dummy0
8880 +
8881 + log_test()
8882 +@@ -55,8 +56,8 @@ setup()
8883 +
8884 + $IP link add dummy0 type dummy
8885 + $IP link set dev dummy0 up
8886 +- $IP address add 192.51.100.1/24 dev dummy0
8887 +- $IP -6 address add 2001:db8:1::1/64 dev dummy0
8888 ++ $IP address add $DEV_ADDR/24 dev dummy0
8889 ++ $IP -6 address add $DEV_ADDR6/64 dev dummy0
8890 +
8891 + set +e
8892 + }
8893 +diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
8894 +index b20b751286fc..757a17f5ebde 100644
8895 +--- a/virt/kvm/eventfd.c
8896 ++++ b/virt/kvm/eventfd.c
8897 +@@ -44,6 +44,12 @@
8898 +
8899 + static struct workqueue_struct *irqfd_cleanup_wq;
8900 +
8901 ++bool __attribute__((weak))
8902 ++kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
8903 ++{
8904 ++ return true;
8905 ++}
8906 ++
8907 + static void
8908 + irqfd_inject(struct work_struct *work)
8909 + {
8910 +@@ -297,6 +303,9 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
8911 + if (!kvm_arch_intc_initialized(kvm))
8912 + return -EAGAIN;
8913 +
8914 ++ if (!kvm_arch_irqfd_allowed(kvm, args))
8915 ++ return -EINVAL;
8916 ++
8917 + irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
8918 + if (!irqfd)
8919 + return -ENOMEM;
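
kvm_irqfd_assign() now consults kvm_arch_irqfd_allowed(), for which this hunk supplies a weak, always-true default; an architecture that needs to veto irqfd assignment links a strong definition of the same symbol and the generic code picks it up with no further changes. A standalone sketch of that weak-default pattern (names are hypothetical):

/* Weak-default pattern as used by kvm_arch_irqfd_allowed(): generic
 * code ships a permissive weak definition, an arch may override it. */
#include <stdbool.h>
#include <stdio.h>

/* generic.c: default says "allowed" unless an arch objects */
bool __attribute__((weak)) arch_irqfd_allowed(int gsi)
{
	return true;
}

/* arch.c (hypothetical): linking this strong definition would replace
 * the weak one above without touching the generic caller:
 *
 * bool arch_irqfd_allowed(int gsi) { return gsi < 32; }
 */

int main(void)
{
	if (!arch_irqfd_allowed(5))	/* mirrors the -EINVAL early-out */
		return 1;
	printf("irqfd allowed\n");
	return 0;
}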