From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Wed, 19 Sep 2018 22:41:28
Message-Id: 1537396872.24c320725e8df6e42f0e4ae6d28f333ece085a4e.mpagano@gentoo
1 commit: 24c320725e8df6e42f0e4ae6d28f333ece085a4e
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Sep 19 22:41:12 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Sep 19 22:41:12 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=24c32072
7
8 Linux patch 4.18.9
9
10 0000_README | 4 +
11 1008_linux-4.18.9.patch | 5298 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 5302 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 597262e..6534d27 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -75,6 +75,10 @@ Patch: 1007_linux-4.18.8.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.18.8
21
22 +Patch: 1008_linux-4.18.9.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.18.9
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1008_linux-4.18.9.patch b/1008_linux-4.18.9.patch
31 new file mode 100644
32 index 0000000..877b17a
33 --- /dev/null
34 +++ b/1008_linux-4.18.9.patch
35 @@ -0,0 +1,5298 @@
36 +diff --git a/Makefile b/Makefile
37 +index 0d73431f66cd..1178348fb9ca 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,7 +1,7 @@
41 + # SPDX-License-Identifier: GPL-2.0
42 + VERSION = 4
43 + PATCHLEVEL = 18
44 +-SUBLEVEL = 8
45 ++SUBLEVEL = 9
46 + EXTRAVERSION =
47 + NAME = Merciless Moray
48 +
49 +diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
50 +index 47b74fbc403c..37bafd44e36d 100644
51 +--- a/arch/arc/boot/dts/axs10x_mb.dtsi
52 ++++ b/arch/arc/boot/dts/axs10x_mb.dtsi
53 +@@ -9,6 +9,10 @@
54 + */
55 +
56 + / {
57 ++ aliases {
58 ++ ethernet = &gmac;
59 ++ };
60 ++
61 + axs10x_mb {
62 + compatible = "simple-bus";
63 + #address-cells = <1>;
64 +@@ -68,7 +72,7 @@
65 + };
66 + };
67 +
68 +- ethernet@0x18000 {
69 ++ gmac: ethernet@0x18000 {
70 + #interrupt-cells = <1>;
71 + compatible = "snps,dwmac";
72 + reg = < 0x18000 0x2000 >;
73 +@@ -81,6 +85,7 @@
74 + max-speed = <100>;
75 + resets = <&creg_rst 5>;
76 + reset-names = "stmmaceth";
77 ++ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
78 + };
79 +
80 + ehci@0x40000 {
81 +diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
82 +index 006aa3de5348..d00f283094d3 100644
83 +--- a/arch/arc/boot/dts/hsdk.dts
84 ++++ b/arch/arc/boot/dts/hsdk.dts
85 +@@ -25,6 +25,10 @@
86 + bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
87 + };
88 +
89 ++ aliases {
90 ++ ethernet = &gmac;
91 ++ };
92 ++
93 + cpus {
94 + #address-cells = <1>;
95 + #size-cells = <0>;
96 +@@ -163,7 +167,7 @@
97 + #clock-cells = <0>;
98 + };
99 +
100 +- ethernet@8000 {
101 ++ gmac: ethernet@8000 {
102 + #interrupt-cells = <1>;
103 + compatible = "snps,dwmac";
104 + reg = <0x8000 0x2000>;
105 +@@ -176,6 +180,7 @@
106 + phy-handle = <&phy0>;
107 + resets = <&cgu_rst HSDK_ETH_RESET>;
108 + reset-names = "stmmaceth";
109 ++ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
110 +
111 + mdio {
112 + #address-cells = <1>;
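(The two DTS hunks above share one idea: an "ethernet" alias plus a zeroed mac-address placeholder let firmware find the gmac node and write the real MAC before Linux boots. As a rough sketch only - the function and variable names are illustrative, not U-Boot's actual fixup code - a libfdt-based loader could do:

    #include <stdint.h>
    #include <libfdt.h>

    /* Write the board MAC into the node behind the "ethernet" alias.
     * fdt_path_offset() resolves alias names that do not start with '/'. */
    static int fixup_ethernet_mac(void *blob, const uint8_t mac[6])
    {
            int off = fdt_path_offset(blob, "ethernet");

            if (off < 0)
                    return off; /* no alias, nothing to patch */

            /* Replaces the [00 00 00 00 00 00] placeholder from the .dts */
            return fdt_setprop(blob, off, "mac-address", mac, 6);
    }
)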
113 +diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
114 +index a635ea972304..df848c44dacd 100644
115 +--- a/arch/arc/configs/axs101_defconfig
116 ++++ b/arch/arc/configs/axs101_defconfig
117 +@@ -1,5 +1,4 @@
118 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
119 +-# CONFIG_SWAP is not set
120 + CONFIG_SYSVIPC=y
121 + CONFIG_POSIX_MQUEUE=y
122 + # CONFIG_CROSS_MEMORY_ATTACH is not set
123 +diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
124 +index aa507e423075..bcbdc0494faa 100644
125 +--- a/arch/arc/configs/axs103_defconfig
126 ++++ b/arch/arc/configs/axs103_defconfig
127 +@@ -1,5 +1,4 @@
128 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
129 +-# CONFIG_SWAP is not set
130 + CONFIG_SYSVIPC=y
131 + CONFIG_POSIX_MQUEUE=y
132 + # CONFIG_CROSS_MEMORY_ATTACH is not set
133 +diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
134 +index eba07f468654..d145bce7ebdf 100644
135 +--- a/arch/arc/configs/axs103_smp_defconfig
136 ++++ b/arch/arc/configs/axs103_smp_defconfig
137 +@@ -1,5 +1,4 @@
138 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
139 +-# CONFIG_SWAP is not set
140 + CONFIG_SYSVIPC=y
141 + CONFIG_POSIX_MQUEUE=y
142 + # CONFIG_CROSS_MEMORY_ATTACH is not set
143 +diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
144 +index d496ef579859..ca46153d7915 100644
145 +--- a/arch/arm64/kvm/hyp/switch.c
146 ++++ b/arch/arm64/kvm/hyp/switch.c
147 +@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
148 + val = read_sysreg(cpacr_el1);
149 + val |= CPACR_EL1_TTA;
150 + val &= ~CPACR_EL1_ZEN;
151 +- if (!update_fp_enabled(vcpu))
152 ++ if (!update_fp_enabled(vcpu)) {
153 + val &= ~CPACR_EL1_FPEN;
154 ++ __activate_traps_fpsimd32(vcpu);
155 ++ }
156 +
157 + write_sysreg(val, cpacr_el1);
158 +
159 +@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
160 +
161 + val = CPTR_EL2_DEFAULT;
162 + val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
163 +- if (!update_fp_enabled(vcpu))
164 ++ if (!update_fp_enabled(vcpu)) {
165 + val |= CPTR_EL2_TFP;
166 ++ __activate_traps_fpsimd32(vcpu);
167 ++ }
168 +
169 + write_sysreg(val, cptr_el2);
170 + }
171 +@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
172 + if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
173 + write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
174 +
175 +- __activate_traps_fpsimd32(vcpu);
176 + if (has_vhe())
177 + activate_traps_vhe(vcpu);
178 + else
179 +diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
180 +index 4f33dbc67348..7096915f26e0 100644
181 +--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
182 ++++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
183 +@@ -184,7 +184,7 @@
184 + #address-cells = <1>;
185 + #size-cells = <0>;
186 + compatible = "mscc,ocelot-miim";
187 +- reg = <0x107009c 0x36>, <0x10700f0 0x8>;
188 ++ reg = <0x107009c 0x24>, <0x10700f0 0x8>;
189 + interrupts = <14>;
190 + status = "disabled";
191 +
192 +diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
193 +index 8505db478904..1d92efb82c37 100644
194 +--- a/arch/mips/cavium-octeon/octeon-platform.c
195 ++++ b/arch/mips/cavium-octeon/octeon-platform.c
196 +@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
197 + return 0;
198 +
199 + pd = of_find_device_by_node(ehci_node);
200 ++ of_node_put(ehci_node);
201 + if (!pd)
202 + return 0;
203 +
204 +@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
205 + return 0;
206 +
207 + pd = of_find_device_by_node(ohci_node);
208 ++ of_node_put(ohci_node);
209 + if (!pd)
210 + return 0;
211 +
212 +diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
213 +index 5ba6fcc26fa7..94a78dbbc91f 100644
214 +--- a/arch/mips/generic/init.c
215 ++++ b/arch/mips/generic/init.c
216 +@@ -204,6 +204,7 @@ void __init arch_init_irq(void)
217 + "mti,cpu-interrupt-controller");
218 + if (!cpu_has_veic && !intc_node)
219 + mips_cpu_irq_init();
220 ++ of_node_put(intc_node);
221 +
222 + irqchip_init();
223 + }
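(The Octeon and generic-MIPS hunks above all plug the same class of leak: the of_find_*() helpers return a device_node with an elevated refcount, and the caller owns the matching of_node_put(). A minimal sketch of the balanced pattern - the helper name and compatible string are made up for illustration:

    #include <linux/of.h>
    #include <linux/of_platform.h>

    static struct platform_device *lookup_pdev(const char *compat)
    {
            struct device_node *np;
            struct platform_device *pd;

            np = of_find_compatible_node(NULL, NULL, compat);
            if (!np)
                    return NULL;

            pd = of_find_device_by_node(np); /* pd holds its own reference */
            of_node_put(np);                 /* balance of_find_compatible_node() */
            return pd;
    }
)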
224 +diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
225 +index cea8ad864b3f..57b34257be2b 100644
226 +--- a/arch/mips/include/asm/io.h
227 ++++ b/arch/mips/include/asm/io.h
228 +@@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
229 + /*
230 + * ISA I/O bus memory addresses are 1:1 with the physical address.
231 + */
232 +-static inline unsigned long isa_virt_to_bus(volatile void * address)
233 ++static inline unsigned long isa_virt_to_bus(volatile void *address)
234 + {
235 +- return (unsigned long)address - PAGE_OFFSET;
236 ++ return virt_to_phys(address);
237 + }
238 +
239 +-static inline void * isa_bus_to_virt(unsigned long address)
240 ++static inline void *isa_bus_to_virt(unsigned long address)
241 + {
242 +- return (void *)(address + PAGE_OFFSET);
243 ++ return phys_to_virt(address);
244 + }
245 +
246 + #define isa_page_to_bus page_to_phys
247 +diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
248 +index 019035d7225c..8f845f6e5f42 100644
249 +--- a/arch/mips/kernel/vdso.c
250 ++++ b/arch/mips/kernel/vdso.c
251 +@@ -13,6 +13,7 @@
252 + #include <linux/err.h>
253 + #include <linux/init.h>
254 + #include <linux/ioport.h>
255 ++#include <linux/kernel.h>
256 + #include <linux/mm.h>
257 + #include <linux/sched.h>
258 + #include <linux/slab.h>
259 +@@ -20,6 +21,7 @@
260 +
261 + #include <asm/abi.h>
262 + #include <asm/mips-cps.h>
263 ++#include <asm/page.h>
264 + #include <asm/vdso.h>
265 +
266 + /* Kernel-provided data used by the VDSO. */
267 +@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
268 + vvar_size = gic_size + PAGE_SIZE;
269 + size = vvar_size + image->size;
270 +
271 ++ /*
272 ++ * Find a region that's large enough for us to perform the
273 ++ * colour-matching alignment below.
274 ++ */
275 ++ if (cpu_has_dc_aliases)
276 ++ size += shm_align_mask + 1;
277 ++
278 + base = get_unmapped_area(NULL, 0, size, 0, 0);
279 + if (IS_ERR_VALUE(base)) {
280 + ret = base;
281 + goto out;
282 + }
283 +
284 ++ /*
285 ++ * If we suffer from dcache aliasing, ensure that the VDSO data page
286 ++ * mapping is coloured the same as the kernel's mapping of that memory.
287 ++ * This ensures that when the kernel updates the VDSO data userland
288 ++ * will observe it without requiring cache invalidations.
289 ++ */
290 ++ if (cpu_has_dc_aliases) {
291 ++ base = __ALIGN_MASK(base, shm_align_mask);
292 ++ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
293 ++ }
294 ++
295 + data_addr = base + gic_size;
296 + vdso_addr = data_addr + PAGE_SIZE;
297 +
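(The colour-matching step above is compact enough to misread: it is an align-up to a colour boundary followed by adding the kernel mapping's colour bits, so the VDSO data page lands on the same dcache colour as the kernel's view of that memory. Restated as standalone arithmetic - illustrative only, with kaddr standing in for (unsigned long)&vdso_data:

    /* shm_align_mask covers the virtual-address bits that select the
     * dcache colour on aliasing MIPS caches. */
    static unsigned long colour_match(unsigned long base, unsigned long kaddr,
                                      unsigned long gic_size, unsigned long mask)
    {
            base = (base + mask) & ~mask;      /* __ALIGN_MASK(base, mask) */
            base += (kaddr - gic_size) & mask; /* adopt the kernel's colour */
            return base;
    }
)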
298 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
299 +index e12dfa48b478..a5893b2cdc0e 100644
300 +--- a/arch/mips/mm/c-r4k.c
301 ++++ b/arch/mips/mm/c-r4k.c
302 +@@ -835,7 +835,8 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
303 + static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
304 + {
305 + /* Catch bad driver code */
306 +- BUG_ON(size == 0);
307 ++ if (WARN_ON(size == 0))
308 ++ return;
309 +
310 + preempt_disable();
311 + if (cpu_has_inclusive_pcaches) {
312 +@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
313 + static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
314 + {
315 + /* Catch bad driver code */
316 +- BUG_ON(size == 0);
317 ++ if (WARN_ON(size == 0))
318 ++ return;
319 +
320 + preempt_disable();
321 + if (cpu_has_inclusive_pcaches) {
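(Both c-r4k.c hunks rely on a handy property of WARN_ON(): unlike BUG_ON(), it returns the truth value of its condition, so a buggy caller gets a stack trace while the function simply bails out instead of panicking the machine. The idiom in isolation:

    #include <linux/bug.h>

    static void dma_cache_op(unsigned long addr, unsigned long size)
    {
            /* Complain loudly about the buggy caller, but keep running. */
            if (WARN_ON(size == 0))
                    return;

            /* ... actual cache maintenance on [addr, addr + size) ... */
    }
)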
322 +diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
323 +index 01ee40f11f3a..76234a14b97d 100644
324 +--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
325 ++++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
326 +@@ -9,6 +9,7 @@
327 +
328 + #include <linux/slab.h>
329 + #include <linux/cpumask.h>
330 ++#include <linux/kmemleak.h>
331 + #include <linux/percpu.h>
332 +
333 + struct vmemmap_backing {
334 +@@ -82,6 +83,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
335 +
336 + pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
337 + pgtable_gfp_flags(mm, GFP_KERNEL));
338 ++ /*
339 ++ * Don't scan the PGD for pointers, it contains references to PUDs but
340 ++ * those references are not full pointers and so can't be recognised by
341 ++ * kmemleak.
342 ++ */
343 ++ kmemleak_no_scan(pgd);
344 ++
345 + /*
346 + * With hugetlb, we don't clear the second half of the page table.
347 + * If we share the same slab cache with the pmd or pud level table,
348 +@@ -110,8 +118,19 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
349 +
350 + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
351 + {
352 +- return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
353 +- pgtable_gfp_flags(mm, GFP_KERNEL));
354 ++ pud_t *pud;
355 ++
356 ++ pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
357 ++ pgtable_gfp_flags(mm, GFP_KERNEL));
358 ++ /*
359 ++ * Tell kmemleak to ignore the PUD, that means don't scan it for
360 ++ * pointers and don't consider it a leak. PUDs are typically only
361 ++ * referred to by their PGD, but kmemleak is not able to recognise those
362 ++ * as pointers, leading to false leak reports.
363 ++ */
364 ++ kmemleak_ignore(pud);
365 ++
366 ++ return pud;
367 + }
368 +
369 + static inline void pud_free(struct mm_struct *mm, pud_t *pud)
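(The distinction drawn above is worth remembering: kmemleak_no_scan() keeps the object tracked but skips scanning its contents for pointers, while kmemleak_ignore() additionally drops it from leak reports, which suits objects reachable only through encoded, non-pointer values. Generic shape, with an illustrative struct:

    #include <linux/kmemleak.h>
    #include <linux/slab.h>

    struct enc_table {
            unsigned long encoded[512]; /* encoded refs, not plain pointers */
    };

    static struct enc_table *alloc_enc_table(void)
    {
            struct enc_table *t = kmalloc(sizeof(*t), GFP_KERNEL);

            if (!t)
                    return NULL;
            /* Nothing in here looks like a pointer, and nothing pointing
             * at it does either, so silence kmemleak entirely. */
            kmemleak_ignore(t);
            return t;
    }
)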
370 +diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
371 +index 176f911ee983..7efc42538ccf 100644
372 +--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
373 ++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
374 +@@ -738,10 +738,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
375 + gpa, shift);
376 + kvmppc_radix_tlbie_page(kvm, gpa, shift);
377 + if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
378 +- unsigned long npages = 1;
379 ++ unsigned long psize = PAGE_SIZE;
380 + if (shift)
381 +- npages = 1ul << (shift - PAGE_SHIFT);
382 +- kvmppc_update_dirty_map(memslot, gfn, npages);
383 ++ psize = 1ul << shift;
384 ++ kvmppc_update_dirty_map(memslot, gfn, psize);
385 + }
386 + }
387 + return 0;
388 +diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c
389 +index 81b2cbce7df8..7c324eff2f22 100644
390 +--- a/arch/powerpc/platforms/4xx/msi.c
391 ++++ b/arch/powerpc/platforms/4xx/msi.c
392 +@@ -146,13 +146,19 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
393 + const u32 *sdr_addr;
394 + dma_addr_t msi_phys;
395 + void *msi_virt;
396 ++ int err;
397 +
398 + sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
399 + if (!sdr_addr)
400 +- return -1;
401 ++ return -EINVAL;
402 +
403 +- mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
404 +- mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
405 ++ msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
406 ++ if (!msi_data)
407 ++ return -EINVAL;
408 ++
409 ++ msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
410 ++ if (!msi_mask)
411 ++ return -EINVAL;
412 +
413 + msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
414 + if (!msi->msi_dev)
415 +@@ -160,30 +166,30 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
416 +
417 + msi->msi_regs = of_iomap(msi->msi_dev, 0);
418 + if (!msi->msi_regs) {
419 +- dev_err(&dev->dev, "of_iomap problem failed\n");
420 +- return -ENOMEM;
421 ++ dev_err(&dev->dev, "of_iomap failed\n");
422 ++ err = -ENOMEM;
423 ++ goto node_put;
424 + }
425 + dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
426 + (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
427 +
428 + msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
429 +- if (!msi_virt)
430 +- return -ENOMEM;
431 ++ if (!msi_virt) {
432 ++ err = -ENOMEM;
433 ++ goto iounmap;
434 ++ }
435 + msi->msi_addr_hi = upper_32_bits(msi_phys);
436 + msi->msi_addr_lo = lower_32_bits(msi_phys & 0xffffffff);
437 + dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
438 + msi->msi_addr_hi, msi->msi_addr_lo);
439 +
440 ++ mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
441 ++ mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
442 ++
443 + /* Progam the Interrupt handler Termination addr registers */
444 + out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
445 + out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
446 +
447 +- msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
448 +- if (!msi_data)
449 +- return -1;
450 +- msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
451 +- if (!msi_mask)
452 +- return -1;
453 + /* Program MSI Expected data and Mask bits */
454 + out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
455 + out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
456 +@@ -191,6 +197,12 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
457 + dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);
458 +
459 + return 0;
460 ++
461 ++iounmap:
462 ++ iounmap(msi->msi_regs);
463 ++node_put:
464 ++ of_node_put(msi->msi_dev);
465 ++ return err;
466 + }
467 +
468 + static int ppc4xx_of_msi_remove(struct platform_device *dev)
469 +@@ -209,7 +221,6 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev)
470 + msi_bitmap_free(&msi->bitmap);
471 + iounmap(msi->msi_regs);
472 + of_node_put(msi->msi_dev);
473 +- kfree(msi);
474 +
475 + return 0;
476 + }
477 +@@ -223,18 +234,16 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
478 +
479 + dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
480 +
481 +- msi = kzalloc(sizeof(*msi), GFP_KERNEL);
482 +- if (!msi) {
483 +- dev_err(&dev->dev, "No memory for MSI structure\n");
484 ++ msi = devm_kzalloc(&dev->dev, sizeof(*msi), GFP_KERNEL);
485 ++ if (!msi)
486 + return -ENOMEM;
487 +- }
488 + dev->dev.platform_data = msi;
489 +
490 + /* Get MSI ranges */
491 + err = of_address_to_resource(dev->dev.of_node, 0, &res);
492 + if (err) {
493 + dev_err(&dev->dev, "%pOF resource error!\n", dev->dev.of_node);
494 +- goto error_out;
495 ++ return err;
496 + }
497 +
498 + msi_irqs = of_irq_count(dev->dev.of_node);
499 +@@ -243,7 +252,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
500 +
501 + err = ppc4xx_setup_pcieh_hw(dev, res, msi);
502 + if (err)
503 +- goto error_out;
504 ++ return err;
505 +
506 + err = ppc4xx_msi_init_allocator(dev, msi);
507 + if (err) {
508 +@@ -256,7 +265,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
509 + phb->controller_ops.setup_msi_irqs = ppc4xx_setup_msi_irqs;
510 + phb->controller_ops.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
511 + }
512 +- return err;
513 ++ return 0;
514 +
515 + error_out:
516 + ppc4xx_of_msi_remove(dev);
517 +diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
518 +index 8cdf91f5d3a4..c773465b2c95 100644
519 +--- a/arch/powerpc/platforms/powernv/npu-dma.c
520 ++++ b/arch/powerpc/platforms/powernv/npu-dma.c
521 +@@ -437,8 +437,9 @@ static int get_mmio_atsd_reg(struct npu *npu)
522 + int i;
523 +
524 + for (i = 0; i < npu->mmio_atsd_count; i++) {
525 +- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
526 +- return i;
527 ++ if (!test_bit(i, &npu->mmio_atsd_usage))
528 ++ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
529 ++ return i;
530 + }
531 +
532 + return -ENOSPC;
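(get_mmio_atsd_reg() above picks up the classic test-then-test-and-set optimisation: a plain test_bit() read keeps the cacheline in a shared state and only falls through to the locked read-modify-write when the slot looks free, reducing bouncing when many CPUs hammer the same small bitmap. The allocator shape in isolation, names illustrative:

    #include <linux/bitops.h>
    #include <linux/errno.h>

    static int claim_slot(unsigned long *usage, int nslots)
    {
            int i;

            for (i = 0; i < nslots; i++) {
                    if (test_bit(i, usage))  /* cheap shared-state read */
                            continue;
                    if (!test_and_set_bit_lock(i, usage))
                            return i;        /* won any race for slot i */
            }
            return -ENOSPC;
    }
)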
533 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
534 +index 8a4868a3964b..cb098e962ffe 100644
535 +--- a/arch/powerpc/platforms/pseries/setup.c
536 ++++ b/arch/powerpc/platforms/pseries/setup.c
537 +@@ -647,6 +647,15 @@ void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
538 + }
539 + }
540 +
541 ++static void pseries_disable_sriov_resources(struct pci_dev *pdev)
542 ++{
543 ++ int i;
544 ++
545 ++ pci_warn(pdev, "No hypervisor support for SR-IOV on this device, IOV BARs disabled.\n");
546 ++ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
547 ++ pdev->resource[i + PCI_IOV_RESOURCES].flags = 0;
548 ++}
549 ++
550 + static void pseries_pci_fixup_resources(struct pci_dev *pdev)
551 + {
552 + const int *indexes;
553 +@@ -654,10 +663,10 @@ static void pseries_pci_fixup_resources(struct pci_dev *pdev)
554 +
555 + /*Firmware must support open sriov otherwise dont configure*/
556 + indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
557 +- if (!indexes)
558 +- return;
559 +- /* Assign the addresses from device tree*/
560 +- of_pci_set_vf_bar_size(pdev, indexes);
561 ++ if (indexes)
562 ++ of_pci_set_vf_bar_size(pdev, indexes);
563 ++ else
564 ++ pseries_disable_sriov_resources(pdev);
565 + }
566 +
567 + static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
568 +@@ -669,10 +678,10 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
569 + return;
570 + /*Firmware must support open sriov otherwise dont configure*/
571 + indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
572 +- if (!indexes)
573 +- return;
574 +- /* Assign the addresses from device tree*/
575 +- of_pci_parse_iov_addrs(pdev, indexes);
576 ++ if (indexes)
577 ++ of_pci_parse_iov_addrs(pdev, indexes);
578 ++ else
579 ++ pseries_disable_sriov_resources(pdev);
580 + }
581 +
582 + static resource_size_t pseries_pci_iov_resource_alignment(struct pci_dev *pdev,
583 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
584 +index 84c89cb9636f..cbdd8341f17e 100644
585 +--- a/arch/s390/kvm/vsie.c
586 ++++ b/arch/s390/kvm/vsie.c
587 +@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
588 + return set_validity_icpt(scb_s, 0x0039U);
589 +
590 + /* copy only the wrapping keys */
591 +- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
592 ++ if (read_guest_real(vcpu, crycb_addr + 72,
593 ++ vsie_page->crycb.dea_wrapping_key_mask, 56))
594 + return set_validity_icpt(scb_s, 0x0035U);
595 +
596 + scb_s->ecb3 |= ecb3_flags;
597 +diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
598 +index 395c9631e000..75f1e35e7c15 100644
599 +--- a/arch/x86/include/asm/kdebug.h
600 ++++ b/arch/x86/include/asm/kdebug.h
601 +@@ -22,10 +22,20 @@ enum die_val {
602 + DIE_NMIUNKNOWN,
603 + };
604 +
605 ++enum show_regs_mode {
606 ++ SHOW_REGS_SHORT,
607 ++ /*
608 ++ * For when userspace crashed, but we don't think it's our fault, and
609 ++ * therefore don't print kernel registers.
610 ++ */
611 ++ SHOW_REGS_USER,
612 ++ SHOW_REGS_ALL
613 ++};
614 ++
615 + extern void die(const char *, struct pt_regs *,long);
616 + extern int __must_check __die(const char *, struct pt_regs *, long);
617 + extern void show_stack_regs(struct pt_regs *regs);
618 +-extern void __show_regs(struct pt_regs *regs, int all);
619 ++extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
620 + extern void show_iret_regs(struct pt_regs *regs);
621 + extern unsigned long oops_begin(void);
622 + extern void oops_end(unsigned long, struct pt_regs *, int signr);
623 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
624 +index acebb808c4b5..0722b7745382 100644
625 +--- a/arch/x86/include/asm/kvm_host.h
626 ++++ b/arch/x86/include/asm/kvm_host.h
627 +@@ -1198,18 +1198,22 @@ enum emulation_result {
628 + #define EMULTYPE_NO_DECODE (1 << 0)
629 + #define EMULTYPE_TRAP_UD (1 << 1)
630 + #define EMULTYPE_SKIP (1 << 2)
631 +-#define EMULTYPE_RETRY (1 << 3)
632 +-#define EMULTYPE_NO_REEXECUTE (1 << 4)
633 +-#define EMULTYPE_NO_UD_ON_FAIL (1 << 5)
634 +-#define EMULTYPE_VMWARE (1 << 6)
635 ++#define EMULTYPE_ALLOW_RETRY (1 << 3)
636 ++#define EMULTYPE_NO_UD_ON_FAIL (1 << 4)
637 ++#define EMULTYPE_VMWARE (1 << 5)
638 + int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
639 + int emulation_type, void *insn, int insn_len);
640 +
641 + static inline int emulate_instruction(struct kvm_vcpu *vcpu,
642 + int emulation_type)
643 + {
644 +- return x86_emulate_instruction(vcpu, 0,
645 +- emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
646 ++ return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
647 ++}
648 ++
649 ++static inline int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
650 ++ void *insn, int insn_len)
651 ++{
652 ++ return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
653 + }
654 +
655 + void kvm_enable_efer_bits(u64);
656 +diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
657 +index c9b773401fd8..21d1fa5eaa5f 100644
658 +--- a/arch/x86/kernel/apic/vector.c
659 ++++ b/arch/x86/kernel/apic/vector.c
660 +@@ -422,7 +422,7 @@ static int activate_managed(struct irq_data *irqd)
661 + if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
662 + /* Something in the core code broke! Survive gracefully */
663 + pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
664 +- return EINVAL;
665 ++ return -EINVAL;
666 + }
667 +
668 + ret = assign_managed_vector(irqd, vector_searchmask);
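(The one-character vector.c fix deserves a gloss: kernel convention returns errors as negative errno values, and callers test ret < 0 (or IS_ERR_VALUE()), so a bare positive EINVAL slips through such checks as apparent success. Illustration:

    #include <linux/errno.h>
    #include <linux/types.h>

    static int activate(bool broken)
    {
            if (broken)
                    return -EINVAL; /* not "return EINVAL;", which is +22 */
            return 0;
    }

    /* Typical caller that a positive constant silently defeats:
     *     ret = activate(true);
     *     if (ret < 0)
     *             handle_error(ret);
     */
)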
669 +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
670 +index 0624957aa068..07b5fc00b188 100644
671 +--- a/arch/x86/kernel/cpu/microcode/amd.c
672 ++++ b/arch/x86/kernel/cpu/microcode/amd.c
673 +@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
674 + struct microcode_amd *mc_amd;
675 + struct ucode_cpu_info *uci;
676 + struct ucode_patch *p;
677 ++ enum ucode_state ret;
678 + u32 rev, dummy;
679 +
680 + BUG_ON(raw_smp_processor_id() != cpu);
681 +@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
682 +
683 + /* need to apply patch? */
684 + if (rev >= mc_amd->hdr.patch_id) {
685 +- c->microcode = rev;
686 +- uci->cpu_sig.rev = rev;
687 +- return UCODE_OK;
688 ++ ret = UCODE_OK;
689 ++ goto out;
690 + }
691 +
692 + if (__apply_microcode_amd(mc_amd)) {
693 +@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
694 + cpu, mc_amd->hdr.patch_id);
695 + return UCODE_ERROR;
696 + }
697 +- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
698 +- mc_amd->hdr.patch_id);
699 +
700 +- uci->cpu_sig.rev = mc_amd->hdr.patch_id;
701 +- c->microcode = mc_amd->hdr.patch_id;
702 ++ rev = mc_amd->hdr.patch_id;
703 ++ ret = UCODE_UPDATED;
704 ++
705 ++ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
706 +
707 +- return UCODE_UPDATED;
708 ++out:
709 ++ uci->cpu_sig.rev = rev;
710 ++ c->microcode = rev;
711 ++
712 ++ /* Update boot_cpu_data's revision too, if we're on the BSP: */
713 ++ if (c->cpu_index == boot_cpu_data.cpu_index)
714 ++ boot_cpu_data.microcode = rev;
715 ++
716 ++ return ret;
717 + }
718 +
719 + static int install_equiv_cpu_table(const u8 *buf)
720 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
721 +index 97ccf4c3b45b..16936a24795c 100644
722 +--- a/arch/x86/kernel/cpu/microcode/intel.c
723 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
724 +@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
725 + struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
726 + struct cpuinfo_x86 *c = &cpu_data(cpu);
727 + struct microcode_intel *mc;
728 ++ enum ucode_state ret;
729 + static int prev_rev;
730 + u32 rev;
731 +
732 +@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
733 + */
734 + rev = intel_get_microcode_revision();
735 + if (rev >= mc->hdr.rev) {
736 +- uci->cpu_sig.rev = rev;
737 +- c->microcode = rev;
738 +- return UCODE_OK;
739 ++ ret = UCODE_OK;
740 ++ goto out;
741 + }
742 +
743 + /*
744 +@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
745 + prev_rev = rev;
746 + }
747 +
748 ++ ret = UCODE_UPDATED;
749 ++
750 ++out:
751 + uci->cpu_sig.rev = rev;
752 +- c->microcode = rev;
753 ++ c->microcode = rev;
754 ++
755 ++ /* Update boot_cpu_data's revision too, if we're on the BSP: */
756 ++ if (c->cpu_index == boot_cpu_data.cpu_index)
757 ++ boot_cpu_data.microcode = rev;
758 +
759 +- return UCODE_UPDATED;
760 ++ return ret;
761 + }
762 +
763 + static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
764 +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
765 +index 17b02adc79aa..0c5a9fc6e36d 100644
766 +--- a/arch/x86/kernel/dumpstack.c
767 ++++ b/arch/x86/kernel/dumpstack.c
768 +@@ -155,7 +155,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
769 + * they can be printed in the right context.
770 + */
771 + if (!partial && on_stack(info, regs, sizeof(*regs))) {
772 +- __show_regs(regs, 0);
773 ++ __show_regs(regs, SHOW_REGS_SHORT);
774 +
775 + } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
776 + IRET_FRAME_SIZE)) {
777 +@@ -353,7 +353,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
778 + oops_exit();
779 +
780 + /* Executive summary in case the oops scrolled away */
781 +- __show_regs(&exec_summary_regs, true);
782 ++ __show_regs(&exec_summary_regs, SHOW_REGS_ALL);
783 +
784 + if (!signr)
785 + return;
786 +@@ -416,14 +416,9 @@ void die(const char *str, struct pt_regs *regs, long err)
787 +
788 + void show_regs(struct pt_regs *regs)
789 + {
790 +- bool all = true;
791 +-
792 + show_regs_print_info(KERN_DEFAULT);
793 +
794 +- if (IS_ENABLED(CONFIG_X86_32))
795 +- all = !user_mode(regs);
796 +-
797 +- __show_regs(regs, all);
798 ++ __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
799 +
800 + /*
801 + * When in-kernel, we also print out the stack at the time of the fault..
802 +diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
803 +index 0ae659de21eb..666d1825390d 100644
804 +--- a/arch/x86/kernel/process_32.c
805 ++++ b/arch/x86/kernel/process_32.c
806 +@@ -59,7 +59,7 @@
807 + #include <asm/intel_rdt_sched.h>
808 + #include <asm/proto.h>
809 +
810 +-void __show_regs(struct pt_regs *regs, int all)
811 ++void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
812 + {
813 + unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
814 + unsigned long d0, d1, d2, d3, d6, d7;
815 +@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
816 + printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
817 + (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
818 +
819 +- if (!all)
820 ++ if (mode != SHOW_REGS_ALL)
821 + return;
822 +
823 + cr0 = read_cr0();
824 +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
825 +index 4344a032ebe6..0091a733c1cf 100644
826 +--- a/arch/x86/kernel/process_64.c
827 ++++ b/arch/x86/kernel/process_64.c
828 +@@ -62,7 +62,7 @@
829 + __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
830 +
831 + /* Prints also some state that isn't saved in the pt_regs */
832 +-void __show_regs(struct pt_regs *regs, int all)
833 ++void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
834 + {
835 + unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
836 + unsigned long d0, d1, d2, d3, d6, d7;
837 +@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
838 + printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
839 + regs->r13, regs->r14, regs->r15);
840 +
841 +- if (!all)
842 ++ if (mode == SHOW_REGS_SHORT)
843 + return;
844 +
845 ++ if (mode == SHOW_REGS_USER) {
846 ++ rdmsrl(MSR_FS_BASE, fs);
847 ++ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
848 ++ printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
849 ++ fs, shadowgs);
850 ++ return;
851 ++ }
852 ++
853 + asm("movl %%ds,%0" : "=r" (ds));
854 + asm("movl %%cs,%0" : "=r" (cs));
855 + asm("movl %%es,%0" : "=r" (es));
856 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
857 +index 42f1ba92622a..97d41754769e 100644
858 +--- a/arch/x86/kvm/mmu.c
859 ++++ b/arch/x86/kvm/mmu.c
860 +@@ -4960,7 +4960,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
861 + int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
862 + void *insn, int insn_len)
863 + {
864 +- int r, emulation_type = EMULTYPE_RETRY;
865 ++ int r, emulation_type = 0;
866 + enum emulation_result er;
867 + bool direct = vcpu->arch.mmu.direct_map;
868 +
869 +@@ -4973,10 +4973,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
870 + r = RET_PF_INVALID;
871 + if (unlikely(error_code & PFERR_RSVD_MASK)) {
872 + r = handle_mmio_page_fault(vcpu, cr2, direct);
873 +- if (r == RET_PF_EMULATE) {
874 +- emulation_type = 0;
875 ++ if (r == RET_PF_EMULATE)
876 + goto emulate;
877 +- }
878 + }
879 +
880 + if (r == RET_PF_INVALID) {
881 +@@ -5003,8 +5001,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
882 + return 1;
883 + }
884 +
885 +- if (mmio_info_in_cache(vcpu, cr2, direct))
886 +- emulation_type = 0;
887 ++ /*
888 ++ * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
889 ++ * optimistically try to just unprotect the page and let the processor
890 ++ * re-execute the instruction that caused the page fault. Do not allow
891 ++ * retrying MMIO emulation, as it's not only pointless but could also
892 ++ * cause us to enter an infinite loop because the processor will keep
893 ++ * faulting on the non-existent MMIO address. Retrying an instruction
894 ++ * from a nested guest is also pointless and dangerous as we are only
895 ++ * explicitly shadowing L1's page tables, i.e. unprotecting something
896 ++ * for L1 isn't going to magically fix whatever issue cause L2 to fail.
897 ++ */
898 ++ if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
899 ++ emulation_type = EMULTYPE_ALLOW_RETRY;
900 + emulate:
901 + /*
902 + * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
903 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
904 +index 9799f86388e7..ef772e5634d4 100644
905 +--- a/arch/x86/kvm/svm.c
906 ++++ b/arch/x86/kvm/svm.c
907 +@@ -3875,8 +3875,8 @@ static int emulate_on_interception(struct vcpu_svm *svm)
908 +
909 + static int rsm_interception(struct vcpu_svm *svm)
910 + {
911 +- return x86_emulate_instruction(&svm->vcpu, 0, 0,
912 +- rsm_ins_bytes, 2) == EMULATE_DONE;
913 ++ return kvm_emulate_instruction_from_buffer(&svm->vcpu,
914 ++ rsm_ins_bytes, 2) == EMULATE_DONE;
915 + }
916 +
917 + static int rdpmc_interception(struct vcpu_svm *svm)
918 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
919 +index 9869bfd0c601..d0c3be353bb6 100644
920 +--- a/arch/x86/kvm/vmx.c
921 ++++ b/arch/x86/kvm/vmx.c
922 +@@ -7539,8 +7539,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
923 + if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
924 + return kvm_skip_emulated_instruction(vcpu);
925 + else
926 +- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
927 +- NULL, 0) == EMULATE_DONE;
928 ++ return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
929 ++ EMULATE_DONE;
930 + }
931 +
932 + return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
933 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
934 +index 94cd63081471..97fcac34e007 100644
935 +--- a/arch/x86/kvm/x86.c
936 ++++ b/arch/x86/kvm/x86.c
937 +@@ -5810,7 +5810,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
938 + gpa_t gpa = cr2;
939 + kvm_pfn_t pfn;
940 +
941 +- if (emulation_type & EMULTYPE_NO_REEXECUTE)
942 ++ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
943 ++ return false;
944 ++
945 ++ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
946 + return false;
947 +
948 + if (!vcpu->arch.mmu.direct_map) {
949 +@@ -5898,7 +5901,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
950 + */
951 + vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
952 +
953 +- if (!(emulation_type & EMULTYPE_RETRY))
954 ++ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
955 ++ return false;
956 ++
957 ++ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
958 + return false;
959 +
960 + if (x86_page_table_writing_insn(ctxt))
961 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
962 +index d1f1612672c7..045338ac1667 100644
963 +--- a/arch/x86/mm/fault.c
964 ++++ b/arch/x86/mm/fault.c
965 +@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
966 + if (!(address >= VMALLOC_START && address < VMALLOC_END))
967 + return -1;
968 +
969 +- WARN_ON_ONCE(in_nmi());
970 +-
971 + /*
972 + * Synchronize this task's top level page-table
973 + * with the 'reference' page table.
974 +diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
975 +index 58c6efa9f9a9..9fe5952d117d 100644
976 +--- a/block/bfq-cgroup.c
977 ++++ b/block/bfq-cgroup.c
978 +@@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
979 +
980 + void bfqg_and_blkg_put(struct bfq_group *bfqg)
981 + {
982 +- bfqg_put(bfqg);
983 +-
984 + blkg_put(bfqg_to_blkg(bfqg));
985 ++
986 ++ bfqg_put(bfqg);
987 + }
988 +
989 + /* @stats = 0 */
990 +diff --git a/block/blk-core.c b/block/blk-core.c
991 +index 746a5eac4541..cbaca5a73f2e 100644
992 +--- a/block/blk-core.c
993 ++++ b/block/blk-core.c
994 +@@ -2161,9 +2161,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
995 + {
996 + const int op = bio_op(bio);
997 +
998 +- if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
999 ++ if (part->policy && op_is_write(op)) {
1000 + char b[BDEVNAME_SIZE];
1001 +
1002 ++ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
1003 ++ return false;
1004 ++
1005 + WARN_ONCE(1,
1006 + "generic_make_request: Trying to write "
1007 + "to read-only block-device %s (partno %d)\n",
1008 +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
1009 +index d5f2c21d8531..816923bf874d 100644
1010 +--- a/block/blk-mq-tag.c
1011 ++++ b/block/blk-mq-tag.c
1012 +@@ -402,8 +402,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
1013 + if (tdepth <= tags->nr_reserved_tags)
1014 + return -EINVAL;
1015 +
1016 +- tdepth -= tags->nr_reserved_tags;
1017 +-
1018 + /*
1019 + * If we are allowed to grow beyond the original size, allocate
1020 + * a new set of tags before freeing the old one.
1021 +@@ -423,7 +421,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
1022 + if (tdepth > 16 * BLKDEV_MAX_RQ)
1023 + return -EINVAL;
1024 +
1025 +- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
1026 ++ new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
1027 ++ tags->nr_reserved_tags);
1028 + if (!new)
1029 + return -ENOMEM;
1030 + ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
1031 +@@ -440,7 +439,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
1032 + * Don't need (or can't) update reserved tags here, they
1033 + * remain static and should never need resizing.
1034 + */
1035 +- sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
1036 ++ sbitmap_queue_resize(&tags->bitmap_tags,
1037 ++ tdepth - tags->nr_reserved_tags);
1038 + }
1039 +
1040 + return 0;
1041 +diff --git a/block/partitions/aix.c b/block/partitions/aix.c
1042 +index 007f95eea0e1..903f3ed175d0 100644
1043 +--- a/block/partitions/aix.c
1044 ++++ b/block/partitions/aix.c
1045 +@@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state)
1046 + u32 vgda_sector = 0;
1047 + u32 vgda_len = 0;
1048 + int numlvs = 0;
1049 +- struct pvd *pvd;
1050 ++ struct pvd *pvd = NULL;
1051 + struct lv_info {
1052 + unsigned short pps_per_lv;
1053 + unsigned short pps_found;
1054 +@@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state)
1055 + if (lvip[i].pps_per_lv)
1056 + foundlvs += 1;
1057 + }
1058 ++ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
1059 ++ pvd = alloc_pvd(state, vgda_sector + 17);
1060 + }
1061 + put_dev_sector(sect);
1062 + }
1063 +- pvd = alloc_pvd(state, vgda_sector + 17);
1064 + if (pvd) {
1065 + int numpps = be16_to_cpu(pvd->pp_count);
1066 + int psn_part1 = be32_to_cpu(pvd->psn_part1);
1067 +@@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state)
1068 + next_lp_ix += 1;
1069 + }
1070 + for (i = 0; i < state->limit; i += 1)
1071 +- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
1072 ++ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
1073 ++ char tmp[sizeof(n[i].name) + 1]; // null char
1074 ++
1075 ++ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
1076 + pr_warn("partition %s (%u pp's found) is "
1077 + "not contiguous\n",
1078 +- n[i].name, lvip[i].pps_found);
1079 ++ tmp, lvip[i].pps_found);
1080 ++ }
1081 + kfree(pvd);
1082 + }
1083 + kfree(n);
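(The aix.c warning fix copies the fixed-width, possibly unterminated LV name into a local buffer one byte larger before printing it. The same defensive pattern as a standalone userspace sketch - the struct is shrunk for illustration, and the explicit precision guarantees the source is never read past its width:

    #include <stdio.h>

    struct lvname {
            char name[16]; /* fixed width, may lack a trailing NUL */
    };

    static void warn_not_contiguous(const struct lvname *n, unsigned int pps)
    {
            char tmp[sizeof(n->name) + 1]; /* room to force a NUL */

            snprintf(tmp, sizeof(tmp), "%.*s", (int)sizeof(n->name), n->name);
            printf("partition %s (%u pp's found) is not contiguous\n",
                   tmp, pps);
    }
)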
1084 +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
1085 +index 9706613eecf9..bf64cfa30feb 100644
1086 +--- a/drivers/acpi/acpi_lpss.c
1087 ++++ b/drivers/acpi/acpi_lpss.c
1088 +@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
1089 + #define LPSS_GPIODEF0_DMA_LLP BIT(13)
1090 +
1091 + static DEFINE_MUTEX(lpss_iosf_mutex);
1092 +-static bool lpss_iosf_d3_entered;
1093 ++static bool lpss_iosf_d3_entered = true;
1094 +
1095 + static void lpss_iosf_enter_d3_state(void)
1096 + {
1097 +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
1098 +index 2628806c64a2..3d5277a39097 100644
1099 +--- a/drivers/android/binder_alloc.c
1100 ++++ b/drivers/android/binder_alloc.c
1101 +@@ -327,6 +327,35 @@ err_no_vma:
1102 + return vma ? -ENOMEM : -ESRCH;
1103 + }
1104 +
1105 ++
1106 ++static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
1107 ++ struct vm_area_struct *vma)
1108 ++{
1109 ++ if (vma)
1110 ++ alloc->vma_vm_mm = vma->vm_mm;
1111 ++ /*
1112 ++ * If we see alloc->vma is not NULL, buffer data structures set up
1113 ++ * completely. Look at smp_rmb side binder_alloc_get_vma.
1114 ++ * We also want to guarantee new alloc->vma_vm_mm is always visible
1115 ++ * if alloc->vma is set.
1116 ++ */
1117 ++ smp_wmb();
1118 ++ alloc->vma = vma;
1119 ++}
1120 ++
1121 ++static inline struct vm_area_struct *binder_alloc_get_vma(
1122 ++ struct binder_alloc *alloc)
1123 ++{
1124 ++ struct vm_area_struct *vma = NULL;
1125 ++
1126 ++ if (alloc->vma) {
1127 ++ /* Look at description in binder_alloc_set_vma */
1128 ++ smp_rmb();
1129 ++ vma = alloc->vma;
1130 ++ }
1131 ++ return vma;
1132 ++}
1133 ++
1134 + static struct binder_buffer *binder_alloc_new_buf_locked(
1135 + struct binder_alloc *alloc,
1136 + size_t data_size,
1137 +@@ -343,7 +372,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
1138 + size_t size, data_offsets_size;
1139 + int ret;
1140 +
1141 +- if (alloc->vma == NULL) {
1142 ++ if (!binder_alloc_get_vma(alloc)) {
1143 + pr_err("%d: binder_alloc_buf, no vma\n",
1144 + alloc->pid);
1145 + return ERR_PTR(-ESRCH);
1146 +@@ -714,9 +743,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
1147 + buffer->free = 1;
1148 + binder_insert_free_buffer(alloc, buffer);
1149 + alloc->free_async_space = alloc->buffer_size / 2;
1150 +- barrier();
1151 +- alloc->vma = vma;
1152 +- alloc->vma_vm_mm = vma->vm_mm;
1153 ++ binder_alloc_set_vma(alloc, vma);
1154 + mmgrab(alloc->vma_vm_mm);
1155 +
1156 + return 0;
1157 +@@ -743,10 +770,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
1158 + int buffers, page_count;
1159 + struct binder_buffer *buffer;
1160 +
1161 +- BUG_ON(alloc->vma);
1162 +-
1163 + buffers = 0;
1164 + mutex_lock(&alloc->mutex);
1165 ++ BUG_ON(alloc->vma);
1166 ++
1167 + while ((n = rb_first(&alloc->allocated_buffers))) {
1168 + buffer = rb_entry(n, struct binder_buffer, rb_node);
1169 +
1170 +@@ -889,7 +916,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
1171 + */
1172 + void binder_alloc_vma_close(struct binder_alloc *alloc)
1173 + {
1174 +- WRITE_ONCE(alloc->vma, NULL);
1175 ++ binder_alloc_set_vma(alloc, NULL);
1176 + }
1177 +
1178 + /**
1179 +@@ -924,7 +951,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
1180 +
1181 + index = page - alloc->pages;
1182 + page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
1183 +- vma = alloc->vma;
1184 ++ vma = binder_alloc_get_vma(alloc);
1185 + if (vma) {
1186 + if (!mmget_not_zero(alloc->vma_vm_mm))
1187 + goto err_mmget;
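(The binder_alloc accessors above are a textbook publish/consume barrier pairing: the writer makes every dependent field visible before the flag, and the reader checks the flag before trusting the fields. Skeleton of the pattern, with illustrative types:

    #include <linux/compiler.h>
    #include <asm/barrier.h>

    struct pub {
            void *data;   /* set up first */
            void *ready;  /* non-NULL means data may be read */
    };

    static void publish(struct pub *p, void *data)
    {
            p->data = data;
            smp_wmb();                 /* data becomes visible before ready */
            WRITE_ONCE(p->ready, data);
    }

    static void *consume(struct pub *p)
    {
            if (!READ_ONCE(p->ready))
                    return NULL;
            smp_rmb();                 /* pairs with smp_wmb() in publish() */
            return p->data;
    }
)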
1188 +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
1189 +index 09620c2ffa0f..704a761f94b2 100644
1190 +--- a/drivers/ata/libahci.c
1191 ++++ b/drivers/ata/libahci.c
1192 +@@ -2107,7 +2107,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1193 + struct ahci_host_priv *hpriv = ap->host->private_data;
1194 + void __iomem *port_mmio = ahci_port_base(ap);
1195 + struct ata_device *dev = ap->link.device;
1196 +- u32 devslp, dm, dito, mdat, deto;
1197 ++ u32 devslp, dm, dito, mdat, deto, dito_conf;
1198 + int rc;
1199 + unsigned int err_mask;
1200 +
1201 +@@ -2131,8 +2131,15 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1202 + return;
1203 + }
1204 +
1205 +- /* device sleep was already enabled */
1206 +- if (devslp & PORT_DEVSLP_ADSE)
1207 ++ dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
1208 ++ dito = devslp_idle_timeout / (dm + 1);
1209 ++ if (dito > 0x3ff)
1210 ++ dito = 0x3ff;
1211 ++
1212 ++ dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
1213 ++
1214 ++ /* device sleep was already enabled and same dito */
1215 ++ if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
1216 + return;
1217 +
1218 + /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
1219 +@@ -2140,11 +2147,6 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1220 + if (rc)
1221 + return;
1222 +
1223 +- dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
1224 +- dito = devslp_idle_timeout / (dm + 1);
1225 +- if (dito > 0x3ff)
1226 +- dito = 0x3ff;
1227 +-
1228 + /* Use the nominal value 10 ms if the read MDAT is zero,
1229 + * the nominal value of DETO is 20 ms.
1230 + */
1231 +@@ -2162,6 +2164,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1232 + deto = 20;
1233 + }
1234 +
1235 ++ /* Make dito, mdat, deto bits to 0s */
1236 ++ devslp &= ~GENMASK_ULL(24, 2);
1237 + devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
1238 + (mdat << PORT_DEVSLP_MDAT_OFFSET) |
1239 + (deto << PORT_DEVSLP_DETO_OFFSET) |
1240 +diff --git a/drivers/base/memory.c b/drivers/base/memory.c
1241 +index f5e560188a18..622ab8edc035 100644
1242 +--- a/drivers/base/memory.c
1243 ++++ b/drivers/base/memory.c
1244 +@@ -416,26 +416,24 @@ static ssize_t show_valid_zones(struct device *dev,
1245 + struct zone *default_zone;
1246 + int nid;
1247 +
1248 +- /*
1249 +- * The block contains more than one zone can not be offlined.
1250 +- * This can happen e.g. for ZONE_DMA and ZONE_DMA32
1251 +- */
1252 +- if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
1253 +- return sprintf(buf, "none\n");
1254 +-
1255 +- start_pfn = valid_start_pfn;
1256 +- nr_pages = valid_end_pfn - start_pfn;
1257 +-
1258 + /*
1259 + * Check the existing zone. Make sure that we do that only on the
1260 + * online nodes otherwise the page_zone is not reliable
1261 + */
1262 + if (mem->state == MEM_ONLINE) {
1263 ++ /*
1264 ++ * The block contains more than one zone can not be offlined.
1265 ++ * This can happen e.g. for ZONE_DMA and ZONE_DMA32
1266 ++ */
1267 ++ if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
1268 ++ &valid_start_pfn, &valid_end_pfn))
1269 ++ return sprintf(buf, "none\n");
1270 ++ start_pfn = valid_start_pfn;
1271 + strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
1272 + goto out;
1273 + }
1274 +
1275 +- nid = pfn_to_nid(start_pfn);
1276 ++ nid = mem->nid;
1277 + default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
1278 + strcat(buf, default_zone->name);
1279 +
1280 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1281 +index 3fb95c8d9fd8..15a5ce5bba3d 100644
1282 +--- a/drivers/block/nbd.c
1283 ++++ b/drivers/block/nbd.c
1284 +@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1285 + case NBD_SET_SOCK:
1286 + return nbd_add_socket(nbd, arg, false);
1287 + case NBD_SET_BLKSIZE:
1288 ++ if (!arg || !is_power_of_2(arg) || arg < 512 ||
1289 ++ arg > PAGE_SIZE)
1290 ++ return -EINVAL;
1291 + nbd_size_set(nbd, arg,
1292 + div_s64(config->bytesize, arg));
1293 + return 0;
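(The NBD_SET_BLKSIZE hunk is plain argument hygiene: the value is about to be used as a divisor and a block size, so it must be a power of two in [512, PAGE_SIZE]. The check in isolation, helper name illustrative:

    #include <linux/errno.h>
    #include <linux/log2.h>
    #include <linux/mm.h>

    static int validate_blksize(unsigned long arg)
    {
            if (!arg || !is_power_of_2(arg) || arg < 512 || arg > PAGE_SIZE)
                    return -EINVAL;
            return 0;
    }
)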
1294 +diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
1295 +index b3f83cd96f33..01f59be71433 100644
1296 +--- a/drivers/block/pktcdvd.c
1297 ++++ b/drivers/block/pktcdvd.c
1298 +@@ -67,7 +67,7 @@
1299 + #include <scsi/scsi.h>
1300 + #include <linux/debugfs.h>
1301 + #include <linux/device.h>
1302 +-
1303 ++#include <linux/nospec.h>
1304 + #include <linux/uaccess.h>
1305 +
1306 + #define DRIVER_NAME "pktcdvd"
1307 +@@ -2231,6 +2231,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
1308 + {
1309 + if (dev_minor >= MAX_WRITERS)
1310 + return NULL;
1311 ++
1312 ++ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
1313 + return pkt_devs[dev_minor];
1314 + }
1315 +
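(pkt_find_dev_from_minor() gains the standard Spectre-v1 mitigation: after the architectural bounds check, array_index_nospec() clamps the index so a mispredicted branch cannot speculatively load past the end of the table. The pattern in isolation:

    #include <linux/nospec.h>

    #define NR_DEVS 8
    static void *devs[NR_DEVS];

    static void *lookup(unsigned int idx)
    {
            if (idx >= NR_DEVS)
                    return NULL;
            /* Bound idx under speculation as well as architecturally. */
            idx = array_index_nospec(idx, NR_DEVS);
            return devs[idx];
    }
)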
1316 +diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
1317 +index f3c643a0473c..5f953ca8ac5b 100644
1318 +--- a/drivers/bluetooth/Kconfig
1319 ++++ b/drivers/bluetooth/Kconfig
1320 +@@ -159,6 +159,7 @@ config BT_HCIUART_LL
1321 + config BT_HCIUART_3WIRE
1322 + bool "Three-wire UART (H5) protocol support"
1323 + depends on BT_HCIUART
1324 ++ depends on BT_HCIUART_SERDEV
1325 + help
1326 + The HCI Three-wire UART Transport Layer makes it possible to
1327 + user the Bluetooth HCI over a serial port interface. The HCI
1328 +diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
1329 +index 6116cd05e228..9086edc9066b 100644
1330 +--- a/drivers/char/tpm/tpm_i2c_infineon.c
1331 ++++ b/drivers/char/tpm/tpm_i2c_infineon.c
1332 +@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
1333 + /* Lock the adapter for the duration of the whole sequence. */
1334 + if (!tpm_dev.client->adapter->algo->master_xfer)
1335 + return -EOPNOTSUPP;
1336 +- i2c_lock_adapter(tpm_dev.client->adapter);
1337 ++ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1338 +
1339 + if (tpm_dev.chip_type == SLB9645) {
1340 + /* use a combined read for newer chips
1341 +@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
1342 + }
1343 +
1344 + out:
1345 +- i2c_unlock_adapter(tpm_dev.client->adapter);
1346 ++ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1347 + /* take care of 'guard time' */
1348 + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
1349 +
1350 +@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
1351 +
1352 + if (!tpm_dev.client->adapter->algo->master_xfer)
1353 + return -EOPNOTSUPP;
1354 +- i2c_lock_adapter(tpm_dev.client->adapter);
1355 ++ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1356 +
1357 + /* prepend the 'register address' to the buffer */
1358 + tpm_dev.buf[0] = addr;
1359 +@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
1360 + usleep_range(sleep_low, sleep_hi);
1361 + }
1362 +
1363 +- i2c_unlock_adapter(tpm_dev.client->adapter);
1364 ++ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1365 + /* take care of 'guard time' */
1366 + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
1367 +
1368 +diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
1369 +index 424ff2fde1f2..9914f6973463 100644
1370 +--- a/drivers/char/tpm/tpm_tis_spi.c
1371 ++++ b/drivers/char/tpm/tpm_tis_spi.c
1372 +@@ -199,6 +199,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
1373 + static int tpm_tis_spi_probe(struct spi_device *dev)
1374 + {
1375 + struct tpm_tis_spi_phy *phy;
1376 ++ int irq;
1377 +
1378 + phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
1379 + GFP_KERNEL);
1380 +@@ -211,7 +212,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
1381 + if (!phy->iobuf)
1382 + return -ENOMEM;
1383 +
1384 +- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
1385 ++ /* If the SPI device has an IRQ then use that */
1386 ++ if (dev->irq > 0)
1387 ++ irq = dev->irq;
1388 ++ else
1389 ++ irq = -1;
1390 ++
1391 ++ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
1392 + NULL);
1393 + }
1394 +
1395 +diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
1396 +index bb2a6f2f5516..a985bf5e1ac6 100644
1397 +--- a/drivers/clk/clk-scmi.c
1398 ++++ b/drivers/clk/clk-scmi.c
1399 +@@ -38,7 +38,6 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
1400 + static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1401 + unsigned long *parent_rate)
1402 + {
1403 +- int step;
1404 + u64 fmin, fmax, ftmp;
1405 + struct scmi_clk *clk = to_scmi_clk(hw);
1406 +
1407 +@@ -60,9 +59,9 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1408 +
1409 + ftmp = rate - fmin;
1410 + ftmp += clk->info->range.step_size - 1; /* to round up */
1411 +- step = do_div(ftmp, clk->info->range.step_size);
1412 ++ do_div(ftmp, clk->info->range.step_size);
1413 +
1414 +- return step * clk->info->range.step_size + fmin;
1415 ++ return ftmp * clk->info->range.step_size + fmin;
1416 + }
1417 +
1418 + static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
1419 +diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
1420 +index fd49b24fd6af..99e2aace8078 100644
1421 +--- a/drivers/dax/pmem.c
1422 ++++ b/drivers/dax/pmem.c
1423 +@@ -105,15 +105,19 @@ static int dax_pmem_probe(struct device *dev)
1424 + if (rc)
1425 + return rc;
1426 +
1427 +- rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit,
1428 +- &dax_pmem->ref);
1429 +- if (rc)
1430 ++ rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
1431 ++ if (rc) {
1432 ++ percpu_ref_exit(&dax_pmem->ref);
1433 + return rc;
1434 ++ }
1435 +
1436 + dax_pmem->pgmap.ref = &dax_pmem->ref;
1437 + addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
1438 +- if (IS_ERR(addr))
1439 ++ if (IS_ERR(addr)) {
1440 ++ devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
1441 ++ percpu_ref_exit(&dax_pmem->ref);
1442 + return PTR_ERR(addr);
1443 ++ }
1444 +
1445 + rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
1446 + &dax_pmem->ref);
1447 +diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
1448 +index e9db895916c3..1aa67bb5d8c0 100644
1449 +--- a/drivers/firmware/google/vpd.c
1450 ++++ b/drivers/firmware/google/vpd.c
1451 +@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
1452 + sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
1453 + kfree(sec->raw_name);
1454 + memunmap(sec->baseaddr);
1455 ++ sec->enabled = false;
1456 + }
1457 +
1458 + return 0;
1459 +@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
1460 + ret = vpd_section_init("rw", &rw_vpd,
1461 + physaddr + sizeof(struct vpd_cbmem) +
1462 + header.ro_size, header.rw_size);
1463 +- if (ret)
1464 ++ if (ret) {
1465 ++ vpd_section_destroy(&ro_vpd);
1466 + return ret;
1467 ++ }
1468 + }
1469 +
1470 + return 0;
1471 +diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
1472 +index b23d9a36be1f..51c7d1b84c2e 100644
1473 +--- a/drivers/gpio/gpio-ml-ioh.c
1474 ++++ b/drivers/gpio/gpio-ml-ioh.c
1475 +@@ -496,9 +496,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
1476 + return 0;
1477 +
1478 + err_gpiochip_add:
1479 ++ chip = chip_save;
1480 + while (--i >= 0) {
1481 +- chip--;
1482 + gpiochip_remove(&chip->gpio);
1483 ++ chip++;
1484 + }
1485 + kfree(chip_save);
1486 +
1487 +diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
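(The gpio-ml-ioh error path was stepping backwards from the failing element instead of replaying from the start; the corrected shape is the usual "unwind exactly the first i successes" loop from a saved base pointer. Generic sketch, types and teardown call illustrative:

    struct item {
            int id;
    };

    static void unregister_item(struct item *it)
    {
            /* teardown for one successfully initialised element */
    }

    /* Undo base[0..i-1] after base[i] failed to initialise. */
    static void unwind(struct item *base, int i)
    {
            struct item *it = base;

            while (--i >= 0) {
                    unregister_item(it);
                    it++;
            }
    }
)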
1488 +index 1e66f808051c..2e33fd552899 100644
1489 +--- a/drivers/gpio/gpio-pxa.c
1490 ++++ b/drivers/gpio/gpio-pxa.c
1491 +@@ -241,6 +241,17 @@ int pxa_irq_to_gpio(int irq)
1492 + return irq_gpio0;
1493 + }
1494 +
1495 ++static bool pxa_gpio_has_pinctrl(void)
1496 ++{
1497 ++ switch (gpio_type) {
1498 ++ case PXA3XX_GPIO:
1499 ++ return false;
1500 ++
1501 ++ default:
1502 ++ return true;
1503 ++ }
1504 ++}
1505 ++
1506 + static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
1507 + {
1508 + struct pxa_gpio_chip *pchip = chip_to_pxachip(chip);
1509 +@@ -255,9 +266,11 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
1510 + unsigned long flags;
1511 + int ret;
1512 +
1513 +- ret = pinctrl_gpio_direction_input(chip->base + offset);
1514 +- if (!ret)
1515 +- return 0;
1516 ++ if (pxa_gpio_has_pinctrl()) {
1517 ++ ret = pinctrl_gpio_direction_input(chip->base + offset);
1518 ++ if (!ret)
1519 ++ return 0;
1520 ++ }
1521 +
1522 + spin_lock_irqsave(&gpio_lock, flags);
1523 +
1524 +@@ -282,9 +295,11 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
1525 +
1526 + writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
1527 +
1528 +- ret = pinctrl_gpio_direction_output(chip->base + offset);
1529 +- if (ret)
1530 +- return ret;
1531 ++ if (pxa_gpio_has_pinctrl()) {
1532 ++ ret = pinctrl_gpio_direction_output(chip->base + offset);
1533 ++ if (ret)
1534 ++ return ret;
1535 ++ }
1536 +
1537 + spin_lock_irqsave(&gpio_lock, flags);
1538 +
1539 +@@ -348,8 +363,12 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
1540 + pchip->chip.set = pxa_gpio_set;
1541 + pchip->chip.to_irq = pxa_gpio_to_irq;
1542 + pchip->chip.ngpio = ngpio;
1543 +- pchip->chip.request = gpiochip_generic_request;
1544 +- pchip->chip.free = gpiochip_generic_free;
1545 ++
1546 ++ if (pxa_gpio_has_pinctrl()) {
1547 ++ pchip->chip.request = gpiochip_generic_request;
1548 ++ pchip->chip.free = gpiochip_generic_free;
1549 ++ }
1550 ++
1551 + #ifdef CONFIG_OF_GPIO
1552 + pchip->chip.of_node = np;
1553 + pchip->chip.of_xlate = pxa_gpio_of_xlate;
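The gpio-pxa changes above all hinge on one helper: pxa_gpio_has_pinctrl() reports whether the SoC variant has pinctrl support, and the direction/request paths only attempt the pinctrl calls when it does, falling back to direct register writes otherwise. A hedged userspace sketch of that gating idea (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

enum gpio_type { PXA25X_GPIO, PXA3XX_GPIO };
static enum gpio_type gpio_type = PXA3XX_GPIO;

static bool has_pinctrl(void)
{
    /* PXA3xx has no pinctrl support; everything else does */
    return gpio_type != PXA3XX_GPIO;
}

/* pretend pinctrl call, always succeeds in this sketch */
static int pinctrl_dir_input(int gpio) { (void)gpio; return 0; }

static int direction_input(int gpio)
{
    if (has_pinctrl()) {
        if (pinctrl_dir_input(gpio) == 0)
            return 0;           /* pinctrl handled it */
    }
    /* fall back to writing the direction register directly */
    printf("GPDR fallback for gpio %d\n", gpio);
    return 0;
}

int main(void) { return direction_input(5); }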
1554 +diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
1555 +index 94396caaca75..d5d79727c55d 100644
1556 +--- a/drivers/gpio/gpio-tegra.c
1557 ++++ b/drivers/gpio/gpio-tegra.c
1558 +@@ -720,4 +720,4 @@ static int __init tegra_gpio_init(void)
1559 + {
1560 + return platform_driver_register(&tegra_gpio_driver);
1561 + }
1562 +-postcore_initcall(tegra_gpio_init);
1563 ++subsys_initcall(tegra_gpio_init);
1564 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
1565 +index a576b8bbb3cd..dea40b322191 100644
1566 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
1567 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
1568 +@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
1569 + }
1570 + }
1571 +
1572 +-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
1573 ++static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
1574 + struct dc_link *link,
1575 + struct psr_context *psr_context)
1576 + {
1577 +@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
1578 +
1579 + /* notifyDMCUMsg */
1580 + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
1581 ++
1582 ++ return true;
1583 + }
1584 +
1585 + static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
1586 +@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
1587 + * least a few frames. Should never hit the max retry assert below.
1588 + */
1589 + if (wait == true) {
1590 +- for (retryCount = 0; retryCount <= 1000; retryCount++) {
1591 +- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
1592 +- if (enable) {
1593 +- if (psr_state != 0)
1594 +- break;
1595 +- } else {
1596 +- if (psr_state == 0)
1597 +- break;
1598 ++ for (retryCount = 0; retryCount <= 1000; retryCount++) {
1599 ++ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
1600 ++ if (enable) {
1601 ++ if (psr_state != 0)
1602 ++ break;
1603 ++ } else {
1604 ++ if (psr_state == 0)
1605 ++ break;
1606 ++ }
1607 ++ udelay(500);
1608 + }
1609 +- udelay(500);
1610 +- }
1611 +
1612 +- /* assert if max retry hit */
1613 +- ASSERT(retryCount <= 1000);
1614 ++ /* assert if max retry hit */
1615 ++ if (retryCount >= 1000)
1616 ++ ASSERT(0);
1617 + }
1618 + }
1619 +
1620 +-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
1621 ++static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
1622 + struct dc_link *link,
1623 + struct psr_context *psr_context)
1624 + {
1625 +@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
1626 +
1627 + /* If microcontroller is not running, do nothing */
1628 + if (dmcu->dmcu_state != DMCU_RUNNING)
1629 +- return;
1630 ++ return false;
1631 +
1632 + link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
1633 + psr_context->psrExitLinkTrainingRequired);
1634 +@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
1635 +
1636 + /* notifyDMCUMsg */
1637 + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
1638 ++
1639 ++ /* waitDMCUReadyForCmd */
1640 ++ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
1641 ++
1642 ++ return true;
1643 + }
1644 +
1645 + static void dcn10_psr_wait_loop(
1646 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
1647 +index de60f940030d..4550747fb61c 100644
1648 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
1649 ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
1650 +@@ -48,7 +48,7 @@ struct dmcu_funcs {
1651 + const char *src,
1652 + unsigned int bytes);
1653 + void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
1654 +- void (*setup_psr)(struct dmcu *dmcu,
1655 ++ bool (*setup_psr)(struct dmcu *dmcu,
1656 + struct dc_link *link,
1657 + struct psr_context *psr_context);
1658 + void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
1659 +diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
1660 +index 48685cddbad1..c73bd003f845 100644
1661 +--- a/drivers/gpu/ipu-v3/ipu-common.c
1662 ++++ b/drivers/gpu/ipu-v3/ipu-common.c
1663 +@@ -1401,6 +1401,8 @@ static int ipu_probe(struct platform_device *pdev)
1664 + return -ENODEV;
1665 +
1666 + ipu->id = of_alias_get_id(np, "ipu");
1667 ++ if (ipu->id < 0)
1668 ++ ipu->id = 0;
1669 +
1670 + if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
1671 + IS_ENABLED(CONFIG_DRM)) {
1672 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1673 +index c7981ddd8776..e80bcd71fe1e 100644
1674 +--- a/drivers/hid/hid-ids.h
1675 ++++ b/drivers/hid/hid-ids.h
1676 +@@ -528,6 +528,7 @@
1677 +
1678 + #define I2C_VENDOR_ID_RAYD 0x2386
1679 + #define I2C_PRODUCT_ID_RAYD_3118 0x3118
1680 ++#define I2C_PRODUCT_ID_RAYD_4B33 0x4B33
1681 +
1682 + #define USB_VENDOR_ID_HANWANG 0x0b57
1683 + #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
1684 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1685 +index ab93dd5927c3..b23c4b5854d8 100644
1686 +--- a/drivers/hid/hid-input.c
1687 ++++ b/drivers/hid/hid-input.c
1688 +@@ -1579,6 +1579,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
1689 + input_dev->dev.parent = &hid->dev;
1690 +
1691 + hidinput->input = input_dev;
1692 ++ hidinput->application = application;
1693 + list_add_tail(&hidinput->list, &hid->inputs);
1694 +
1695 + INIT_LIST_HEAD(&hidinput->reports);
1696 +@@ -1674,8 +1675,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
1697 + struct hid_input *hidinput;
1698 +
1699 + list_for_each_entry(hidinput, &hid->inputs, list) {
1700 +- if (hidinput->report &&
1701 +- hidinput->report->application == report->application)
1702 ++ if (hidinput->application == report->application)
1703 + return hidinput;
1704 + }
1705 +
1706 +@@ -1812,6 +1812,7 @@ void hidinput_disconnect(struct hid_device *hid)
1707 + input_unregister_device(hidinput->input);
1708 + else
1709 + input_free_device(hidinput->input);
1710 ++ kfree(hidinput->name);
1711 + kfree(hidinput);
1712 + }
1713 +
1714 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1715 +index 45968f7970f8..15c934ef6b18 100644
1716 +--- a/drivers/hid/hid-multitouch.c
1717 ++++ b/drivers/hid/hid-multitouch.c
1718 +@@ -1167,7 +1167,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
1719 + struct hid_usage *usage,
1720 + enum latency_mode latency,
1721 + bool surface_switch,
1722 +- bool button_switch)
1723 ++ bool button_switch,
1724 ++ bool *inputmode_found)
1725 + {
1726 + struct mt_device *td = hid_get_drvdata(hdev);
1727 + struct mt_class *cls = &td->mtclass;
1728 +@@ -1179,6 +1180,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
1729 +
1730 + switch (usage->hid) {
1731 + case HID_DG_INPUTMODE:
1732 ++ /*
1733 ++ * Some Elan panels wrongly declare two input mode features,
1734 ++ * and silently ignore the value we set in the second
1735 ++ * field. Skip the second feature and hope for the best.
1736 ++ */
1737 ++ if (*inputmode_found)
1738 ++ return false;
1739 ++
1740 + if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
1741 + report_len = hid_report_len(report);
1742 + buf = hid_alloc_report_buf(report, GFP_KERNEL);
1743 +@@ -1194,6 +1203,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
1744 + }
1745 +
1746 + field->value[index] = td->inputmode_value;
1747 ++ *inputmode_found = true;
1748 + return true;
1749 +
1750 + case HID_DG_CONTACTMAX:
1751 +@@ -1231,6 +1241,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
1752 + struct hid_usage *usage;
1753 + int i, j;
1754 + bool update_report;
1755 ++ bool inputmode_found = false;
1756 +
1757 + rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
1758 + list_for_each_entry(rep, &rep_enum->report_list, list) {
1759 +@@ -1249,7 +1260,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
1760 + usage,
1761 + latency,
1762 + surface_switch,
1763 +- button_switch))
1764 ++ button_switch,
1765 ++ &inputmode_found))
1766 + update_report = true;
1767 + }
1768 + }
1769 +@@ -1476,6 +1488,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
1770 + */
1771 + hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
1772 +
1773 ++ if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
1774 ++ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
1775 ++
1776 + timer_setup(&td->release_timer, mt_expired_timeout, 0);
1777 +
1778 + ret = hid_parse(hdev);
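The multitouch change threads a bool through mt_set_modes() so that only the first HID_DG_INPUTMODE feature is applied: the helper checks the flag before doing any work and sets it once the value has been written. A minimal sketch of this apply-once idiom, detached from the HID specifics:

#include <stdbool.h>
#include <stdio.h>

/* apply a setting only for the first of possibly-duplicated fields */
static bool apply_once(int field_id, bool *found)
{
    if (*found)
        return false;        /* later duplicate: skip it */
    printf("setting input mode via field %d\n", field_id);
    *found = true;
    return true;
}

int main(void)
{
    int fields[] = { 3, 7 }; /* e.g. two INPUTMODE features on one panel */
    bool found = false;

    for (int i = 0; i < 2; i++)
        apply_once(fields[i], &found);
    return 0;
}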
1779 +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
1780 +index eae0cb3ddec6..5fd1159fc095 100644
1781 +--- a/drivers/hid/i2c-hid/i2c-hid.c
1782 ++++ b/drivers/hid/i2c-hid/i2c-hid.c
1783 +@@ -174,6 +174,8 @@ static const struct i2c_hid_quirks {
1784 + I2C_HID_QUIRK_RESEND_REPORT_DESCR },
1785 + { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
1786 + I2C_HID_QUIRK_RESEND_REPORT_DESCR },
1787 ++ { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_4B33,
1788 ++ I2C_HID_QUIRK_RESEND_REPORT_DESCR },
1789 + { 0, 0 }
1790 + };
1791 +
1792 +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
1793 +index 658dc765753b..553adccb05d7 100644
1794 +--- a/drivers/hv/hv.c
1795 ++++ b/drivers/hv/hv.c
1796 +@@ -242,6 +242,10 @@ int hv_synic_alloc(void)
1797 +
1798 + return 0;
1799 + err:
1800 ++ /*
1801 ++ * Any memory allocations that succeeded will be freed when
1802 ++ * the caller cleans up by calling hv_synic_free()
1803 ++ */
1804 + return -ENOMEM;
1805 + }
1806 +
1807 +@@ -254,12 +258,10 @@ void hv_synic_free(void)
1808 + struct hv_per_cpu_context *hv_cpu
1809 + = per_cpu_ptr(hv_context.cpu_context, cpu);
1810 +
1811 +- if (hv_cpu->synic_event_page)
1812 +- free_page((unsigned long)hv_cpu->synic_event_page);
1813 +- if (hv_cpu->synic_message_page)
1814 +- free_page((unsigned long)hv_cpu->synic_message_page);
1815 +- if (hv_cpu->post_msg_page)
1816 +- free_page((unsigned long)hv_cpu->post_msg_page);
1817 ++ kfree(hv_cpu->clk_evt);
1818 ++ free_page((unsigned long)hv_cpu->synic_event_page);
1819 ++ free_page((unsigned long)hv_cpu->synic_message_page);
1820 ++ free_page((unsigned long)hv_cpu->post_msg_page);
1821 + }
1822 +
1823 + kfree(hv_context.hv_numa_map);
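The hv_synic_free() rewrite drops the NULL checks because free_page() (like kfree()) is a no-op for a zero/NULL argument, which is exactly what makes the "allocate everything, free everything on any failure" promise in the err: comment safe. Userspace free() gives the same guarantee, as this sketch shows:

#include <stdlib.h>

struct percpu_ctx {
    void *event_page;
    void *message_page;
    void *post_msg_page;
};

static void ctx_free(struct percpu_ctx *c)
{
    /* free(NULL) is defined to do nothing, so cleanup after a
     * partial allocation needs no per-pointer guards */
    free(c->event_page);
    free(c->message_page);
    free(c->post_msg_page);
}

int main(void)
{
    struct percpu_ctx c = { 0 };

    c.event_page = malloc(4096); /* suppose the later allocations failed */
    ctx_free(&c);                /* still safe */
    return 0;
}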
1824 +diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
1825 +index 60e4d0e939a3..715b6fdb4989 100644
1826 +--- a/drivers/i2c/busses/i2c-aspeed.c
1827 ++++ b/drivers/i2c/busses/i2c-aspeed.c
1828 +@@ -868,7 +868,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
1829 + if (!match)
1830 + bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
1831 + else
1832 +- bus->get_clk_reg_val = match->data;
1833 ++ bus->get_clk_reg_val = (u32 (*)(u32))match->data;
1834 +
1835 + /* Initialize the I2C adapter */
1836 + spin_lock_init(&bus->lock);
1837 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1838 +index aa726607645e..45fcf0c37a9e 100644
1839 +--- a/drivers/i2c/busses/i2c-i801.c
1840 ++++ b/drivers/i2c/busses/i2c-i801.c
1841 +@@ -139,6 +139,7 @@
1842 +
1843 + #define SBREG_BAR 0x10
1844 + #define SBREG_SMBCTRL 0xc6000c
1845 ++#define SBREG_SMBCTRL_DNV 0xcf000c
1846 +
1847 + /* Host status bits for SMBPCISTS */
1848 + #define SMBPCISTS_INTS BIT(3)
1849 +@@ -1396,7 +1397,11 @@ static void i801_add_tco(struct i801_priv *priv)
1850 + spin_unlock(&p2sb_spinlock);
1851 +
1852 + res = &tco_res[ICH_RES_MEM_OFF];
1853 +- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
1854 ++ if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
1855 ++ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
1856 ++ else
1857 ++ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
1858 ++
1859 + res->end = res->start + 3;
1860 + res->flags = IORESOURCE_MEM;
1861 +
1862 +diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
1863 +index 9a71e50d21f1..0c51c0ffdda9 100644
1864 +--- a/drivers/i2c/busses/i2c-xiic.c
1865 ++++ b/drivers/i2c/busses/i2c-xiic.c
1866 +@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1867 + {
1868 + u8 rx_watermark;
1869 + struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
1870 ++ unsigned long flags;
1871 +
1872 + /* Clear and enable Rx full interrupt. */
1873 + xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
1874 +@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1875 + rx_watermark = IIC_RX_FIFO_DEPTH;
1876 + xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
1877 +
1878 ++ local_irq_save(flags);
1879 + if (!(msg->flags & I2C_M_NOSTART))
1880 + /* write the address */
1881 + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1882 +@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1883 +
1884 + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1885 + msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
1886 ++ local_irq_restore(flags);
1887 ++
1888 + if (i2c->nmsgs == 1)
1889 + /* very last, enable bus not busy as well */
1890 + xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
1891 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1892 +index bff10ab141b0..dafcb6f019b3 100644
1893 +--- a/drivers/infiniband/core/cma.c
1894 ++++ b/drivers/infiniband/core/cma.c
1895 +@@ -1445,9 +1445,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
1896 + (addr->src_addr.ss_family == AF_IB ||
1897 + rdma_protocol_roce(id->device, port_num));
1898 +
1899 +- return !addr->dev_addr.bound_dev_if ||
1900 +- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1901 +- addr->dev_addr.bound_dev_if == net_dev->ifindex);
1902 ++ /*
1903 ++ * Net namespaces must match, and if the listener is listening
1904 ++ * on a specific netdevice then the netdevice must match as well.
1905 ++ */
1906 ++ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1907 ++ (!!addr->dev_addr.bound_dev_if ==
1908 ++ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
1909 ++ return true;
1910 ++ else
1911 ++ return false;
1912 + }
1913 +
1914 + static struct rdma_id_private *cma_find_listener(
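The rewritten cma_match_net_dev() test packs two conditions into one expression: the namespaces must be equal, and !!bound_dev_if == (bound_dev_if == ifindex) is true exactly when the listener is unbound (both sides false) or bound to this very device (both sides true). A small truth-table check of that sub-expression:

#include <stdbool.h>
#include <stdio.h>

/* true when: not bound to any device, or bound to exactly this ifindex */
static bool dev_matches(int bound_dev_if, int ifindex)
{
    return !!bound_dev_if == (bound_dev_if == ifindex);
}

int main(void)
{
    printf("%d\n", dev_matches(0, 7)); /* 1: unbound listener accepts any */
    printf("%d\n", dev_matches(7, 7)); /* 1: bound to the same device */
    printf("%d\n", dev_matches(3, 7)); /* 0: bound to a different device */
    return 0;
}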
1915 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
1916 +index 63b5b3edabcb..8dc336a85128 100644
1917 +--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
1918 ++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
1919 +@@ -494,6 +494,9 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
1920 + step_idx = 1;
1921 + } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
1922 + step_idx = 0;
1923 ++ } else {
1924 ++ ret = -EINVAL;
1925 ++ goto err_dma_alloc_l1;
1926 + }
1927 +
1928 + /* set HEM base address to hardware */
1929 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1930 +index a6e11be0ea0f..c00925ed9da8 100644
1931 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1932 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1933 +@@ -273,7 +273,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1934 + switch (wr->opcode) {
1935 + case IB_WR_SEND_WITH_IMM:
1936 + case IB_WR_RDMA_WRITE_WITH_IMM:
1937 +- ud_sq_wqe->immtdata = wr->ex.imm_data;
1938 ++ ud_sq_wqe->immtdata =
1939 ++ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
1940 + break;
1941 + default:
1942 + ud_sq_wqe->immtdata = 0;
1943 +@@ -371,7 +372,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1944 + switch (wr->opcode) {
1945 + case IB_WR_SEND_WITH_IMM:
1946 + case IB_WR_RDMA_WRITE_WITH_IMM:
1947 +- rc_sq_wqe->immtdata = wr->ex.imm_data;
1948 ++ rc_sq_wqe->immtdata =
1949 ++ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
1950 + break;
1951 + case IB_WR_SEND_WITH_INV:
1952 + rc_sq_wqe->inv_key =
1953 +@@ -1931,7 +1933,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
1954 + case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
1955 + wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
1956 + wc->wc_flags = IB_WC_WITH_IMM;
1957 +- wc->ex.imm_data = cqe->immtdata;
1958 ++ wc->ex.imm_data =
1959 ++ cpu_to_be32(le32_to_cpu(cqe->immtdata));
1960 + break;
1961 + case HNS_ROCE_V2_OPCODE_SEND:
1962 + wc->opcode = IB_WC_RECV;
1963 +@@ -1940,7 +1943,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
1964 + case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
1965 + wc->opcode = IB_WC_RECV;
1966 + wc->wc_flags = IB_WC_WITH_IMM;
1967 +- wc->ex.imm_data = cqe->immtdata;
1968 ++ wc->ex.imm_data =
1969 ++ cpu_to_be32(le32_to_cpu(cqe->immtdata));
1970 + break;
1971 + case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
1972 + wc->opcode = IB_WC_RECV;
1973 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1974 +index d47675f365c7..7e2c740e0df5 100644
1975 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1976 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1977 +@@ -768,7 +768,7 @@ struct hns_roce_v2_cqe {
1978 + __le32 byte_4;
1979 + union {
1980 + __le32 rkey;
1981 +- __be32 immtdata;
1982 ++ __le32 immtdata;
1983 + };
1984 + __le32 byte_12;
1985 + __le32 byte_16;
1986 +@@ -926,7 +926,7 @@ struct hns_roce_v2_cq_db {
1987 + struct hns_roce_v2_ud_send_wqe {
1988 + __le32 byte_4;
1989 + __le32 msg_len;
1990 +- __be32 immtdata;
1991 ++ __le32 immtdata;
1992 + __le32 byte_16;
1993 + __le32 byte_20;
1994 + __le32 byte_24;
1995 +@@ -1012,7 +1012,7 @@ struct hns_roce_v2_rc_send_wqe {
1996 + __le32 msg_len;
1997 + union {
1998 + __le32 inv_key;
1999 +- __be32 immtdata;
2000 ++ __le32 immtdata;
2001 + };
2002 + __le32 byte_16;
2003 + __le32 byte_20;
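The immtdata fixes above resolve a type confusion: the immediate arrives in wire (big-endian) order but the hardware descriptor field is little-endian, so the driver now converts through CPU order with cpu_to_le32(be32_to_cpu(x)) on send and the inverse on completion. A userspace model of the round trip, assuming a little-endian host where cpu_to_le32 is the identity:

#include <stdint.h>
#include <stdio.h>

static uint32_t bswap32(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0xff00) |
           ((v << 8) & 0xff0000) | (v << 24);
}

/* little-endian host: be32_to_cpu swaps, cpu_to_le32 is a no-op */
static uint32_t my_be32_to_cpu(uint32_t v) { return bswap32(v); }
static uint32_t my_cpu_to_le32(uint32_t v) { return v; }

int main(void)
{
    uint32_t imm_be = 0x12345678;  /* big-endian immediate from the WR */
    uint32_t imm_le = my_cpu_to_le32(my_be32_to_cpu(imm_be));

    printf("wqe immtdata = 0x%08x\n", imm_le); /* 0x78563412 */
    return 0;
}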
2004 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
2005 +index 6709328d90f8..c7e034963738 100644
2006 +--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
2007 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
2008 +@@ -822,6 +822,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
2009 + if (neigh && list_empty(&neigh->list)) {
2010 + kref_get(&mcast->ah->ref);
2011 + neigh->ah = mcast->ah;
2012 ++ neigh->ah->valid = 1;
2013 + list_add_tail(&neigh->list, &mcast->neigh_list);
2014 + }
2015 + }
2016 +diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
2017 +index 54fe190fd4bc..48c5ccab00a0 100644
2018 +--- a/drivers/input/touchscreen/atmel_mxt_ts.c
2019 ++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
2020 +@@ -1658,10 +1658,11 @@ static int mxt_parse_object_table(struct mxt_data *data,
2021 + break;
2022 + case MXT_TOUCH_MULTI_T9:
2023 + data->multitouch = MXT_TOUCH_MULTI_T9;
2024 ++ /* Only handle messages from first T9 instance */
2025 + data->T9_reportid_min = min_id;
2026 +- data->T9_reportid_max = max_id;
2027 +- data->num_touchids = object->num_report_ids
2028 +- * mxt_obj_instances(object);
2029 ++ data->T9_reportid_max = min_id +
2030 ++ object->num_report_ids - 1;
2031 ++ data->num_touchids = object->num_report_ids;
2032 + break;
2033 + case MXT_SPT_MESSAGECOUNT_T44:
2034 + data->T44_address = object->start_address;
2035 +diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
2036 +index 1d647104bccc..b73c6a7bf7f2 100644
2037 +--- a/drivers/iommu/arm-smmu-v3.c
2038 ++++ b/drivers/iommu/arm-smmu-v3.c
2039 +@@ -24,6 +24,7 @@
2040 + #include <linux/acpi_iort.h>
2041 + #include <linux/bitfield.h>
2042 + #include <linux/bitops.h>
2043 ++#include <linux/crash_dump.h>
2044 + #include <linux/delay.h>
2045 + #include <linux/dma-iommu.h>
2046 + #include <linux/err.h>
2047 +@@ -2211,8 +2212,12 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
2048 + reg &= ~clr;
2049 + reg |= set;
2050 + writel_relaxed(reg | GBPA_UPDATE, gbpa);
2051 +- return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2052 +- 1, ARM_SMMU_POLL_TIMEOUT_US);
2053 ++ ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2054 ++ 1, ARM_SMMU_POLL_TIMEOUT_US);
2055 ++
2056 ++ if (ret)
2057 ++ dev_err(smmu->dev, "GBPA not responding to update\n");
2058 ++ return ret;
2059 + }
2060 +
2061 + static void arm_smmu_free_msis(void *data)
2062 +@@ -2392,8 +2397,15 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
2063 +
2064 + /* Clear CR0 and sync (disables SMMU and queue processing) */
2065 + reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
2066 +- if (reg & CR0_SMMUEN)
2067 ++ if (reg & CR0_SMMUEN) {
2068 ++ if (is_kdump_kernel()) {
2069 ++ arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
2070 ++ arm_smmu_device_disable(smmu);
2071 ++ return -EBUSY;
2072 ++ }
2073 ++
2074 + dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
2075 ++ }
2076 +
2077 + ret = arm_smmu_device_disable(smmu);
2078 + if (ret)
2079 +@@ -2491,10 +2503,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
2080 + enables |= CR0_SMMUEN;
2081 + } else {
2082 + ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
2083 +- if (ret) {
2084 +- dev_err(smmu->dev, "GBPA not responding to update\n");
2085 ++ if (ret)
2086 + return ret;
2087 +- }
2088 + }
2089 + ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2090 + ARM_SMMU_CR0ACK);
2091 +diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
2092 +index 09b47260c74b..feb1664815b7 100644
2093 +--- a/drivers/iommu/ipmmu-vmsa.c
2094 ++++ b/drivers/iommu/ipmmu-vmsa.c
2095 +@@ -73,7 +73,7 @@ struct ipmmu_vmsa_domain {
2096 + struct io_pgtable_ops *iop;
2097 +
2098 + unsigned int context_id;
2099 +- spinlock_t lock; /* Protects mappings */
2100 ++ struct mutex mutex; /* Protects mappings */
2101 + };
2102 +
2103 + static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
2104 +@@ -595,7 +595,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
2105 + if (!domain)
2106 + return NULL;
2107 +
2108 +- spin_lock_init(&domain->lock);
2109 ++ mutex_init(&domain->mutex);
2110 +
2111 + return &domain->io_domain;
2112 + }
2113 +@@ -641,7 +641,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
2114 + struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2115 + struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
2116 + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
2117 +- unsigned long flags;
2118 + unsigned int i;
2119 + int ret = 0;
2120 +
2121 +@@ -650,7 +649,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
2122 + return -ENXIO;
2123 + }
2124 +
2125 +- spin_lock_irqsave(&domain->lock, flags);
2126 ++ mutex_lock(&domain->mutex);
2127 +
2128 + if (!domain->mmu) {
2129 + /* The domain hasn't been used yet, initialize it. */
2130 +@@ -674,7 +673,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
2131 + } else
2132 + dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
2133 +
2134 +- spin_unlock_irqrestore(&domain->lock, flags);
2135 ++ mutex_unlock(&domain->mutex);
2136 +
2137 + if (ret < 0)
2138 + return ret;
2139 +diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
2140 +index 25c1ce811053..1fdd09ebb3f1 100644
2141 +--- a/drivers/macintosh/via-pmu.c
2142 ++++ b/drivers/macintosh/via-pmu.c
2143 +@@ -534,8 +534,9 @@ init_pmu(void)
2144 + int timeout;
2145 + struct adb_request req;
2146 +
2147 +- out_8(&via[B], via[B] | TREQ); /* negate TREQ */
2148 +- out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
2149 ++ /* Negate TREQ. Set TACK to input and TREQ to output. */
2150 ++ out_8(&via[B], in_8(&via[B]) | TREQ);
2151 ++ out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);
2152 +
2153 + pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
2154 + timeout = 100000;
2155 +@@ -1418,8 +1419,8 @@ pmu_sr_intr(void)
2156 + struct adb_request *req;
2157 + int bite = 0;
2158 +
2159 +- if (via[B] & TREQ) {
2160 +- printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
2161 ++ if (in_8(&via[B]) & TREQ) {
2162 ++ printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
2163 + out_8(&via[IFR], SR_INT);
2164 + return NULL;
2165 + }
2166 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2167 +index ce14a3d1f609..44df244807e5 100644
2168 +--- a/drivers/md/dm-cache-target.c
2169 ++++ b/drivers/md/dm-cache-target.c
2170 +@@ -2250,7 +2250,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2171 + {0, 2, "Invalid number of cache feature arguments"},
2172 + };
2173 +
2174 +- int r;
2175 ++ int r, mode_ctr = 0;
2176 + unsigned argc;
2177 + const char *arg;
2178 + struct cache_features *cf = &ca->features;
2179 +@@ -2264,14 +2264,20 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2180 + while (argc--) {
2181 + arg = dm_shift_arg(as);
2182 +
2183 +- if (!strcasecmp(arg, "writeback"))
2184 ++ if (!strcasecmp(arg, "writeback")) {
2185 + cf->io_mode = CM_IO_WRITEBACK;
2186 ++ mode_ctr++;
2187 ++ }
2188 +
2189 +- else if (!strcasecmp(arg, "writethrough"))
2190 ++ else if (!strcasecmp(arg, "writethrough")) {
2191 + cf->io_mode = CM_IO_WRITETHROUGH;
2192 ++ mode_ctr++;
2193 ++ }
2194 +
2195 +- else if (!strcasecmp(arg, "passthrough"))
2196 ++ else if (!strcasecmp(arg, "passthrough")) {
2197 + cf->io_mode = CM_IO_PASSTHROUGH;
2198 ++ mode_ctr++;
2199 ++ }
2200 +
2201 + else if (!strcasecmp(arg, "metadata2"))
2202 + cf->metadata_version = 2;
2203 +@@ -2282,6 +2288,11 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2204 + }
2205 + }
2206 +
2207 ++ if (mode_ctr > 1) {
2208 ++ *error = "Duplicate cache io_mode features requested";
2209 ++ return -EINVAL;
2210 ++ }
2211 ++
2212 + return 0;
2213 + }
2214 +
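parse_features() previously let a later io_mode argument silently override an earlier one; the mode_ctr counter added above turns that into an explicit error once more than one of writeback/writethrough/passthrough is seen. A compact sketch of the counting pattern:

#include <stdio.h>
#include <strings.h>

int main(void)
{
    const char *args[] = { "writeback", "passthrough", "metadata2" };
    int mode_ctr = 0;

    for (int i = 0; i < 3; i++) {
        if (!strcasecmp(args[i], "writeback") ||
            !strcasecmp(args[i], "writethrough") ||
            !strcasecmp(args[i], "passthrough"))
            mode_ctr++;   /* count every io_mode selection */
    }

    if (mode_ctr > 1) {
        fprintf(stderr, "Duplicate cache io_mode features requested\n");
        return 1;
    }
    return 0;
}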
2215 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2216 +index 2031506a0ecd..49107c52c8e6 100644
2217 +--- a/drivers/md/raid5.c
2218 ++++ b/drivers/md/raid5.c
2219 +@@ -4521,6 +4521,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
2220 + s->failed++;
2221 + if (rdev && !test_bit(Faulty, &rdev->flags))
2222 + do_recovery = 1;
2223 ++ else if (!rdev) {
2224 ++ rdev = rcu_dereference(
2225 ++ conf->disks[i].replacement);
2226 ++ if (rdev && !test_bit(Faulty, &rdev->flags))
2227 ++ do_recovery = 1;
2228 ++ }
2229 + }
2230 +
2231 + if (test_bit(R5_InJournal, &dev->flags))
2232 +diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
2233 +index a0d0b53c91d7..a5de65dcf784 100644
2234 +--- a/drivers/media/dvb-frontends/helene.c
2235 ++++ b/drivers/media/dvb-frontends/helene.c
2236 +@@ -897,7 +897,10 @@ static int helene_x_pon(struct helene_priv *priv)
2237 + helene_write_regs(priv, 0x99, cdata, sizeof(cdata));
2238 +
2239 + /* 0x81 - 0x94 */
2240 +- data[0] = 0x18; /* xtal 24 MHz */
2241 ++ if (priv->xtal == SONY_HELENE_XTAL_16000)
2242 ++ data[0] = 0x10; /* xtal 16 MHz */
2243 ++ else
2244 ++ data[0] = 0x18; /* xtal 24 MHz */
2245 + data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
2246 + data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
2247 + data[3] = 0x80; /* REFOUT signal output 500mVpp */
2248 +diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
2249 +index 7be636237acf..0f324055cc9f 100644
2250 +--- a/drivers/media/platform/davinci/vpif_display.c
2251 ++++ b/drivers/media/platform/davinci/vpif_display.c
2252 +@@ -1114,6 +1114,14 @@ vpif_init_free_channel_objects:
2253 + return err;
2254 + }
2255 +
2256 ++static void free_vpif_objs(void)
2257 ++{
2258 ++ int i;
2259 ++
2260 ++ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
2261 ++ kfree(vpif_obj.dev[i]);
2262 ++}
2263 ++
2264 + static int vpif_async_bound(struct v4l2_async_notifier *notifier,
2265 + struct v4l2_subdev *subdev,
2266 + struct v4l2_async_subdev *asd)
2267 +@@ -1255,11 +1263,6 @@ static __init int vpif_probe(struct platform_device *pdev)
2268 + return -EINVAL;
2269 + }
2270 +
2271 +- if (!pdev->dev.platform_data) {
2272 +- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
2273 +- return -EINVAL;
2274 +- }
2275 +-
2276 + vpif_dev = &pdev->dev;
2277 + err = initialize_vpif();
2278 +
2279 +@@ -1271,7 +1274,7 @@ static __init int vpif_probe(struct platform_device *pdev)
2280 + err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
2281 + if (err) {
2282 + v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
2283 +- return err;
2284 ++ goto vpif_free;
2285 + }
2286 +
2287 + while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
2288 +@@ -1314,7 +1317,10 @@ static __init int vpif_probe(struct platform_device *pdev)
2289 + if (vpif_obj.sd[i])
2290 + vpif_obj.sd[i]->grp_id = 1 << i;
2291 + }
2292 +- vpif_probe_complete();
2293 ++ err = vpif_probe_complete();
2294 ++ if (err) {
2295 ++ goto probe_subdev_out;
2296 ++ }
2297 + } else {
2298 + vpif_obj.notifier.subdevs = vpif_obj.config->asd;
2299 + vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
2300 +@@ -1334,6 +1340,8 @@ probe_subdev_out:
2301 + kfree(vpif_obj.sd);
2302 + vpif_unregister:
2303 + v4l2_device_unregister(&vpif_obj.v4l2_dev);
2304 ++vpif_free:
2305 ++ free_vpif_objs();
2306 +
2307 + return err;
2308 + }
2309 +@@ -1355,8 +1363,8 @@ static int vpif_remove(struct platform_device *device)
2310 + ch = vpif_obj.dev[i];
2311 + /* Unregister video device */
2312 + video_unregister_device(&ch->video_dev);
2313 +- kfree(vpif_obj.dev[i]);
2314 + }
2315 ++ free_vpif_objs();
2316 +
2317 + return 0;
2318 + }
2319 +diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
2320 +index 226f36ef7419..2bf65805f2c1 100644
2321 +--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c
2322 ++++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
2323 +@@ -392,9 +392,6 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
2324 + !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
2325 + return -ENOLINK;
2326 +
2327 +- dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
2328 +- data_type;
2329 +-
2330 + if (tg->enabled) {
2331 + /* Config Test Generator */
2332 + struct v4l2_mbus_framefmt *f =
2333 +@@ -416,6 +413,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
2334 + writel_relaxed(val, csid->base +
2335 + CAMSS_CSID_TG_DT_n_CGG_0(0));
2336 +
2337 ++ dt = csid_get_fmt_entry(
2338 ++ csid->fmt[MSM_CSID_PAD_SRC].code)->data_type;
2339 ++
2340 + /* 5:0 data type */
2341 + val = dt;
2342 + writel_relaxed(val, csid->base +
2343 +@@ -425,6 +425,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
2344 + val = tg->payload_mode;
2345 + writel_relaxed(val, csid->base +
2346 + CAMSS_CSID_TG_DT_n_CGG_2(0));
2347 ++
2348 ++ df = csid_get_fmt_entry(
2349 ++ csid->fmt[MSM_CSID_PAD_SRC].code)->decode_format;
2350 + } else {
2351 + struct csid_phy_config *phy = &csid->phy;
2352 +
2353 +@@ -439,13 +442,16 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
2354 +
2355 + writel_relaxed(val,
2356 + csid->base + CAMSS_CSID_CORE_CTRL_1);
2357 ++
2358 ++ dt = csid_get_fmt_entry(
2359 ++ csid->fmt[MSM_CSID_PAD_SINK].code)->data_type;
2360 ++ df = csid_get_fmt_entry(
2361 ++ csid->fmt[MSM_CSID_PAD_SINK].code)->decode_format;
2362 + }
2363 +
2364 + /* Config LUT */
2365 +
2366 + dt_shift = (cid % 4) * 8;
2367 +- df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
2368 +- decode_format;
2369 +
2370 + val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
2371 + val &= ~(0xff << dt_shift);
2372 +diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
2373 +index daef72d410a3..dc5ae8025832 100644
2374 +--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
2375 ++++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
2376 +@@ -339,6 +339,7 @@ enum rcar_csi2_pads {
2377 +
2378 + struct rcar_csi2_info {
2379 + int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps);
2380 ++ int (*confirm_start)(struct rcar_csi2 *priv);
2381 + const struct rcsi2_mbps_reg *hsfreqrange;
2382 + unsigned int csi0clkfreqrange;
2383 + bool clear_ulps;
2384 +@@ -545,6 +546,13 @@ static int rcsi2_start(struct rcar_csi2 *priv)
2385 + if (ret)
2386 + return ret;
2387 +
2388 ++ /* Confirm start */
2389 ++ if (priv->info->confirm_start) {
2390 ++ ret = priv->info->confirm_start(priv);
2391 ++ if (ret)
2392 ++ return ret;
2393 ++ }
2394 ++
2395 + /* Clear Ultra Low Power interrupt. */
2396 + if (priv->info->clear_ulps)
2397 + rcsi2_write(priv, INTSTATE_REG,
2398 +@@ -880,6 +888,11 @@ static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
2399 + }
2400 +
2401 + static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
2402 ++{
2403 ++ return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
2404 ++}
2405 ++
2406 ++static int rcsi2_confirm_start_v3m_e3(struct rcar_csi2 *priv)
2407 + {
2408 + static const struct phtw_value step1[] = {
2409 + { .data = 0xed, .code = 0x34 },
2410 +@@ -890,12 +903,6 @@ static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
2411 + { /* sentinel */ },
2412 + };
2413 +
2414 +- int ret;
2415 +-
2416 +- ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
2417 +- if (ret)
2418 +- return ret;
2419 +-
2420 + return rcsi2_phtw_write_array(priv, step1);
2421 + }
2422 +
2423 +@@ -949,6 +956,7 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {
2424 +
2425 + static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = {
2426 + .init_phtw = rcsi2_init_phtw_v3m_e3,
2427 ++ .confirm_start = rcsi2_confirm_start_v3m_e3,
2428 + };
2429 +
2430 + static const struct of_device_id rcar_csi2_of_table[] = {
2431 +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
2432 +index a80251ed3143..780548dd650e 100644
2433 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
2434 ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
2435 +@@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
2436 + static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
2437 + {
2438 + struct s5p_mfc_dev *dev = ctx->dev;
2439 +- struct s5p_mfc_buf *dst_buf, *src_buf;
2440 +- size_t dec_y_addr;
2441 ++ struct s5p_mfc_buf *dst_buf, *src_buf;
2442 ++ u32 dec_y_addr;
2443 + unsigned int frame_type;
2444 +
2445 + /* Make sure we actually have a new frame before continuing. */
2446 + frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
2447 + if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
2448 + return;
2449 +- dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
2450 ++ dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
2451 +
2452 + /* Copy timestamp / timecode from decoded src to dst and set
2453 + appropriate flags. */
2454 + src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
2455 + list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
2456 +- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
2457 +- == dec_y_addr) {
2458 +- dst_buf->b->timecode =
2459 +- src_buf->b->timecode;
2460 ++ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
2461 ++
2462 ++ if (addr == dec_y_addr) {
2463 ++ dst_buf->b->timecode = src_buf->b->timecode;
2464 + dst_buf->b->vb2_buf.timestamp =
2465 + src_buf->b->vb2_buf.timestamp;
2466 + dst_buf->b->flags &=
2467 +@@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
2468 + {
2469 + struct s5p_mfc_dev *dev = ctx->dev;
2470 + struct s5p_mfc_buf *dst_buf;
2471 +- size_t dspl_y_addr;
2472 ++ u32 dspl_y_addr;
2473 + unsigned int frame_type;
2474 +
2475 +- dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
2476 ++ dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
2477 + if (IS_MFCV6_PLUS(dev))
2478 + frame_type = s5p_mfc_hw_call(dev->mfc_ops,
2479 + get_disp_frame_type, ctx);
2480 +@@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
2481 + /* The MFC returns the address of the buffer; now we have to
2482 + * check which videobuf it corresponds to */
2483 + list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
2484 ++ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
2485 ++
2486 + /* Check if this is the buffer we're looking for */
2487 +- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
2488 +- == dspl_y_addr) {
2489 ++ if (addr == dspl_y_addr) {
2490 + list_del(&dst_buf->list);
2491 + ctx->dst_queue_cnt--;
2492 + dst_buf->b->sequence = ctx->sequence;
2493 +diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
2494 +index 0d4fdd34a710..9ce8b4d79d1f 100644
2495 +--- a/drivers/media/usb/dvb-usb/dw2102.c
2496 ++++ b/drivers/media/usb/dvb-usb/dw2102.c
2497 +@@ -2101,14 +2101,12 @@ static struct dvb_usb_device_properties s6x0_properties = {
2498 + }
2499 + };
2500 +
2501 +-static struct dvb_usb_device_properties *p1100;
2502 + static const struct dvb_usb_device_description d1100 = {
2503 + "Prof 1100 USB ",
2504 + {&dw2102_table[PROF_1100], NULL},
2505 + {NULL},
2506 + };
2507 +
2508 +-static struct dvb_usb_device_properties *s660;
2509 + static const struct dvb_usb_device_description d660 = {
2510 + "TeVii S660 USB",
2511 + {&dw2102_table[TEVII_S660], NULL},
2512 +@@ -2127,14 +2125,12 @@ static const struct dvb_usb_device_description d480_2 = {
2513 + {NULL},
2514 + };
2515 +
2516 +-static struct dvb_usb_device_properties *p7500;
2517 + static const struct dvb_usb_device_description d7500 = {
2518 + "Prof 7500 USB DVB-S2",
2519 + {&dw2102_table[PROF_7500], NULL},
2520 + {NULL},
2521 + };
2522 +
2523 +-static struct dvb_usb_device_properties *s421;
2524 + static const struct dvb_usb_device_description d421 = {
2525 + "TeVii S421 PCI",
2526 + {&dw2102_table[TEVII_S421], NULL},
2527 +@@ -2334,6 +2330,11 @@ static int dw2102_probe(struct usb_interface *intf,
2528 + const struct usb_device_id *id)
2529 + {
2530 + int retval = -ENOMEM;
2531 ++ struct dvb_usb_device_properties *p1100;
2532 ++ struct dvb_usb_device_properties *s660;
2533 ++ struct dvb_usb_device_properties *p7500;
2534 ++ struct dvb_usb_device_properties *s421;
2535 ++
2536 + p1100 = kmemdup(&s6x0_properties,
2537 + sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
2538 + if (!p1100)
2539 +@@ -2402,8 +2403,16 @@ static int dw2102_probe(struct usb_interface *intf,
2540 + 0 == dvb_usb_device_init(intf, &t220_properties,
2541 + THIS_MODULE, NULL, adapter_nr) ||
2542 + 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
2543 +- THIS_MODULE, NULL, adapter_nr))
2544 ++ THIS_MODULE, NULL, adapter_nr)) {
2545 ++
2546 ++ /* clean up copied properties */
2547 ++ kfree(s421);
2548 ++ kfree(p7500);
2549 ++ kfree(s660);
2550 ++ kfree(p1100);
2551 ++
2552 + return 0;
2553 ++ }
2554 +
2555 + retval = -ENODEV;
2556 + kfree(s421);
2557 +diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
2558 +index 6c8438311d3b..ff5e41ac4723 100644
2559 +--- a/drivers/media/usb/em28xx/em28xx-cards.c
2560 ++++ b/drivers/media/usb/em28xx/em28xx-cards.c
2561 +@@ -3376,7 +3376,9 @@ void em28xx_free_device(struct kref *ref)
2562 + if (!dev->disconnected)
2563 + em28xx_release_resources(dev);
2564 +
2565 +- kfree(dev->alt_max_pkt_size_isoc);
2566 ++ if (dev->ts == PRIMARY_TS)
2567 ++ kfree(dev->alt_max_pkt_size_isoc);
2568 ++
2569 + kfree(dev);
2570 + }
2571 + EXPORT_SYMBOL_GPL(em28xx_free_device);
2572 +diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
2573 +index f70845e7d8c6..45b24776a695 100644
2574 +--- a/drivers/media/usb/em28xx/em28xx-core.c
2575 ++++ b/drivers/media/usb/em28xx/em28xx-core.c
2576 +@@ -655,12 +655,12 @@ int em28xx_capture_start(struct em28xx *dev, int start)
2577 + rc = em28xx_write_reg_bits(dev,
2578 + EM2874_R5F_TS_ENABLE,
2579 + start ? EM2874_TS1_CAPTURE_ENABLE : 0x00,
2580 +- EM2874_TS1_CAPTURE_ENABLE);
2581 ++ EM2874_TS1_CAPTURE_ENABLE | EM2874_TS1_FILTER_ENABLE | EM2874_TS1_NULL_DISCARD);
2582 + else
2583 + rc = em28xx_write_reg_bits(dev,
2584 + EM2874_R5F_TS_ENABLE,
2585 + start ? EM2874_TS2_CAPTURE_ENABLE : 0x00,
2586 +- EM2874_TS2_CAPTURE_ENABLE);
2587 ++ EM2874_TS2_CAPTURE_ENABLE | EM2874_TS2_FILTER_ENABLE | EM2874_TS2_NULL_DISCARD);
2588 + } else {
2589 + /* FIXME: which is the best order? */
2590 + /* video registers are sampled by VREF */
2591 +diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
2592 +index b778d8a1983e..a73faf12f7e4 100644
2593 +--- a/drivers/media/usb/em28xx/em28xx-dvb.c
2594 ++++ b/drivers/media/usb/em28xx/em28xx-dvb.c
2595 +@@ -218,7 +218,9 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
2596 + dvb_alt = dev->dvb_alt_isoc;
2597 + }
2598 +
2599 +- usb_set_interface(udev, dev->ifnum, dvb_alt);
2600 ++ if (!dev->board.has_dual_ts)
2601 ++ usb_set_interface(udev, dev->ifnum, dvb_alt);
2602 ++
2603 + rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
2604 + if (rc < 0)
2605 + return rc;
2606 +diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
2607 +index 31112f622b88..475e5b3790ed 100644
2608 +--- a/drivers/memory/ti-aemif.c
2609 ++++ b/drivers/memory/ti-aemif.c
2610 +@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
2611 + if (ret < 0)
2612 + goto error;
2613 + }
2614 +- } else {
2615 ++ } else if (pdata) {
2616 + for (i = 0; i < pdata->num_sub_devices; i++) {
2617 + pdata->sub_devices[i].dev.parent = dev;
2618 + ret = platform_device_register(&pdata->sub_devices[i]);
2619 +diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
2620 +index 36dcd98977d6..4f545fdc6ebc 100644
2621 +--- a/drivers/mfd/rave-sp.c
2622 ++++ b/drivers/mfd/rave-sp.c
2623 +@@ -776,6 +776,13 @@ static int rave_sp_probe(struct serdev_device *serdev)
2624 + return ret;
2625 +
2626 + serdev_device_set_baudrate(serdev, baud);
2627 ++ serdev_device_set_flow_control(serdev, false);
2628 ++
2629 ++ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
2630 ++ if (ret) {
2631 ++ dev_err(dev, "Failed to set parity\n");
2632 ++ return ret;
2633 ++ }
2634 +
2635 + ret = rave_sp_get_status(sp);
2636 + if (ret) {
2637 +diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
2638 +index 47012c0899cd..7a30546880a4 100644
2639 +--- a/drivers/mfd/ti_am335x_tscadc.c
2640 ++++ b/drivers/mfd/ti_am335x_tscadc.c
2641 +@@ -209,14 +209,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
2642 + * The TSC_ADC_SS controller design assumes the OCP clock is
2643 + * at least 6x faster than the ADC clock.
2644 + */
2645 +- clk = clk_get(&pdev->dev, "adc_tsc_fck");
2646 ++ clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
2647 + if (IS_ERR(clk)) {
2648 + dev_err(&pdev->dev, "failed to get TSC fck\n");
2649 + err = PTR_ERR(clk);
2650 + goto err_disable_clk;
2651 + }
2652 + clock_rate = clk_get_rate(clk);
2653 +- clk_put(clk);
2654 + tscadc->clk_div = clock_rate / ADC_CLK;
2655 +
2656 + /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
2657 +diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
2658 +index 7b2dddcdd46d..42f7a12894d6 100644
2659 +--- a/drivers/misc/mic/scif/scif_api.c
2660 ++++ b/drivers/misc/mic/scif/scif_api.c
2661 +@@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
2662 + goto scif_bind_exit;
2663 + }
2664 + } else {
2665 +- pn = scif_get_new_port();
2666 +- if (!pn) {
2667 +- ret = -ENOSPC;
2668 ++ ret = scif_get_new_port();
2669 ++ if (ret < 0)
2670 + goto scif_bind_exit;
2671 +- }
2672 ++ pn = ret;
2673 + }
2674 +
2675 + ep->state = SCIFEP_BOUND;
2676 +@@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
2677 + err = -EISCONN;
2678 + break;
2679 + case SCIFEP_UNBOUND:
2680 +- ep->port.port = scif_get_new_port();
2681 +- if (!ep->port.port) {
2682 +- err = -ENOSPC;
2683 +- } else {
2684 +- ep->port.node = scif_info.nodeid;
2685 +- ep->conn_async_state = ASYNC_CONN_IDLE;
2686 +- }
2687 ++ err = scif_get_new_port();
2688 ++ if (err < 0)
2689 ++ break;
2690 ++ ep->port.port = err;
2691 ++ ep->port.node = scif_info.nodeid;
2692 ++ ep->conn_async_state = ASYNC_CONN_IDLE;
2693 + /* Fall through */
2694 + case SCIFEP_BOUND:
2695 + /*
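Both scif hunks convert the scif_get_new_port() callers to the usual kernel convention of a single return carrying either a valid value or a negative errno: test ret < 0 first, assign afterwards, instead of treating 0 as a failure sentinel. A minimal illustration of the calling pattern:

#include <errno.h>
#include <stdio.h>

/* returns a fresh port number, or -ENOSPC when the range is exhausted */
static int get_new_port(void)
{
    static int next = 1024;

    if (next > 1030)
        return -ENOSPC;
    return next++;
}

int main(void)
{
    int ret = get_new_port();

    if (ret < 0) {               /* check before using the value */
        fprintf(stderr, "bind failed: %d\n", ret);
        return 1;
    }
    printf("bound to port %d\n", ret);
    return 0;
}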
2696 +diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
2697 +index 5ec3f5a43718..14a5e9da32bd 100644
2698 +--- a/drivers/misc/ti-st/st_kim.c
2699 ++++ b/drivers/misc/ti-st/st_kim.c
2700 +@@ -756,14 +756,14 @@ static int kim_probe(struct platform_device *pdev)
2701 + err = gpio_request(kim_gdata->nshutdown, "kim");
2702 + if (unlikely(err)) {
2703 + pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
2704 +- return err;
2705 ++ goto err_sysfs_group;
2706 + }
2707 +
2708 + /* Configure nShutdown GPIO as output=0 */
2709 + err = gpio_direction_output(kim_gdata->nshutdown, 0);
2710 + if (unlikely(err)) {
2711 + pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
2712 +- return err;
2713 ++ goto err_sysfs_group;
2714 + }
2715 + /* get reference of pdev for request_firmware
2716 + */
2717 +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
2718 +index b01d15ec4c56..3e3e6a8f1abc 100644
2719 +--- a/drivers/mtd/nand/raw/nand_base.c
2720 ++++ b/drivers/mtd/nand/raw/nand_base.c
2721 +@@ -2668,8 +2668,8 @@ static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2722 + return subop && instr_idx < subop->ninstrs;
2723 + }
2724 +
2725 +-static int nand_subop_get_start_off(const struct nand_subop *subop,
2726 +- unsigned int instr_idx)
2727 ++static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2728 ++ unsigned int instr_idx)
2729 + {
2730 + if (instr_idx)
2731 + return 0;
2732 +@@ -2688,12 +2688,12 @@ static int nand_subop_get_start_off(const struct nand_subop *subop,
2733 + *
2734 + * Given an address instruction, returns the offset of the first cycle to issue.
2735 + */
2736 +-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2737 +- unsigned int instr_idx)
2738 ++unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2739 ++ unsigned int instr_idx)
2740 + {
2741 +- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2742 +- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2743 +- return -EINVAL;
2744 ++ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2745 ++ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2746 ++ return 0;
2747 +
2748 + return nand_subop_get_start_off(subop, instr_idx);
2749 + }
2750 +@@ -2710,14 +2710,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2751 + *
2752 + * Given an address instruction, returns the number of address cycle to issue.
2753 + */
2754 +-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2755 +- unsigned int instr_idx)
2756 ++unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2757 ++ unsigned int instr_idx)
2758 + {
2759 + int start_off, end_off;
2760 +
2761 +- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2762 +- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2763 +- return -EINVAL;
2764 ++ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2765 ++ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2766 ++ return 0;
2767 +
2768 + start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2769 +
2770 +@@ -2742,12 +2742,12 @@ EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2771 + *
2772 + * Given a data instruction, returns the offset to start from.
2773 + */
2774 +-int nand_subop_get_data_start_off(const struct nand_subop *subop,
2775 +- unsigned int instr_idx)
2776 ++unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2777 ++ unsigned int instr_idx)
2778 + {
2779 +- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2780 +- !nand_instr_is_data(&subop->instrs[instr_idx]))
2781 +- return -EINVAL;
2782 ++ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2783 ++ !nand_instr_is_data(&subop->instrs[instr_idx])))
2784 ++ return 0;
2785 +
2786 + return nand_subop_get_start_off(subop, instr_idx);
2787 + }
2788 +@@ -2764,14 +2764,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2789 + *
2790 + * Returns the length of the chunk of data to send/receive.
2791 + */
2792 +-int nand_subop_get_data_len(const struct nand_subop *subop,
2793 +- unsigned int instr_idx)
2794 ++unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2795 ++ unsigned int instr_idx)
2796 + {
2797 + int start_off = 0, end_off;
2798 +
2799 +- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2800 +- !nand_instr_is_data(&subop->instrs[instr_idx]))
2801 +- return -EINVAL;
2802 ++ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2803 ++ !nand_instr_is_data(&subop->instrs[instr_idx])))
2804 ++ return 0;
2805 +
2806 + start_off = nand_subop_get_data_start_off(subop, instr_idx);
2807 +
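The nand_base changes give the subop accessors an unsigned return type and replace the -EINVAL paths with WARN_ON plus a safe 0, because callers use the result directly as a cycle count or buffer offset and a negative errno reinterpreted as unsigned would become a huge bogus value. A sketch of that defensive contract:

#include <stdio.h>

/* result is used as an offset, so misuse must never produce a value
 * that wraps to something enormous when treated as unsigned */
static unsigned int get_start_off(int instr_idx, int ninstrs)
{
    if (instr_idx < 0 || instr_idx >= ninstrs) {
        fprintf(stderr, "WARN: bad subop index %d\n", instr_idx);
        return 0;   /* safe fallback instead of -EINVAL */
    }
    return instr_idx ? 0 : 3; /* illustrative offsets only */
}

int main(void)
{
    printf("%u\n", get_start_off(1, 2));
    printf("%u\n", get_start_off(5, 2)); /* warns, returns 0 */
    return 0;
}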
2808 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
2809 +index 82ac1d10f239..b4253d0e056b 100644
2810 +--- a/drivers/net/ethernet/marvell/mvneta.c
2811 ++++ b/drivers/net/ethernet/marvell/mvneta.c
2812 +@@ -3196,7 +3196,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
2813 +
2814 + on_each_cpu(mvneta_percpu_enable, pp, true);
2815 + mvneta_start_dev(pp);
2816 +- mvneta_port_up(pp);
2817 +
2818 + netdev_update_features(dev);
2819 +
2820 +diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
2821 +index 0c5b68e7da51..9b3167054843 100644
2822 +--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
2823 ++++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
2824 +@@ -22,7 +22,7 @@
2825 + #include <linux/mdio-mux.h>
2826 + #include <linux/delay.h>
2827 +
2828 +-#define MDIO_PARAM_OFFSET 0x00
2829 ++#define MDIO_PARAM_OFFSET 0x23c
2830 + #define MDIO_PARAM_MIIM_CYCLE 29
2831 + #define MDIO_PARAM_INTERNAL_SEL 25
2832 + #define MDIO_PARAM_BUS_ID 22
2833 +@@ -30,20 +30,22 @@
2834 + #define MDIO_PARAM_PHY_ID 16
2835 + #define MDIO_PARAM_PHY_DATA 0
2836 +
2837 +-#define MDIO_READ_OFFSET 0x04
2838 ++#define MDIO_READ_OFFSET 0x240
2839 + #define MDIO_READ_DATA_MASK 0xffff
2840 +-#define MDIO_ADDR_OFFSET 0x08
2841 ++#define MDIO_ADDR_OFFSET 0x244
2842 +
2843 +-#define MDIO_CTRL_OFFSET 0x0C
2844 ++#define MDIO_CTRL_OFFSET 0x248
2845 + #define MDIO_CTRL_WRITE_OP 0x1
2846 + #define MDIO_CTRL_READ_OP 0x2
2847 +
2848 +-#define MDIO_STAT_OFFSET 0x10
2849 ++#define MDIO_STAT_OFFSET 0x24c
2850 + #define MDIO_STAT_DONE 1
2851 +
2852 + #define BUS_MAX_ADDR 32
2853 + #define EXT_BUS_START_ADDR 16
2854 +
2855 ++#define MDIO_REG_ADDR_SPACE_SIZE 0x250
2856 ++
2857 + struct iproc_mdiomux_desc {
2858 + void *mux_handle;
2859 + void __iomem *base;
2860 +@@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
2861 + md->dev = &pdev->dev;
2862 +
2863 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2864 ++ if (res->start & 0xfff) {
2865 ++ /* For backward compatibility in case the
2866 ++ * base address is specified with an offset.
2867 ++ */
2868 ++ dev_info(&pdev->dev, "fix base address in dt-blob\n");
2869 ++ res->start &= ~0xfff;
2870 ++ res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
2871 ++ }
2872 + md->base = devm_ioremap_resource(&pdev->dev, res);
2873 + if (IS_ERR(md->base)) {
2874 + dev_err(&pdev->dev, "failed to ioremap register\n");
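The new probe logic recognizes a legacy device tree whose register resource points at a register inside the block (so the low 12 bits of the base are non-zero), and rewrites it to the 4 KiB-aligned block base spanning the full 0x250-byte register window. The alignment arithmetic in isolation, with a hypothetical address:

#include <stdint.h>
#include <stdio.h>

#define MDIO_REG_ADDR_SPACE_SIZE 0x250

int main(void)
{
    uint64_t start = 0x1800023c;  /* hypothetical legacy DT: base + offset */
    uint64_t end;

    if (start & 0xfff)            /* offset baked into the base address? */
        start &= ~0xfffULL;       /* realign to the 4 KiB block base */

    end = start + MDIO_REG_ADDR_SPACE_SIZE - 1;
    printf("start=0x%llx end=0x%llx\n",
           (unsigned long long)start, (unsigned long long)end);
    return 0;
}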
2875 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2876 +index 836e0a47b94a..747c6951b5c1 100644
2877 +--- a/drivers/net/wireless/ath/ath10k/mac.c
2878 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
2879 +@@ -3085,6 +3085,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
2880 + passive = channel->flags & IEEE80211_CHAN_NO_IR;
2881 + ch->passive = passive;
2882 +
2883 ++ /* the firmware is ignoring the "radar" flag of the
2884 ++ * channel and is scanning actively using Probe Requests
2885 ++ * on "Radar detection"/DFS channels which are not
2886 ++ * marked as "available"
2887 ++ */
2888 ++ ch->passive |= ch->chan_radar;
2889 ++
2890 + ch->freq = channel->center_freq;
2891 + ch->band_center_freq1 = channel->center_freq;
2892 + ch->min_power = 0;
2893 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2894 +index 8c49a26fc571..21eb3a598a86 100644
2895 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2896 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2897 +@@ -1584,6 +1584,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
2898 + cfg->keep_alive_pattern_size = __cpu_to_le32(0);
2899 + cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
2900 + cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
2901 ++ cfg->wmi_send_separate = __cpu_to_le32(0);
2902 ++ cfg->num_ocb_vdevs = __cpu_to_le32(0);
2903 ++ cfg->num_ocb_channels = __cpu_to_le32(0);
2904 ++ cfg->num_ocb_schedules = __cpu_to_le32(0);
2905 ++ cfg->host_capab = __cpu_to_le32(0);
2906 +
2907 + ath10k_wmi_put_host_mem_chunks(ar, chunks);
2908 +
2909 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2910 +index 3e1e340cd834..1cb93d09b8a9 100644
2911 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2912 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2913 +@@ -1670,6 +1670,11 @@ struct wmi_tlv_resource_config {
2914 + __le32 keep_alive_pattern_size;
2915 + __le32 max_tdls_concurrent_sleep_sta;
2916 + __le32 max_tdls_concurrent_buffer_sta;
2917 ++ __le32 wmi_send_separate;
2918 ++ __le32 num_ocb_vdevs;
2919 ++ __le32 num_ocb_channels;
2920 ++ __le32 num_ocb_schedules;
2921 ++ __le32 host_capab;
2922 + } __packed;
2923 +
2924 + struct wmi_tlv_init_cmd {
2925 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2926 +index e60bea4604e4..fcd9d5eeae72 100644
2927 +--- a/drivers/net/wireless/ath/ath9k/hw.c
2928 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
2929 +@@ -2942,16 +2942,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
2930 + struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
2931 + struct ieee80211_channel *channel;
2932 + int chan_pwr, new_pwr;
2933 ++ u16 ctl = NO_CTL;
2934 +
2935 + if (!chan)
2936 + return;
2937 +
2938 ++ if (!test)
2939 ++ ctl = ath9k_regd_get_ctl(reg, chan);
2940 ++
2941 + channel = chan->chan;
2942 + chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
2943 + new_pwr = min_t(int, chan_pwr, reg->power_limit);
2944 +
2945 +- ah->eep_ops->set_txpower(ah, chan,
2946 +- ath9k_regd_get_ctl(reg, chan),
2947 ++ ah->eep_ops->set_txpower(ah, chan, ctl,
2948 + get_antenna_gain(ah, chan), new_pwr, test);
2949 + }
2950 +
2951 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2952 +index 7fdb152be0bb..a249ee747dc9 100644
2953 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
2954 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
2955 +@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
2956 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2957 + struct ieee80211_sta *sta = info->status.status_driver_data[0];
2958 +
2959 +- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
2960 ++ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
2961 ++ IEEE80211_TX_STATUS_EOSP)) {
2962 + ieee80211_tx_status(hw, skb);
2963 + return;
2964 + }
2965 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2966 +index 8520523b91b4..d8d8443c1c93 100644
2967 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2968 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2969 +@@ -1003,6 +1003,10 @@ static int iwl_pci_resume(struct device *device)
2970 + if (!trans->op_mode)
2971 + return 0;
2972 +
2973 ++ /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
2974 ++ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
2975 ++ return 0;
2976 ++
2977 + /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
2978 + iwl_pcie_conf_msix_hw(trans_pcie);
2979 +
2980 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2981 +index 7229991ae70d..a2a98087eb41 100644
2982 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2983 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2984 +@@ -1539,18 +1539,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
2985 +
2986 + iwl_pcie_enable_rx_wake(trans, true);
2987 +
2988 +- /*
2989 +- * Reconfigure IVAR table in case of MSIX or reset ict table in
2990 +- * MSI mode since HW reset erased it.
2991 +- * Also enables interrupts - none will happen as
2992 +- * the device doesn't know we're waking it up, only when
2993 +- * the opmode actually tells it after this call.
2994 +- */
2995 +- iwl_pcie_conf_msix_hw(trans_pcie);
2996 +- if (!trans_pcie->msix_enabled)
2997 +- iwl_pcie_reset_ict(trans);
2998 +- iwl_enable_interrupts(trans);
2999 +-
3000 + iwl_set_bit(trans, CSR_GP_CNTRL,
3001 + BIT(trans->cfg->csr->flag_mac_access_req));
3002 + iwl_set_bit(trans, CSR_GP_CNTRL,
3003 +@@ -1568,6 +1556,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
3004 + return ret;
3005 + }
3006 +
3007 ++ /*
3008 ++ * Reconfigure IVAR table in case of MSIX or reset ict table in
3009 ++ * MSI mode since HW reset erased it.
3010 ++ * Also enables interrupts - none will happen as
3011 ++ * the device doesn't know we're waking it up, only when
3012 ++ * the opmode actually tells it after this call.
3013 ++ */
3014 ++ iwl_pcie_conf_msix_hw(trans_pcie);
3015 ++ if (!trans_pcie->msix_enabled)
3016 ++ iwl_pcie_reset_ict(trans);
3017 ++ iwl_enable_interrupts(trans);
3018 ++
3019 + iwl_pcie_set_pwr(trans, false);
3020 +
3021 + if (!reset) {
3022 +diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
3023 +index 0f15696195f8..078a4940bc5c 100644
3024 +--- a/drivers/net/wireless/ti/wlcore/rx.c
3025 ++++ b/drivers/net/wireless/ti/wlcore/rx.c
3026 +@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
3027 + static void wl1271_rx_status(struct wl1271 *wl,
3028 + struct wl1271_rx_descriptor *desc,
3029 + struct ieee80211_rx_status *status,
3030 +- u8 beacon)
3031 ++ u8 beacon, u8 probe_rsp)
3032 + {
3033 + memset(status, 0, sizeof(struct ieee80211_rx_status));
3034 +
3035 +@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
3036 + }
3037 + }
3038 +
3039 ++ if (beacon || probe_rsp)
3040 ++ status->boottime_ns = ktime_get_boot_ns();
3041 ++
3042 + if (beacon)
3043 + wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
3044 + status->band);
3045 +@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
3046 + if (ieee80211_is_data_present(hdr->frame_control))
3047 + is_data = 1;
3048 +
3049 +- wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
3050 ++ wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
3051 ++ ieee80211_is_probe_resp(hdr->frame_control));
3052 + wlcore_hw_set_rx_csum(wl, desc, skb);
3053 +
3054 + seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
3055 +diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
3056 +index cf0aa7cee5b0..a939e8d31735 100644
3057 +--- a/drivers/pci/controller/pcie-mobiveil.c
3058 ++++ b/drivers/pci/controller/pcie-mobiveil.c
3059 +@@ -23,6 +23,8 @@
3060 + #include <linux/platform_device.h>
3061 + #include <linux/slab.h>
3062 +
3063 ++#include "../pci.h"
3064 ++
3065 + /* register offsets and bit positions */
3066 +
3067 + /*
3068 +@@ -130,7 +132,7 @@ struct mobiveil_pcie {
3069 + void __iomem *config_axi_slave_base; /* endpoint config base */
3070 + void __iomem *csr_axi_slave_base; /* root port config base */
3071 + void __iomem *apb_csr_base; /* MSI register base */
3072 +- void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */
3073 ++ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
3074 + struct irq_domain *intx_domain;
3075 + raw_spinlock_t intx_mask_lock;
3076 + int irq;
3077 +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
3078 +index 47cd0c037433..f96af1467984 100644
3079 +--- a/drivers/pci/switch/switchtec.c
3080 ++++ b/drivers/pci/switch/switchtec.c
3081 +@@ -14,6 +14,8 @@
3082 + #include <linux/poll.h>
3083 + #include <linux/wait.h>
3084 +
3085 ++#include <linux/nospec.h>
3086 ++
3087 + MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
3088 + MODULE_VERSION("0.1");
3089 + MODULE_LICENSE("GPL");
3090 +@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
3091 + default:
3092 + if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
3093 + return -EINVAL;
3094 ++ p.port = array_index_nospec(p.port,
3095 ++ ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
3096 + p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
3097 + break;
3098 + }
3099 +diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
3100 +index d6d183e9db17..b5903fffb3d0 100644
3101 +--- a/drivers/pinctrl/berlin/berlin.c
3102 ++++ b/drivers/pinctrl/berlin/berlin.c
3103 +@@ -216,10 +216,8 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
3104 + }
3105 +
3106 + /* we will reallocate later */
3107 +- pctrl->functions = devm_kcalloc(&pdev->dev,
3108 +- max_functions,
3109 +- sizeof(*pctrl->functions),
3110 +- GFP_KERNEL);
3111 ++ pctrl->functions = kcalloc(max_functions,
3112 ++ sizeof(*pctrl->functions), GFP_KERNEL);
3113 + if (!pctrl->functions)
3114 + return -ENOMEM;
3115 +
3116 +@@ -257,8 +255,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
3117 + function++;
3118 + }
3119 +
3120 +- if (!found)
3121 ++ if (!found) {
3122 ++ kfree(pctrl->functions);
3123 + return -EINVAL;
3124 ++ }
3125 +
3126 + if (!function->groups) {
3127 + function->groups =
3128 +@@ -267,8 +267,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
3129 + sizeof(char *),
3130 + GFP_KERNEL);
3131 +
3132 +- if (!function->groups)
3133 ++ if (!function->groups) {
3134 ++ kfree(pctrl->functions);
3135 + return -ENOMEM;
3136 ++ }
3137 + }
3138 +
3139 + groups = function->groups;
3140 +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
3141 +index 1c6bb15579e1..b04edc22dad7 100644
3142 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c
3143 ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
3144 +@@ -383,7 +383,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
3145 + const char *name;
3146 + int i, ret;
3147 +
3148 +- if (group > pctldev->num_groups)
3149 ++ if (group >= pctldev->num_groups)
3150 + return;
3151 +
3152 + seq_puts(s, "\n");
3153 +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
3154 +index 04ae139671c8..b91db89eb924 100644
3155 +--- a/drivers/pinctrl/pinctrl-amd.c
3156 ++++ b/drivers/pinctrl/pinctrl-amd.c
3157 +@@ -552,7 +552,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
3158 + /* Each status bit covers four pins */
3159 + for (i = 0; i < 4; i++) {
3160 + regval = readl(regs + i);
3161 +- if (!(regval & PIN_IRQ_PENDING))
3162 ++ if (!(regval & PIN_IRQ_PENDING) ||
3163 ++ !(regval & BIT(INTERRUPT_MASK_OFF)))
3164 + continue;
3165 + irq = irq_find_mapping(gc->irq.domain, irqnr + i);
3166 + generic_handle_irq(irq);
3167 +diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
3168 +index fc12badf3805..d84fab616abf 100644
3169 +--- a/drivers/regulator/tps65217-regulator.c
3170 ++++ b/drivers/regulator/tps65217-regulator.c
3171 +@@ -232,6 +232,8 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
3172 + tps->strobes = devm_kcalloc(&pdev->dev,
3173 + TPS65217_NUM_REGULATOR, sizeof(u8),
3174 + GFP_KERNEL);
3175 ++ if (!tps->strobes)
3176 ++ return -ENOMEM;
3177 +
3178 + platform_set_drvdata(pdev, tps);
3179 +
3180 +diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
3181 +index b714a543a91d..8122807db380 100644
3182 +--- a/drivers/rpmsg/rpmsg_core.c
3183 ++++ b/drivers/rpmsg/rpmsg_core.c
3184 +@@ -15,6 +15,7 @@
3185 + #include <linux/module.h>
3186 + #include <linux/rpmsg.h>
3187 + #include <linux/of_device.h>
3188 ++#include <linux/pm_domain.h>
3189 + #include <linux/slab.h>
3190 +
3191 + #include "rpmsg_internal.h"
3192 +@@ -449,6 +450,10 @@ static int rpmsg_dev_probe(struct device *dev)
3193 + struct rpmsg_endpoint *ept = NULL;
3194 + int err;
3195 +
3196 ++ err = dev_pm_domain_attach(dev, true);
3197 ++ if (err)
3198 ++ goto out;
3199 ++
3200 + if (rpdrv->callback) {
3201 + strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
3202 + chinfo.src = rpdev->src;
3203 +@@ -490,6 +495,8 @@ static int rpmsg_dev_remove(struct device *dev)
3204 +
3205 + rpdrv->remove(rpdev);
3206 +
3207 ++ dev_pm_domain_detach(dev, true);
3208 ++
3209 + if (rpdev->ept)
3210 + rpmsg_destroy_ept(rpdev->ept);
3211 +
3212 +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
3213 +index 99ba4a770406..27521fc3ef5a 100644
3214 +--- a/drivers/scsi/3w-9xxx.c
3215 ++++ b/drivers/scsi/3w-9xxx.c
3216 +@@ -2038,6 +2038,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3217 +
3218 + if (twa_initialize_device_extension(tw_dev)) {
3219 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
3220 ++ retval = -ENOMEM;
3221 + goto out_free_device_extension;
3222 + }
3223 +
3224 +@@ -2060,6 +2061,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3225 + tw_dev->base_addr = ioremap(mem_addr, mem_len);
3226 + if (!tw_dev->base_addr) {
3227 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
3228 ++ retval = -ENOMEM;
3229 + goto out_release_mem_region;
3230 + }
3231 +
3232 +@@ -2067,8 +2069,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3233 + TW_DISABLE_INTERRUPTS(tw_dev);
3234 +
3235 + /* Initialize the card */
3236 +- if (twa_reset_sequence(tw_dev, 0))
3237 ++ if (twa_reset_sequence(tw_dev, 0)) {
3238 ++ retval = -ENOMEM;
3239 + goto out_iounmap;
3240 ++ }
3241 +
3242 + /* Set host specific parameters */
3243 + if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
3244 +diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
3245 +index cf9f2a09b47d..40c1e6e64f58 100644
3246 +--- a/drivers/scsi/3w-sas.c
3247 ++++ b/drivers/scsi/3w-sas.c
3248 +@@ -1594,6 +1594,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3249 +
3250 + if (twl_initialize_device_extension(tw_dev)) {
3251 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
3252 ++ retval = -ENOMEM;
3253 + goto out_free_device_extension;
3254 + }
3255 +
3256 +@@ -1608,6 +1609,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3257 + tw_dev->base_addr = pci_iomap(pdev, 1, 0);
3258 + if (!tw_dev->base_addr) {
3259 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
3260 ++ retval = -ENOMEM;
3261 + goto out_release_mem_region;
3262 + }
3263 +
3264 +@@ -1617,6 +1619,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3265 + /* Initialize the card */
3266 + if (twl_reset_sequence(tw_dev, 0)) {
3267 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
3268 ++ retval = -ENOMEM;
3269 + goto out_iounmap;
3270 + }
3271 +
3272 +diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
3273 +index f6179e3d6953..961ea6f7def8 100644
3274 +--- a/drivers/scsi/3w-xxxx.c
3275 ++++ b/drivers/scsi/3w-xxxx.c
3276 +@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3277 +
3278 + if (tw_initialize_device_extension(tw_dev)) {
3279 + printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
3280 ++ retval = -ENOMEM;
3281 + goto out_free_device_extension;
3282 + }
3283 +
3284 +@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3285 + tw_dev->base_addr = pci_resource_start(pdev, 0);
3286 + if (!tw_dev->base_addr) {
3287 + printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
3288 ++ retval = -ENOMEM;
3289 + goto out_release_mem_region;
3290 + }
3291 +
3292 +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
3293 +index 20b249a649dd..902004dc8dc7 100644
3294 +--- a/drivers/scsi/lpfc/lpfc.h
3295 ++++ b/drivers/scsi/lpfc/lpfc.h
3296 +@@ -672,7 +672,7 @@ struct lpfc_hba {
3297 + #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
3298 + #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
3299 + #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
3300 +-#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */
3301 ++#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
3302 +
3303 + uint32_t hba_flag; /* hba generic flags */
3304 + #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
3305 +diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
3306 +index 76a5a99605aa..d723fd1d7b26 100644
3307 +--- a/drivers/scsi/lpfc/lpfc_nvme.c
3308 ++++ b/drivers/scsi/lpfc/lpfc_nvme.c
3309 +@@ -2687,7 +2687,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3310 + struct lpfc_nvme_rport *oldrport;
3311 + struct nvme_fc_remote_port *remote_port;
3312 + struct nvme_fc_port_info rpinfo;
3313 +- struct lpfc_nodelist *prev_ndlp;
3314 ++ struct lpfc_nodelist *prev_ndlp = NULL;
3315 +
3316 + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
3317 + "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
3318 +@@ -2736,23 +2736,29 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3319 + spin_unlock_irq(&vport->phba->hbalock);
3320 + rport = remote_port->private;
3321 + if (oldrport) {
3322 ++ /* New remoteport record does not guarantee valid
3323 ++ * host private memory area.
3324 ++ */
3325 ++ prev_ndlp = oldrport->ndlp;
3326 + if (oldrport == remote_port->private) {
3327 +- /* Same remoteport. Just reuse. */
3328 ++ /* Same remoteport - ndlp should match.
3329 ++ * Just reuse.
3330 ++ */
3331 + lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3332 + LOG_NVME_DISC,
3333 + "6014 Rebinding lport to "
3334 + "remoteport %p wwpn 0x%llx, "
3335 +- "Data: x%x x%x %p x%x x%06x\n",
3336 ++ "Data: x%x x%x %p %p x%x x%06x\n",
3337 + remote_port,
3338 + remote_port->port_name,
3339 + remote_port->port_id,
3340 + remote_port->port_role,
3341 ++ prev_ndlp,
3342 + ndlp,
3343 + ndlp->nlp_type,
3344 + ndlp->nlp_DID);
3345 + return 0;
3346 + }
3347 +- prev_ndlp = rport->ndlp;
3348 +
3349 + /* Sever the ndlp<->rport association
3350 + * before dropping the ndlp ref from
3351 +@@ -2786,13 +2792,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3352 + lpfc_printf_vlog(vport, KERN_INFO,
3353 + LOG_NVME_DISC | LOG_NODE,
3354 + "6022 Binding new rport to "
3355 +- "lport %p Remoteport %p WWNN 0x%llx, "
3356 ++ "lport %p Remoteport %p rport %p WWNN 0x%llx, "
3357 + "Rport WWPN 0x%llx DID "
3358 +- "x%06x Role x%x, ndlp %p\n",
3359 +- lport, remote_port,
3360 ++ "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
3361 ++ lport, remote_port, rport,
3362 + rpinfo.node_name, rpinfo.port_name,
3363 + rpinfo.port_id, rpinfo.port_role,
3364 +- ndlp);
3365 ++ ndlp, prev_ndlp);
3366 + } else {
3367 + lpfc_printf_vlog(vport, KERN_ERR,
3368 + LOG_NVME_DISC | LOG_NODE,
3369 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
3370 +index ec550ee0108e..75d34def2361 100644
3371 +--- a/drivers/scsi/qla2xxx/qla_init.c
3372 ++++ b/drivers/scsi/qla2xxx/qla_init.c
3373 +@@ -1074,9 +1074,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
3374 + case PDS_PLOGI_COMPLETE:
3375 + case PDS_PRLI_PENDING:
3376 + case PDS_PRLI2_PENDING:
3377 +- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
3378 +- __func__, __LINE__, fcport->port_name);
3379 +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3380 ++			/* Set discovery state back to GNL for relogin attempt */
3381 ++ if (qla_dual_mode_enabled(vha) ||
3382 ++ qla_ini_mode_enabled(vha)) {
3383 ++ fcport->disc_state = DSC_GNL;
3384 ++ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3385 ++ }
3386 + return;
3387 + case PDS_LOGO_PENDING:
3388 + case PDS_PORT_UNAVAILABLE:
3389 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
3390 +index 1027b0cb7fa3..6dc1b1bd8069 100644
3391 +--- a/drivers/scsi/qla2xxx/qla_target.c
3392 ++++ b/drivers/scsi/qla2xxx/qla_target.c
3393 +@@ -982,8 +982,9 @@ void qlt_free_session_done(struct work_struct *work)
3394 +
3395 + logo.id = sess->d_id;
3396 + logo.cmd_count = 0;
3397 ++ if (!own)
3398 ++ qlt_send_first_logo(vha, &logo);
3399 + sess->send_els_logo = 0;
3400 +- qlt_send_first_logo(vha, &logo);
3401 + }
3402 +
3403 + if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
3404 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
3405 +index 731ca0d8520a..9f3c263756a8 100644
3406 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c
3407 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
3408 +@@ -571,6 +571,15 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
3409 + }
3410 + break;
3411 +
3412 ++ case T268_BUF_TYPE_REQ_MIRROR:
3413 ++ case T268_BUF_TYPE_RSP_MIRROR:
3414 ++ /*
3415 ++ * Mirror pointers are not implemented in the
3416 ++		 * driver; instead, shadow pointers are used by
3417 ++		 * the driver. Skip these entries.
3418 ++ */
3419 ++ qla27xx_skip_entry(ent, buf);
3420 ++ break;
3421 + default:
3422 + ql_dbg(ql_dbg_async, vha, 0xd02b,
3423 + "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
3424 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3425 +index ee5081ba5313..1fc87a3260cc 100644
3426 +--- a/drivers/target/target_core_transport.c
3427 ++++ b/drivers/target/target_core_transport.c
3428 +@@ -316,6 +316,7 @@ void __transport_register_session(
3429 + {
3430 + const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
3431 + unsigned char buf[PR_REG_ISID_LEN];
3432 ++ unsigned long flags;
3433 +
3434 + se_sess->se_tpg = se_tpg;
3435 + se_sess->fabric_sess_ptr = fabric_sess_ptr;
3436 +@@ -352,7 +353,7 @@ void __transport_register_session(
3437 + se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
3438 + }
3439 +
3440 +- spin_lock_irq(&se_nacl->nacl_sess_lock);
3441 ++ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
3442 + /*
3443 + * The se_nacl->nacl_sess pointer will be set to the
3444 + * last active I_T Nexus for each struct se_node_acl.
3445 +@@ -361,7 +362,7 @@ void __transport_register_session(
3446 +
3447 + list_add_tail(&se_sess->sess_acl_list,
3448 + &se_nacl->acl_sess_list);
3449 +- spin_unlock_irq(&se_nacl->nacl_sess_lock);
3450 ++ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
3451 + }
3452 + list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
3453 +
3454 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
3455 +index d8dc3d22051f..b8dc5efc606b 100644
3456 +--- a/drivers/target/target_core_user.c
3457 ++++ b/drivers/target/target_core_user.c
3458 +@@ -1745,9 +1745,11 @@ static int tcmu_configure_device(struct se_device *dev)
3459 +
3460 + info = &udev->uio_info;
3461 +
3462 ++ mutex_lock(&udev->cmdr_lock);
3463 + udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
3464 + sizeof(unsigned long),
3465 + GFP_KERNEL);
3466 ++ mutex_unlock(&udev->cmdr_lock);
3467 + if (!udev->data_bitmap) {
3468 + ret = -ENOMEM;
3469 + goto err_bitmap_alloc;
3470 +@@ -1957,7 +1959,7 @@ static match_table_t tokens = {
3471 + {Opt_hw_block_size, "hw_block_size=%u"},
3472 + {Opt_hw_max_sectors, "hw_max_sectors=%u"},
3473 + {Opt_nl_reply_supported, "nl_reply_supported=%d"},
3474 +- {Opt_max_data_area_mb, "max_data_area_mb=%u"},
3475 ++ {Opt_max_data_area_mb, "max_data_area_mb=%d"},
3476 + {Opt_err, NULL}
3477 + };
3478 +
3479 +@@ -1985,13 +1987,48 @@ static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
3480 + return 0;
3481 + }
3482 +
3483 ++static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
3484 ++{
3485 ++ int val, ret;
3486 ++
3487 ++ ret = match_int(arg, &val);
3488 ++ if (ret < 0) {
3489 ++ pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
3490 ++ ret);
3491 ++ return ret;
3492 ++ }
3493 ++
3494 ++ if (val <= 0) {
3495 ++ pr_err("Invalid max_data_area %d.\n", val);
3496 ++ return -EINVAL;
3497 ++ }
3498 ++
3499 ++ mutex_lock(&udev->cmdr_lock);
3500 ++ if (udev->data_bitmap) {
3501 ++ pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
3502 ++ ret = -EINVAL;
3503 ++ goto unlock;
3504 ++ }
3505 ++
3506 ++ udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
3507 ++ if (udev->max_blocks > tcmu_global_max_blocks) {
3508 ++ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
3509 ++ val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
3510 ++ udev->max_blocks = tcmu_global_max_blocks;
3511 ++ }
3512 ++
3513 ++unlock:
3514 ++ mutex_unlock(&udev->cmdr_lock);
3515 ++ return ret;
3516 ++}
3517 ++
3518 + static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
3519 + const char *page, ssize_t count)
3520 + {
3521 + struct tcmu_dev *udev = TCMU_DEV(dev);
3522 + char *orig, *ptr, *opts, *arg_p;
3523 + substring_t args[MAX_OPT_ARGS];
3524 +- int ret = 0, token, tmpval;
3525 ++ int ret = 0, token;
3526 +
3527 + opts = kstrdup(page, GFP_KERNEL);
3528 + if (!opts)
3529 +@@ -2044,37 +2081,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
3530 + pr_err("kstrtoint() failed for nl_reply_supported=\n");
3531 + break;
3532 + case Opt_max_data_area_mb:
3533 +- if (dev->export_count) {
3534 +- pr_err("Unable to set max_data_area_mb while exports exist\n");
3535 +- ret = -EINVAL;
3536 +- break;
3537 +- }
3538 +-
3539 +- arg_p = match_strdup(&args[0]);
3540 +- if (!arg_p) {
3541 +- ret = -ENOMEM;
3542 +- break;
3543 +- }
3544 +- ret = kstrtoint(arg_p, 0, &tmpval);
3545 +- kfree(arg_p);
3546 +- if (ret < 0) {
3547 +- pr_err("kstrtoint() failed for max_data_area_mb=\n");
3548 +- break;
3549 +- }
3550 +-
3551 +- if (tmpval <= 0) {
3552 +- pr_err("Invalid max_data_area %d\n", tmpval);
3553 +- ret = -EINVAL;
3554 +- break;
3555 +- }
3556 +-
3557 +- udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
3558 +- if (udev->max_blocks > tcmu_global_max_blocks) {
3559 +- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
3560 +- tmpval,
3561 +- TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
3562 +- udev->max_blocks = tcmu_global_max_blocks;
3563 +- }
3564 ++ ret = tcmu_set_max_blocks_param(udev, &args[0]);
3565 + break;
3566 + default:
3567 + break;
3568 +diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
3569 +index 45fb284d4c11..e77e63070e99 100644
3570 +--- a/drivers/thermal/rcar_thermal.c
3571 ++++ b/drivers/thermal/rcar_thermal.c
3572 +@@ -598,7 +598,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
3573 + enr_bits |= 3 << (i * 8);
3574 + }
3575 +
3576 +- if (enr_bits)
3577 ++ if (common->base && enr_bits)
3578 + rcar_thermal_common_write(common, ENR, enr_bits);
3579 +
3580 + dev_info(dev, "%d sensor probed\n", i);
3581 +diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
3582 +index 11278836ed12..0bd47007c57f 100644
3583 +--- a/drivers/thermal/thermal_hwmon.c
3584 ++++ b/drivers/thermal/thermal_hwmon.c
3585 +@@ -142,6 +142,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
3586 +
3587 + INIT_LIST_HEAD(&hwmon->tz_list);
3588 + strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
3589 ++ strreplace(hwmon->type, '-', '_');
3590 + hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type,
3591 + hwmon, NULL, NULL);
3592 + if (IS_ERR(hwmon->device)) {
3593 +diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
3594 +index bdd17d2aaafd..b121d8f8f3d7 100644
3595 +--- a/drivers/tty/rocket.c
3596 ++++ b/drivers/tty/rocket.c
3597 +@@ -1881,7 +1881,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
3598 + ByteIO_t UPCIRingInd = 0;
3599 +
3600 + if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
3601 +- pci_enable_device(dev))
3602 ++ pci_enable_device(dev) || i >= NUM_BOARDS)
3603 + return 0;
3604 +
3605 + rcktpt_io_addr[i] = pci_resource_start(dev, 0);
3606 +diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
3607 +index f68c1121fa7c..6c58ad1abd7e 100644
3608 +--- a/drivers/uio/uio.c
3609 ++++ b/drivers/uio/uio.c
3610 +@@ -622,6 +622,12 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
3611 + ssize_t retval;
3612 + s32 irq_on;
3613 +
3614 ++ if (count != sizeof(s32))
3615 ++ return -EINVAL;
3616 ++
3617 ++ if (copy_from_user(&irq_on, buf, count))
3618 ++ return -EFAULT;
3619 ++
3620 + mutex_lock(&idev->info_lock);
3621 + if (!idev->info) {
3622 + retval = -EINVAL;
3623 +@@ -633,21 +639,11 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
3624 + goto out;
3625 + }
3626 +
3627 +- if (count != sizeof(s32)) {
3628 +- retval = -EINVAL;
3629 +- goto out;
3630 +- }
3631 +-
3632 + if (!idev->info->irqcontrol) {
3633 + retval = -ENOSYS;
3634 + goto out;
3635 + }
3636 +
3637 +- if (copy_from_user(&irq_on, buf, count)) {
3638 +- retval = -EFAULT;
3639 +- goto out;
3640 +- }
3641 +-
3642 + retval = idev->info->irqcontrol(idev->info, irq_on);
3643 +
3644 + out:
3645 +@@ -955,8 +951,6 @@ int __uio_register_device(struct module *owner,
3646 + if (ret)
3647 + goto err_uio_dev_add_attributes;
3648 +
3649 +- info->uio_dev = idev;
3650 +-
3651 + if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
3652 + /*
3653 + * Note that we deliberately don't use devm_request_irq
3654 +@@ -972,6 +966,7 @@ int __uio_register_device(struct module *owner,
3655 + goto err_request_irq;
3656 + }
3657 +
3658 ++ info->uio_dev = idev;
3659 + return 0;
3660 +
3661 + err_request_irq:
3662 +diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
3663 +index 9400a9f6318a..5057b9f0f846 100644
3664 +--- a/fs/autofs/autofs_i.h
3665 ++++ b/fs/autofs/autofs_i.h
3666 +@@ -26,6 +26,7 @@
3667 + #include <linux/list.h>
3668 + #include <linux/completion.h>
3669 + #include <linux/file.h>
3670 ++#include <linux/magic.h>
3671 +
3672 + /* This is the range of ioctl() numbers we claim as ours */
3673 + #define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
3674 +@@ -124,7 +125,8 @@ struct autofs_sb_info {
3675 +
3676 + static inline struct autofs_sb_info *autofs_sbi(struct super_block *sb)
3677 + {
3678 +- return (struct autofs_sb_info *)(sb->s_fs_info);
3679 ++ return sb->s_magic != AUTOFS_SUPER_MAGIC ?
3680 ++ NULL : (struct autofs_sb_info *)(sb->s_fs_info);
3681 + }
3682 +
3683 + static inline struct autofs_info *autofs_dentry_ino(struct dentry *dentry)
3684 +diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
3685 +index b51980fc274e..846c052569dd 100644
3686 +--- a/fs/autofs/inode.c
3687 ++++ b/fs/autofs/inode.c
3688 +@@ -10,7 +10,6 @@
3689 + #include <linux/seq_file.h>
3690 + #include <linux/pagemap.h>
3691 + #include <linux/parser.h>
3692 +-#include <linux/magic.h>
3693 +
3694 + #include "autofs_i.h"
3695 +
3696 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3697 +index 53cac20650d8..4ab0bccfa281 100644
3698 +--- a/fs/btrfs/extent-tree.c
3699 ++++ b/fs/btrfs/extent-tree.c
3700 +@@ -5935,7 +5935,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
3701 + * root: the root of the parent directory
3702 + * rsv: block reservation
3703 + * items: the number of items that we need do reservation
3704 +- * qgroup_reserved: used to return the reserved size in qgroup
3705 ++ * use_global_rsv: allow fallback to the global block reservation
3706 + *
3707 + * This function is used to reserve the space for snapshot/subvolume
3708 + * creation and deletion. Those operations are different with the
3709 +@@ -5945,10 +5945,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
3710 + * the space reservation mechanism in start_transaction().
3711 + */
3712 + int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
3713 +- struct btrfs_block_rsv *rsv,
3714 +- int items,
3715 ++ struct btrfs_block_rsv *rsv, int items,
3716 + bool use_global_rsv)
3717 + {
3718 ++ u64 qgroup_num_bytes = 0;
3719 + u64 num_bytes;
3720 + int ret;
3721 + struct btrfs_fs_info *fs_info = root->fs_info;
3722 +@@ -5956,12 +5956,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
3723 +
3724 + if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
3725 + /* One for parent inode, two for dir entries */
3726 +- num_bytes = 3 * fs_info->nodesize;
3727 +- ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
3728 ++ qgroup_num_bytes = 3 * fs_info->nodesize;
3729 ++ ret = btrfs_qgroup_reserve_meta_prealloc(root,
3730 ++ qgroup_num_bytes, true);
3731 + if (ret)
3732 + return ret;
3733 +- } else {
3734 +- num_bytes = 0;
3735 + }
3736 +
3737 + num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
3738 +@@ -5973,8 +5972,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
3739 + if (ret == -ENOSPC && use_global_rsv)
3740 + ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
3741 +
3742 +- if (ret && num_bytes)
3743 +- btrfs_qgroup_free_meta_prealloc(root, num_bytes);
3744 ++ if (ret && qgroup_num_bytes)
3745 ++ btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
3746 +
3747 + return ret;
3748 + }
3749 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3750 +index b077544b5232..f3d6be0c657b 100644
3751 +--- a/fs/btrfs/ioctl.c
3752 ++++ b/fs/btrfs/ioctl.c
3753 +@@ -3463,6 +3463,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3754 +
3755 + same_lock_start = min_t(u64, loff, dst_loff);
3756 + same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3757 ++ } else {
3758 ++ /*
3759 ++ * If the source and destination inodes are different, the
3760 ++ * source's range end offset matches the source's i_size, that
3761 ++ * i_size is not a multiple of the sector size, and the
3762 ++ * destination range does not go past the destination's i_size,
3763 ++ * we must round down the length to the nearest sector size
3764 ++		 * multiple. If we don't do this adjustment we end up replacing
3765 ++ * with zeroes the bytes in the range that starts at the
3766 ++ * deduplication range's end offset and ends at the next sector
3767 ++ * size multiple.
3768 ++ */
3769 ++ if (loff + olen == i_size_read(src) &&
3770 ++ dst_loff + len < i_size_read(dst)) {
3771 ++ const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
3772 ++
3773 ++ len = round_down(i_size_read(src), sz) - loff;
3774 ++ olen = len;
3775 ++ }
3776 + }
3777 +
3778 + again:
3779 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3780 +index 9d02563b2147..44043f809a3c 100644
3781 +--- a/fs/cifs/connect.c
3782 ++++ b/fs/cifs/connect.c
3783 +@@ -2523,7 +2523,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
3784 + if (tcon == NULL)
3785 + return -ENOMEM;
3786 +
3787 +- snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
3788 ++ snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
3789 +
3790 + /* cannot fail */
3791 + nls_codepage = load_nls_default();
3792 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3793 +index 9051b9dfd590..d279fa5472db 100644
3794 +--- a/fs/cifs/inode.c
3795 ++++ b/fs/cifs/inode.c
3796 +@@ -469,6 +469,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
3797 + oparms.cifs_sb = cifs_sb;
3798 + oparms.desired_access = GENERIC_READ;
3799 + oparms.create_options = CREATE_NOT_DIR;
3800 ++ if (backup_cred(cifs_sb))
3801 ++ oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
3802 + oparms.disposition = FILE_OPEN;
3803 + oparms.path = path;
3804 + oparms.fid = &fid;
3805 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3806 +index ee6c4a952ce9..5ecbc99f46e4 100644
3807 +--- a/fs/cifs/smb2ops.c
3808 ++++ b/fs/cifs/smb2ops.c
3809 +@@ -626,7 +626,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
3810 + oparms.tcon = tcon;
3811 + oparms.desired_access = FILE_READ_ATTRIBUTES;
3812 + oparms.disposition = FILE_OPEN;
3813 +- oparms.create_options = 0;
3814 ++ if (backup_cred(cifs_sb))
3815 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3816 ++ else
3817 ++ oparms.create_options = 0;
3818 + oparms.fid = &fid;
3819 + oparms.reconnect = false;
3820 +
3821 +@@ -775,7 +778,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
3822 + oparms.tcon = tcon;
3823 + oparms.desired_access = FILE_READ_EA;
3824 + oparms.disposition = FILE_OPEN;
3825 +- oparms.create_options = 0;
3826 ++ if (backup_cred(cifs_sb))
3827 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3828 ++ else
3829 ++ oparms.create_options = 0;
3830 + oparms.fid = &fid;
3831 + oparms.reconnect = false;
3832 +
3833 +@@ -854,7 +860,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
3834 + oparms.tcon = tcon;
3835 + oparms.desired_access = FILE_WRITE_EA;
3836 + oparms.disposition = FILE_OPEN;
3837 +- oparms.create_options = 0;
3838 ++ if (backup_cred(cifs_sb))
3839 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3840 ++ else
3841 ++ oparms.create_options = 0;
3842 + oparms.fid = &fid;
3843 + oparms.reconnect = false;
3844 +
3845 +@@ -1460,7 +1469,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
3846 + oparms.tcon = tcon;
3847 + oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
3848 + oparms.disposition = FILE_OPEN;
3849 +- oparms.create_options = 0;
3850 ++ if (backup_cred(cifs_sb))
3851 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3852 ++ else
3853 ++ oparms.create_options = 0;
3854 + oparms.fid = fid;
3855 + oparms.reconnect = false;
3856 +
3857 +@@ -1735,7 +1747,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
3858 + oparms.tcon = tcon;
3859 + oparms.desired_access = FILE_READ_ATTRIBUTES;
3860 + oparms.disposition = FILE_OPEN;
3861 +- oparms.create_options = 0;
3862 ++ if (backup_cred(cifs_sb))
3863 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3864 ++ else
3865 ++ oparms.create_options = 0;
3866 + oparms.fid = &fid;
3867 + oparms.reconnect = false;
3868 +
3869 +@@ -3463,7 +3478,7 @@ struct smb_version_values smb21_values = {
3870 + struct smb_version_values smb3any_values = {
3871 + .version_string = SMB3ANY_VERSION_STRING,
3872 + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
3873 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3874 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3875 + .large_lock_type = 0,
3876 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3877 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3878 +@@ -3484,7 +3499,7 @@ struct smb_version_values smb3any_values = {
3879 + struct smb_version_values smbdefault_values = {
3880 + .version_string = SMBDEFAULT_VERSION_STRING,
3881 + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
3882 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3883 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3884 + .large_lock_type = 0,
3885 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3886 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3887 +@@ -3505,7 +3520,7 @@ struct smb_version_values smbdefault_values = {
3888 + struct smb_version_values smb30_values = {
3889 + .version_string = SMB30_VERSION_STRING,
3890 + .protocol_id = SMB30_PROT_ID,
3891 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3892 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3893 + .large_lock_type = 0,
3894 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3895 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3896 +@@ -3526,7 +3541,7 @@ struct smb_version_values smb30_values = {
3897 + struct smb_version_values smb302_values = {
3898 + .version_string = SMB302_VERSION_STRING,
3899 + .protocol_id = SMB302_PROT_ID,
3900 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3901 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3902 + .large_lock_type = 0,
3903 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3904 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3905 +@@ -3548,7 +3563,7 @@ struct smb_version_values smb302_values = {
3906 + struct smb_version_values smb311_values = {
3907 + .version_string = SMB311_VERSION_STRING,
3908 + .protocol_id = SMB311_PROT_ID,
3909 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3910 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3911 + .large_lock_type = 0,
3912 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3913 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3914 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3915 +index 44e511a35559..82be1dfeca33 100644
3916 +--- a/fs/cifs/smb2pdu.c
3917 ++++ b/fs/cifs/smb2pdu.c
3918 +@@ -2179,6 +2179,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3919 + if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
3920 + *oplock == SMB2_OPLOCK_LEVEL_NONE)
3921 + req->RequestedOplockLevel = *oplock;
3922 ++ else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
3923 ++ (oparms->create_options & CREATE_NOT_FILE))
3924 ++ req->RequestedOplockLevel = *oplock; /* no srv lease support */
3925 + else {
3926 + rc = add_lease_context(server, iov, &n_iov,
3927 + oparms->fid->lease_key, oplock);
3928 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
3929 +index 4d8b1de83143..b6f2dc8163e1 100644
3930 +--- a/fs/f2fs/f2fs.h
3931 ++++ b/fs/f2fs/f2fs.h
3932 +@@ -1680,18 +1680,20 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
3933 + sbi->total_valid_block_count -= diff;
3934 + if (!*count) {
3935 + spin_unlock(&sbi->stat_lock);
3936 +- percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
3937 + goto enospc;
3938 + }
3939 + }
3940 + spin_unlock(&sbi->stat_lock);
3941 +
3942 +- if (unlikely(release))
3943 ++ if (unlikely(release)) {
3944 ++ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
3945 + dquot_release_reservation_block(inode, release);
3946 ++ }
3947 + f2fs_i_blocks_write(inode, *count, true, true);
3948 + return 0;
3949 +
3950 + enospc:
3951 ++ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
3952 + dquot_release_reservation_block(inode, release);
3953 + return -ENOSPC;
3954 + }
3955 +@@ -1954,8 +1956,13 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
3956 + pgoff_t index, bool for_write)
3957 + {
3958 + #ifdef CONFIG_F2FS_FAULT_INJECTION
3959 +- struct page *page = find_lock_page(mapping, index);
3960 ++ struct page *page;
3961 +
3962 ++ if (!for_write)
3963 ++ page = find_get_page_flags(mapping, index,
3964 ++ FGP_LOCK | FGP_ACCESSED);
3965 ++ else
3966 ++ page = find_lock_page(mapping, index);
3967 + if (page)
3968 + return page;
3969 +
3970 +@@ -2812,7 +2819,7 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3971 + int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3972 + struct writeback_control *wbc,
3973 + bool do_balance, enum iostat_type io_type);
3974 +-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3975 ++int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3976 + bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3977 + void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3978 + void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3979 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
3980 +index 3ffa341cf586..4c9f9bcbd2d9 100644
3981 +--- a/fs/f2fs/file.c
3982 ++++ b/fs/f2fs/file.c
3983 +@@ -1882,7 +1882,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
3984 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3985 + struct super_block *sb = sbi->sb;
3986 + __u32 in;
3987 +- int ret;
3988 ++ int ret = 0;
3989 +
3990 + if (!capable(CAP_SYS_ADMIN))
3991 + return -EPERM;
3992 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
3993 +index 9093be6e7a7d..37ab2d10a872 100644
3994 +--- a/fs/f2fs/gc.c
3995 ++++ b/fs/f2fs/gc.c
3996 +@@ -986,7 +986,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
3997 + goto next;
3998 +
3999 + sum = page_address(sum_page);
4000 +- f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
4001 ++ if (type != GET_SUM_TYPE((&sum->footer))) {
4002 ++ f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
4003 ++ "type [%d, %d] in SSA and SIT",
4004 ++ segno, type, GET_SUM_TYPE((&sum->footer)));
4005 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
4006 ++ goto next;
4007 ++ }
4008 +
4009 + /*
4010 + * this is to avoid deadlock:
4011 +diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
4012 +index 043830be5662..2bcb2d36f024 100644
4013 +--- a/fs/f2fs/inline.c
4014 ++++ b/fs/f2fs/inline.c
4015 +@@ -130,6 +130,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
4016 + if (err)
4017 + return err;
4018 +
4019 ++ if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
4020 ++ f2fs_put_dnode(dn);
4021 ++ set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
4022 ++ f2fs_msg(fio.sbi->sb, KERN_WARNING,
4023 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
4024 ++ "run fsck to fix.",
4025 ++ __func__, dn->inode->i_ino, dn->data_blkaddr);
4026 ++ return -EINVAL;
4027 ++ }
4028 ++
4029 + f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
4030 +
4031 + f2fs_do_read_inline_data(page, dn->inode_page);
4032 +@@ -363,6 +373,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
4033 + if (err)
4034 + goto out;
4035 +
4036 ++ if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
4037 ++ f2fs_put_dnode(&dn);
4038 ++ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
4039 ++ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
4040 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
4041 ++ "run fsck to fix.",
4042 ++ __func__, dir->i_ino, dn.data_blkaddr);
4043 ++ err = -EINVAL;
4044 ++ goto out;
4045 ++ }
4046 ++
4047 + f2fs_wait_on_page_writeback(page, DATA, true);
4048 +
4049 + dentry_blk = page_address(page);
4050 +@@ -477,6 +498,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
4051 + return 0;
4052 + recover:
4053 + lock_page(ipage);
4054 ++ f2fs_wait_on_page_writeback(ipage, NODE, true);
4055 + memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
4056 + f2fs_i_depth_write(dir, 0);
4057 + f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
4058 +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
4059 +index f121c864f4c0..cf0f944fcaea 100644
4060 +--- a/fs/f2fs/inode.c
4061 ++++ b/fs/f2fs/inode.c
4062 +@@ -197,6 +197,16 @@ static bool sanity_check_inode(struct inode *inode)
4063 + __func__, inode->i_ino);
4064 + return false;
4065 + }
4066 ++
4067 ++ if (f2fs_has_extra_attr(inode) &&
4068 ++ !f2fs_sb_has_extra_attr(sbi->sb)) {
4069 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
4070 ++ f2fs_msg(sbi->sb, KERN_WARNING,
4071 ++			"%s: inode (ino=%lx) has extra_attr, "
4072 ++			"but the extra_attr feature is off",
4073 ++ __func__, inode->i_ino);
4074 ++ return false;
4075 ++ }
4076 + return true;
4077 + }
4078 +
4079 +@@ -249,6 +259,11 @@ static int do_read_inode(struct inode *inode)
4080 +
4081 + get_inline_info(inode, ri);
4082 +
4083 ++ if (!sanity_check_inode(inode)) {
4084 ++ f2fs_put_page(node_page, 1);
4085 ++ return -EINVAL;
4086 ++ }
4087 ++
4088 + fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
4089 + le16_to_cpu(ri->i_extra_isize) : 0;
4090 +
4091 +@@ -330,10 +345,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
4092 + ret = do_read_inode(inode);
4093 + if (ret)
4094 + goto bad_inode;
4095 +- if (!sanity_check_inode(inode)) {
4096 +- ret = -EINVAL;
4097 +- goto bad_inode;
4098 +- }
4099 + make_now:
4100 + if (ino == F2FS_NODE_INO(sbi)) {
4101 + inode->i_mapping->a_ops = &f2fs_node_aops;
4102 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
4103 +index 10643b11bd59..52ed02b0327c 100644
4104 +--- a/fs/f2fs/node.c
4105 ++++ b/fs/f2fs/node.c
4106 +@@ -1633,7 +1633,9 @@ next_step:
4107 + !is_cold_node(page)))
4108 + continue;
4109 + lock_node:
4110 +- if (!trylock_page(page))
4111 ++ if (wbc->sync_mode == WB_SYNC_ALL)
4112 ++ lock_page(page);
4113 ++ else if (!trylock_page(page))
4114 + continue;
4115 +
4116 + if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
4117 +@@ -1968,7 +1970,7 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
4118 + kmem_cache_free(free_nid_slab, i);
4119 + }
4120 +
4121 +-static void scan_nat_page(struct f2fs_sb_info *sbi,
4122 ++static int scan_nat_page(struct f2fs_sb_info *sbi,
4123 + struct page *nat_page, nid_t start_nid)
4124 + {
4125 + struct f2fs_nm_info *nm_i = NM_I(sbi);
4126 +@@ -1986,7 +1988,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
4127 + break;
4128 +
4129 + blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
4130 +- f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
4131 ++
4132 ++ if (blk_addr == NEW_ADDR)
4133 ++ return -EINVAL;
4134 ++
4135 + if (blk_addr == NULL_ADDR) {
4136 + add_free_nid(sbi, start_nid, true, true);
4137 + } else {
4138 +@@ -1995,6 +2000,8 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
4139 + spin_unlock(&NM_I(sbi)->nid_list_lock);
4140 + }
4141 + }
4142 ++
4143 ++ return 0;
4144 + }
4145 +
4146 + static void scan_curseg_cache(struct f2fs_sb_info *sbi)
4147 +@@ -2050,11 +2057,11 @@ out:
4148 + up_read(&nm_i->nat_tree_lock);
4149 + }
4150 +
4151 +-static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4152 ++static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4153 + bool sync, bool mount)
4154 + {
4155 + struct f2fs_nm_info *nm_i = NM_I(sbi);
4156 +- int i = 0;
4157 ++ int i = 0, ret;
4158 + nid_t nid = nm_i->next_scan_nid;
4159 +
4160 + if (unlikely(nid >= nm_i->max_nid))
4161 +@@ -2062,17 +2069,17 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4162 +
4163 + /* Enough entries */
4164 + if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
4165 +- return;
4166 ++ return 0;
4167 +
4168 + if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
4169 +- return;
4170 ++ return 0;
4171 +
4172 + if (!mount) {
4173 + /* try to find free nids in free_nid_bitmap */
4174 + scan_free_nid_bits(sbi);
4175 +
4176 + if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
4177 +- return;
4178 ++ return 0;
4179 + }
4180 +
4181 + /* readahead nat pages to be scanned */
4182 +@@ -2086,8 +2093,16 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4183 + nm_i->nat_block_bitmap)) {
4184 + struct page *page = get_current_nat_page(sbi, nid);
4185 +
4186 +- scan_nat_page(sbi, page, nid);
4187 ++ ret = scan_nat_page(sbi, page, nid);
4188 + f2fs_put_page(page, 1);
4189 ++
4190 ++ if (ret) {
4191 ++ up_read(&nm_i->nat_tree_lock);
4192 ++ f2fs_bug_on(sbi, !mount);
4193 ++ f2fs_msg(sbi->sb, KERN_ERR,
4194 ++ "NAT is corrupt, run fsck to fix it");
4195 ++ return -EINVAL;
4196 ++ }
4197 + }
4198 +
4199 + nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
4200 +@@ -2108,13 +2123,19 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4201 +
4202 + f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
4203 + nm_i->ra_nid_pages, META_NAT, false);
4204 ++
4205 ++ return 0;
4206 + }
4207 +
4208 +-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
4209 ++int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
4210 + {
4211 ++ int ret;
4212 ++
4213 + mutex_lock(&NM_I(sbi)->build_lock);
4214 +- __f2fs_build_free_nids(sbi, sync, mount);
4215 ++ ret = __f2fs_build_free_nids(sbi, sync, mount);
4216 + mutex_unlock(&NM_I(sbi)->build_lock);
4217 ++
4218 ++ return ret;
4219 + }
4220 +
4221 + /*
4222 +@@ -2801,8 +2822,7 @@ int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
4223 + /* load free nid status from nat_bits table */
4224 + load_free_nid_bitmap(sbi);
4225 +
4226 +- f2fs_build_free_nids(sbi, true, true);
4227 +- return 0;
4228 ++ return f2fs_build_free_nids(sbi, true, true);
4229 + }
4230 +
4231 + void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
4232 +diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
4233 +index 38f25f0b193a..ad70e62c5da4 100644
4234 +--- a/fs/f2fs/recovery.c
4235 ++++ b/fs/f2fs/recovery.c
4236 +@@ -241,8 +241,8 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
4237 + struct page *page = NULL;
4238 + block_t blkaddr;
4239 + unsigned int loop_cnt = 0;
4240 +- unsigned int free_blocks = sbi->user_block_count -
4241 +- valid_user_blocks(sbi);
4242 ++ unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
4243 ++ valid_user_blocks(sbi);
4244 + int err = 0;
4245 +
4246 + /* get node pages in the current segment */
4247 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
4248 +index 9efce174c51a..43fecd5eb252 100644
4249 +--- a/fs/f2fs/segment.c
4250 ++++ b/fs/f2fs/segment.c
4251 +@@ -1643,21 +1643,30 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
4252 + unsigned int start = 0, end = -1;
4253 + unsigned int secno, start_segno;
4254 + bool force = (cpc->reason & CP_DISCARD);
4255 ++ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
4256 +
4257 + mutex_lock(&dirty_i->seglist_lock);
4258 +
4259 + while (1) {
4260 + int i;
4261 ++
4262 ++ if (need_align && end != -1)
4263 ++ end--;
4264 + start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
4265 + if (start >= MAIN_SEGS(sbi))
4266 + break;
4267 + end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
4268 + start + 1);
4269 +
4270 +- for (i = start; i < end; i++)
4271 +- clear_bit(i, prefree_map);
4272 ++ if (need_align) {
4273 ++ start = rounddown(start, sbi->segs_per_sec);
4274 ++ end = roundup(end, sbi->segs_per_sec);
4275 ++ }
4276 +
4277 +- dirty_i->nr_dirty[PRE] -= end - start;
4278 ++ for (i = start; i < end; i++) {
4279 ++ if (test_and_clear_bit(i, prefree_map))
4280 ++ dirty_i->nr_dirty[PRE]--;
4281 ++ }
4282 +
4283 + if (!test_opt(sbi, DISCARD))
4284 + continue;
4285 +@@ -2437,6 +2446,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
4286 + struct discard_policy dpolicy;
4287 + unsigned long long trimmed = 0;
4288 + int err = 0;
4289 ++ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
4290 +
4291 + if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
4292 + return -EINVAL;
4293 +@@ -2454,6 +2464,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
4294 + start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
4295 + end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
4296 + GET_SEGNO(sbi, end);
4297 ++ if (need_align) {
4298 ++ start_segno = rounddown(start_segno, sbi->segs_per_sec);
4299 ++ end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
4300 ++ }
4301 +
4302 + cpc.reason = CP_DISCARD;
4303 + cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
4304 +diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
4305 +index f18fc82fbe99..38c549d77a80 100644
4306 +--- a/fs/f2fs/segment.h
4307 ++++ b/fs/f2fs/segment.h
4308 +@@ -448,6 +448,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
4309 + if (test_and_clear_bit(segno, free_i->free_segmap)) {
4310 + free_i->free_segments++;
4311 +
4312 ++ if (IS_CURSEC(sbi, secno))
4313 ++ goto skip_free;
4314 + next = find_next_bit(free_i->free_segmap,
4315 + start_segno + sbi->segs_per_sec, start_segno);
4316 + if (next >= start_segno + sbi->segs_per_sec) {
4317 +@@ -455,6 +457,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
4318 + free_i->free_sections++;
4319 + }
4320 + }
4321 ++skip_free:
4322 + spin_unlock(&free_i->segmap_lock);
4323 + }
4324 +
4325 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
4326 +index 3995e926ba3a..128d489acebb 100644
4327 +--- a/fs/f2fs/super.c
4328 ++++ b/fs/f2fs/super.c
4329 +@@ -2229,9 +2229,9 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
4330 + return 1;
4331 + }
4332 +
4333 +- if (secs_per_zone > total_sections) {
4334 ++ if (secs_per_zone > total_sections || !secs_per_zone) {
4335 + f2fs_msg(sb, KERN_INFO,
4336 +- "Wrong secs_per_zone (%u > %u)",
4337 ++ "Wrong secs_per_zone / total_sections (%u, %u)",
4338 + secs_per_zone, total_sections);
4339 + return 1;
4340 + }
4341 +@@ -2282,12 +2282,17 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
4342 + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4343 + unsigned int ovp_segments, reserved_segments;
4344 + unsigned int main_segs, blocks_per_seg;
4345 ++ unsigned int sit_segs, nat_segs;
4346 ++ unsigned int sit_bitmap_size, nat_bitmap_size;
4347 ++ unsigned int log_blocks_per_seg;
4348 + int i;
4349 +
4350 + total = le32_to_cpu(raw_super->segment_count);
4351 + fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
4352 +- fsmeta += le32_to_cpu(raw_super->segment_count_sit);
4353 +- fsmeta += le32_to_cpu(raw_super->segment_count_nat);
4354 ++ sit_segs = le32_to_cpu(raw_super->segment_count_sit);
4355 ++ fsmeta += sit_segs;
4356 ++ nat_segs = le32_to_cpu(raw_super->segment_count_nat);
4357 ++ fsmeta += nat_segs;
4358 + fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
4359 + fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
4360 +
4361 +@@ -2318,6 +2323,18 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
4362 + return 1;
4363 + }
4364 +
4365 ++ sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
4366 ++ nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
4367 ++ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
4368 ++
4369 ++ if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
4370 ++ nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
4371 ++ f2fs_msg(sbi->sb, KERN_ERR,
4372 ++ "Wrong bitmap size: sit: %u, nat:%u",
4373 ++ sit_bitmap_size, nat_bitmap_size);
4374 ++ return 1;
4375 ++ }
4376 ++
4377 + if (unlikely(f2fs_cp_error(sbi))) {
4378 + f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
4379 + return 1;
4380 +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
4381 +index 2e7e611deaef..bca1236fd6fa 100644
4382 +--- a/fs/f2fs/sysfs.c
4383 ++++ b/fs/f2fs/sysfs.c
4384 +@@ -9,6 +9,7 @@
4385 + * it under the terms of the GNU General Public License version 2 as
4386 + * published by the Free Software Foundation.
4387 + */
4388 ++#include <linux/compiler.h>
4389 + #include <linux/proc_fs.h>
4390 + #include <linux/f2fs_fs.h>
4391 + #include <linux/seq_file.h>
4392 +@@ -286,8 +287,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
4393 + bool gc_entry = (!strcmp(a->attr.name, "gc_urgent") ||
4394 + a->struct_type == GC_THREAD);
4395 +
4396 +- if (gc_entry)
4397 +- down_read(&sbi->sb->s_umount);
4398 ++ if (gc_entry) {
4399 ++ if (!down_read_trylock(&sbi->sb->s_umount))
4400 ++ return -EAGAIN;
4401 ++ }
4402 + ret = __sbi_store(a, sbi, buf, count);
4403 + if (gc_entry)
4404 + up_read(&sbi->sb->s_umount);
4405 +@@ -516,7 +519,8 @@ static struct kobject f2fs_feat = {
4406 + .kset = &f2fs_kset,
4407 + };
4408 +
4409 +-static int segment_info_seq_show(struct seq_file *seq, void *offset)
4410 ++static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
4411 ++ void *offset)
4412 + {
4413 + struct super_block *sb = seq->private;
4414 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
4415 +@@ -543,7 +547,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
4416 + return 0;
4417 + }
4418 +
4419 +-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
4420 ++static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
4421 ++ void *offset)
4422 + {
4423 + struct super_block *sb = seq->private;
4424 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
4425 +@@ -567,7 +572,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
4426 + return 0;
4427 + }
4428 +
4429 +-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
4430 ++static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
4431 ++ void *offset)
4432 + {
4433 + struct super_block *sb = seq->private;
4434 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
4435 +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
4436 +index 5d57e818d0c3..6d049dfddb14 100644
4437 +--- a/fs/nfs/callback_proc.c
4438 ++++ b/fs/nfs/callback_proc.c
4439 +@@ -215,9 +215,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
4440 + {
4441 + u32 oldseq, newseq;
4442 +
4443 +- /* Is the stateid still not initialised? */
4444 ++ /* Is the stateid not initialised? */
4445 + if (!pnfs_layout_is_valid(lo))
4446 +- return NFS4ERR_DELAY;
4447 ++ return NFS4ERR_NOMATCHING_LAYOUT;
4448 +
4449 + /* Mismatched stateid? */
4450 + if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
4451 +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
4452 +index a813979b5be0..cb905c0e606c 100644
4453 +--- a/fs/nfs/callback_xdr.c
4454 ++++ b/fs/nfs/callback_xdr.c
4455 +@@ -883,16 +883,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
4456 +
4457 + if (hdr_arg.minorversion == 0) {
4458 + cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
4459 +- if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
4460 ++ if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
4461 ++ if (cps.clp)
4462 ++ nfs_put_client(cps.clp);
4463 + goto out_invalidcred;
4464 ++ }
4465 + }
4466 +
4467 + cps.minorversion = hdr_arg.minorversion;
4468 + hdr_res.taglen = hdr_arg.taglen;
4469 + hdr_res.tag = hdr_arg.tag;
4470 +- if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
4471 ++ if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
4472 ++ if (cps.clp)
4473 ++ nfs_put_client(cps.clp);
4474 + return rpc_system_err;
4475 +-
4476 ++ }
4477 + while (status == 0 && nops != hdr_arg.nops) {
4478 + status = process_op(nops, rqstp, &xdr_in,
4479 + rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
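
Both error paths added here drop the client reference that nfs4_find_client_ident() took; without the nfs_put_client() calls, bailing out leaked a refcount and the client could never be freed. A toy illustration of the rule, with hypothetical names:

    #include <stdio.h>

    struct client { int refcount; };

    static void client_get(struct client *clp) { clp->refcount++; }
    static void client_put(struct client *clp) { clp->refcount--; }

    /* Every early return taken after a successful lookup must balance
     * the reference the lookup acquired. */
    static int handle_compound(struct client *clp, int bad_principal)
    {
        client_get(clp);
        if (bad_principal) {
            client_put(clp);   /* the fix: drop the ref before bailing out */
            return -1;
        }
        client_put(clp);
        return 0;
    }

    int main(void)
    {
        struct client c = { 0 };
        handle_compound(&c, 1);
        printf("refcount after error path: %d\n", c.refcount); /* 0: no leak */
        return 0;
    }
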
4480 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
4481 +index 979631411a0e..d7124fb12041 100644
4482 +--- a/fs/nfs/nfs4client.c
4483 ++++ b/fs/nfs/nfs4client.c
4484 +@@ -1127,7 +1127,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
4485 + nfs_server_copy_userdata(server, parent_server);
4486 +
4487 + /* Get a client representation */
4488 +-#ifdef CONFIG_SUNRPC_XPRT_RDMA
4489 ++#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
4490 + rpc_set_port(data->addr, NFS_RDMA_PORT);
4491 + error = nfs4_set_client(server, data->hostname,
4492 + data->addr,
4493 +@@ -1139,7 +1139,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
4494 + parent_client->cl_net);
4495 + if (!error)
4496 + goto init_server;
4497 +-#endif /* CONFIG_SUNRPC_XPRT_RDMA */
4498 ++#endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */
4499 +
4500 + rpc_set_port(data->addr, NFS_PORT);
4501 + error = nfs4_set_client(server, data->hostname,
4502 +@@ -1153,7 +1153,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
4503 + if (error < 0)
4504 + goto error;
4505 +
4506 +-#ifdef CONFIG_SUNRPC_XPRT_RDMA
4507 ++#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
4508 + init_server:
4509 + #endif
4510 + error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
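
Plain #ifdef CONFIG_SUNRPC_XPRT_RDMA is only true when the option is built in; when it is built as a module the compiler sees CONFIG_SUNRPC_XPRT_RDMA_MODULE instead, so the RDMA path was silently skipped. IS_ENABLED() covers both cases. A sketch of the macro trick, modeled on the kernel's include/linux/kconfig.h:

    #include <stdio.h>

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x) ___is_defined(x)
    #define IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))

    #define CONFIG_SUNRPC_XPRT_RDMA_MODULE 1   /* pretend the option is =m */

    int main(void)
    {
        /* #ifdef CONFIG_SUNRPC_XPRT_RDMA would be false here, yet: */
        printf("IS_ENABLED -> %d\n", IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA));
        return 0;
    }
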
4511 +diff --git a/include/linux/hid.h b/include/linux/hid.h
4512 +index 773bcb1d4044..5482dd6ae9ef 100644
4513 +--- a/include/linux/hid.h
4514 ++++ b/include/linux/hid.h
4515 +@@ -520,6 +520,7 @@ struct hid_input {
4516 + const char *name;
4517 + bool registered;
4518 + struct list_head reports; /* the list of reports */
4519 ++ unsigned int application; /* application usage for this input */
4520 + };
4521 +
4522 + enum hid_type {
4523 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
4524 +index 22651e124071..a590419e46c5 100644
4525 +--- a/include/linux/mm_types.h
4526 ++++ b/include/linux/mm_types.h
4527 +@@ -340,7 +340,7 @@ struct kioctx_table;
4528 + struct mm_struct {
4529 + struct vm_area_struct *mmap; /* list of VMAs */
4530 + struct rb_root mm_rb;
4531 +- u32 vmacache_seqnum; /* per-thread vmacache */
4532 ++ u64 vmacache_seqnum; /* per-thread vmacache */
4533 + #ifdef CONFIG_MMU
4534 + unsigned long (*get_unmapped_area) (struct file *filp,
4535 + unsigned long addr, unsigned long len,
4536 +diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
4537 +index 5fe87687664c..d7016dcb245e 100644
4538 +--- a/include/linux/mm_types_task.h
4539 ++++ b/include/linux/mm_types_task.h
4540 +@@ -32,7 +32,7 @@
4541 + #define VMACACHE_MASK (VMACACHE_SIZE - 1)
4542 +
4543 + struct vmacache {
4544 +- u32 seqnum;
4545 ++ u64 seqnum;
4546 + struct vm_area_struct *vmas[VMACACHE_SIZE];
4547 + };
4548 +
4549 +diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
4550 +index 3e8ec3b8a39c..87c635d6c773 100644
4551 +--- a/include/linux/mtd/rawnand.h
4552 ++++ b/include/linux/mtd/rawnand.h
4553 +@@ -986,14 +986,14 @@ struct nand_subop {
4554 + unsigned int last_instr_end_off;
4555 + };
4556 +
4557 +-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
4558 +- unsigned int op_id);
4559 +-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
4560 +- unsigned int op_id);
4561 +-int nand_subop_get_data_start_off(const struct nand_subop *subop,
4562 +- unsigned int op_id);
4563 +-int nand_subop_get_data_len(const struct nand_subop *subop,
4564 +- unsigned int op_id);
4565 ++unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
4566 ++ unsigned int op_id);
4567 ++unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
4568 ++ unsigned int op_id);
4569 ++unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
4570 ++ unsigned int op_id);
4571 ++unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
4572 ++ unsigned int op_id);
4573 +
4574 + /**
4575 + * struct nand_op_parser_addr_constraints - Constraints for address instructions
4576 +diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
4577 +index 5c7f010676a7..47a3441cf4c4 100644
4578 +--- a/include/linux/vm_event_item.h
4579 ++++ b/include/linux/vm_event_item.h
4580 +@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
4581 + #ifdef CONFIG_DEBUG_VM_VMACACHE
4582 + VMACACHE_FIND_CALLS,
4583 + VMACACHE_FIND_HITS,
4584 +- VMACACHE_FULL_FLUSHES,
4585 + #endif
4586 + #ifdef CONFIG_SWAP
4587 + SWAP_RA,
4588 +diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
4589 +index a5b3aa8d281f..a09b28f76460 100644
4590 +--- a/include/linux/vmacache.h
4591 ++++ b/include/linux/vmacache.h
4592 +@@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
4593 + memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
4594 + }
4595 +
4596 +-extern void vmacache_flush_all(struct mm_struct *mm);
4597 + extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
4598 + extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
4599 + unsigned long addr);
4600 +@@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
4601 + static inline void vmacache_invalidate(struct mm_struct *mm)
4602 + {
4603 + mm->vmacache_seqnum++;
4604 +-
4605 +- /* deal with overflows */
4606 +- if (unlikely(mm->vmacache_seqnum == 0))
4607 +- vmacache_flush_all(mm);
4608 + }
4609 +
4610 + #endif /* __LINUX_VMACACHE_H */
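
With vmacache_seqnum widened to u64, the sequence number can no longer wrap in any realistic lifetime, so the overflow handler vmacache_flush_all() and its VMACACHE_FULL_FLUSHES counter become dead code and are removed. Quick back-of-the-envelope arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Even invalidating once per nanosecond, a 64-bit counter
         * takes centuries to overflow. */
        double ns_per_year = 365.25 * 24 * 3600 * 1e9;
        printf("u64 wraps after ~%.0f years at 1 GHz\n",
               (double)UINT64_MAX / ns_per_year);   /* ~585 years */
        return 0;
    }
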
4611 +diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
4612 +index 7363f18e65a5..813282cc8af6 100644
4613 +--- a/include/uapi/linux/ethtool.h
4614 ++++ b/include/uapi/linux/ethtool.h
4615 +@@ -902,13 +902,13 @@ struct ethtool_rx_flow_spec {
4616 + static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
4617 + {
4618 + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
4619 +-};
4620 ++}
4621 +
4622 + static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
4623 + {
4624 + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
4625 + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
4626 +-};
4627 ++}
4628 +
4629 + /**
4630 + * struct ethtool_rxnfc - command to get or set RX flow classification rules
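
The ethtool change only removes stray semicolons after the inline function bodies. In a UAPI header that matters: the extra ';' is an empty file-scope declaration, which strict ISO C rejects, so user programs compiled with -Wpedantic warned on every include. For example:

    /* cc -std=c99 -Wpedantic demo.c */
    #include <stdio.h>

    static inline int get_ring(unsigned long long cookie)
    {
        return (int)(cookie & 0xff);
    }   /* a trailing ';' here would trigger a -Wpedantic warning */

    int main(void)
    {
        printf("%d\n", get_ring(0x12345));
        return 0;
    }
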
4631 +diff --git a/kernel/cpu.c b/kernel/cpu.c
4632 +index f80afc674f02..517907b082df 100644
4633 +--- a/kernel/cpu.c
4634 ++++ b/kernel/cpu.c
4635 +@@ -608,15 +608,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
4636 + bool bringup = st->bringup;
4637 + enum cpuhp_state state;
4638 +
4639 ++ if (WARN_ON_ONCE(!st->should_run))
4640 ++ return;
4641 ++
4642 + /*
4643 + * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
4644 + * that if we see ->should_run we also see the rest of the state.
4645 + */
4646 + smp_mb();
4647 +
4648 +- if (WARN_ON_ONCE(!st->should_run))
4649 +- return;
4650 +-
4651 + cpuhp_lock_acquire(bringup);
4652 +
4653 + if (st->single) {
4654 +@@ -928,7 +928,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
4655 + ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
4656 + if (ret) {
4657 + st->target = prev_state;
4658 +- undo_cpu_down(cpu, st);
4659 ++ if (st->state < prev_state)
4660 ++ undo_cpu_down(cpu, st);
4661 + break;
4662 + }
4663 + }
4664 +@@ -981,7 +982,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
4665 + * to do the further cleanups.
4666 + */
4667 + ret = cpuhp_down_callbacks(cpu, st, target);
4668 +- if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
4669 ++ if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
4670 + cpuhp_reset_state(st, prev_state);
4671 + __cpuhp_kick_ap(st);
4672 + }
4673 +diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
4674 +index f89a78e2792b..443941aa784e 100644
4675 +--- a/kernel/time/clocksource.c
4676 ++++ b/kernel/time/clocksource.c
4677 +@@ -129,19 +129,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
4678 + spin_unlock_irqrestore(&watchdog_lock, *flags);
4679 + }
4680 +
4681 ++static int clocksource_watchdog_kthread(void *data);
4682 ++static void __clocksource_change_rating(struct clocksource *cs, int rating);
4683 ++
4684 + /*
4685 + * Interval: 0.5sec Threshold: 0.0625s
4686 + */
4687 + #define WATCHDOG_INTERVAL (HZ >> 1)
4688 + #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
4689 +
4690 ++static void clocksource_watchdog_work(struct work_struct *work)
4691 ++{
4692 ++ /*
4693 ++ * We cannot directly run clocksource_watchdog_kthread() here, because
4694 ++ * clocksource_select() calls timekeeping_notify() which uses
4695 ++ * stop_machine(). One cannot use stop_machine() from a workqueue() due
4696 ++ * to lock inversions wrt CPU hotplug.
4697 ++ *
4698 ++ * Also, we only ever run this work once or twice during the lifetime
4699 ++ * of the kernel, so there is no point in creating a more permanent
4700 ++ * kthread for this.
4701 ++ *
4702 ++ * If kthread_run() fails, the next watchdog scan over the
4703 ++ * watchdog_list will find the unstable clock again.
4704 ++ */
4705 ++ kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
4706 ++}
4707 ++
4708 + static void __clocksource_unstable(struct clocksource *cs)
4709 + {
4710 + cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
4711 + cs->flags |= CLOCK_SOURCE_UNSTABLE;
4712 +
4713 + /*
4714 +- * If the clocksource is registered clocksource_watchdog_work() will
4715 ++ * If the clocksource is registered clocksource_watchdog_kthread() will
4716 + * re-rate and re-select.
4717 + */
4718 + if (list_empty(&cs->list)) {
4719 +@@ -152,7 +173,7 @@ static void __clocksource_unstable(struct clocksource *cs)
4720 + if (cs->mark_unstable)
4721 + cs->mark_unstable(cs);
4722 +
4723 +- /* kick clocksource_watchdog_work() */
4724 ++ /* kick clocksource_watchdog_kthread() */
4725 + if (finished_booting)
4726 + schedule_work(&watchdog_work);
4727 + }
4728 +@@ -162,7 +183,7 @@ static void __clocksource_unstable(struct clocksource *cs)
4729 + * @cs: clocksource to be marked unstable
4730 + *
4731 + * This function is called by the x86 TSC code to mark clocksources as unstable;
4732 +- * it defers demotion and re-selection to a work.
4733 ++ * it defers demotion and re-selection to a kthread.
4734 + */
4735 + void clocksource_mark_unstable(struct clocksource *cs)
4736 + {
4737 +@@ -387,9 +408,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
4738 + }
4739 + }
4740 +
4741 +-static void __clocksource_change_rating(struct clocksource *cs, int rating);
4742 +-
4743 +-static int __clocksource_watchdog_work(void)
4744 ++static int __clocksource_watchdog_kthread(void)
4745 + {
4746 + struct clocksource *cs, *tmp;
4747 + unsigned long flags;
4748 +@@ -414,12 +433,13 @@ static int __clocksource_watchdog_work(void)
4749 + return select;
4750 + }
4751 +
4752 +-static void clocksource_watchdog_work(struct work_struct *work)
4753 ++static int clocksource_watchdog_kthread(void *data)
4754 + {
4755 + mutex_lock(&clocksource_mutex);
4756 +- if (__clocksource_watchdog_work())
4757 ++ if (__clocksource_watchdog_kthread())
4758 + clocksource_select();
4759 + mutex_unlock(&clocksource_mutex);
4760 ++ return 0;
4761 + }
4762 +
4763 + static bool clocksource_is_watchdog(struct clocksource *cs)
4764 +@@ -438,7 +458,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
4765 + static void clocksource_select_watchdog(bool fallback) { }
4766 + static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
4767 + static inline void clocksource_resume_watchdog(void) { }
4768 +-static inline int __clocksource_watchdog_work(void) { return 0; }
4769 ++static inline int __clocksource_watchdog_kthread(void) { return 0; }
4770 + static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
4771 + void clocksource_mark_unstable(struct clocksource *cs) { }
4772 +
4773 +@@ -672,7 +692,7 @@ static int __init clocksource_done_booting(void)
4774 + /*
4775 + * Run the watchdog first to eliminate unstable clock sources
4776 + */
4777 +- __clocksource_watchdog_work();
4778 ++ __clocksource_watchdog_kthread();
4779 + clocksource_select();
4780 + mutex_unlock(&clocksource_mutex);
4781 + return 0;
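
The comment added above spells out the constraint: timekeeping_notify() ends up in stop_machine(), which must not be called from a workqueue, so the work item now only spawns a short-lived kthread that does the real re-rating. A userspace analogue of "the callback spawns a one-shot helper thread":

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *watchdog_kthread(void *data)
    {
        puts("re-rate the unstable clocksource outside the work context");
        return NULL;
    }

    /* The "work" merely launches the helper; if creation fails, the
     * next watchdog scan simply finds the unstable clock again. */
    static void watchdog_work(void)
    {
        pthread_t t;
        if (pthread_create(&t, NULL, watchdog_kthread, NULL) == 0)
            pthread_detach(t);
    }

    int main(void)
    {
        watchdog_work();
        sleep(1);   /* crude: give the detached thread time to run */
        return 0;
    }
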
4782 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
4783 +index cc2d23e6ff61..786f8c014e7e 100644
4784 +--- a/kernel/time/timer.c
4785 ++++ b/kernel/time/timer.c
4786 +@@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base)
4787 +
4788 + raw_spin_lock_irq(&base->lock);
4789 +
4790 ++ /*
4791 ++ * timer_base::must_forward_clk must be cleared before running
4792 ++ * timers so that any timer functions that call mod_timer() will
4793 ++ * not try to forward the base. Idle tracking / clock forwarding
4794 ++ * logic is only used with BASE_STD timers.
4795 ++ *
4796 ++ * The must_forward_clk flag is cleared unconditionally also for
4797 ++ * the deferrable base. The deferrable base is not affected by idle
4798 ++ * tracking and never forwarded, so clearing the flag is a NOOP.
4799 ++ *
4800 ++ * The fact that the deferrable base is never forwarded can cause
4801 ++ * large variations in granularity for deferrable timers, but they
4802 ++ * can be deferred for long periods due to idle anyway.
4803 ++ */
4804 ++ base->must_forward_clk = false;
4805 ++
4806 + while (time_after_eq(jiffies, base->clk)) {
4807 +
4808 + levels = collect_expired_timers(base, heads);
4809 +@@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
4810 + {
4811 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
4812 +
4813 +- /*
4814 +- * must_forward_clk must be cleared before running timers so that any
4815 +- * timer functions that call mod_timer will not try to forward the
4816 +- * base. idle trcking / clock forwarding logic is only used with
4817 +- * BASE_STD timers.
4818 +- *
4819 +- * The deferrable base does not do idle tracking at all, so we do
4820 +- * not forward it. This can result in very large variations in
4821 +- * granularity for deferrable timers, but they can be deferred for
4822 +- * long periods due to idle.
4823 +- */
4824 +- base->must_forward_clk = false;
4825 +-
4826 + __run_timers(base);
4827 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
4828 + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
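
The hunk pair moves the must_forward_clk clearing from run_timer_softirq(), where it happened without the lock, into __run_timers() after base->lock is taken, which keeps the flag update ordered with other lock holders such as mod_timer(). A minimal sketch of "clear a shared flag only under the same lock its readers take":

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool must_forward_clk = true;

    static void run_timers(void)
    {
        pthread_mutex_lock(&base_lock);
        must_forward_clk = false;   /* now serialized with mod_timer() */
        /* ... expire timers ... */
        pthread_mutex_unlock(&base_lock);
    }

    int main(void)
    {
        run_timers();
        return 0;
    }
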
4829 +diff --git a/mm/debug.c b/mm/debug.c
4830 +index 38c926520c97..bd10aad8539a 100644
4831 +--- a/mm/debug.c
4832 ++++ b/mm/debug.c
4833 +@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);
4834 +
4835 + void dump_mm(const struct mm_struct *mm)
4836 + {
4837 +- pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
4838 ++ pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
4839 + #ifdef CONFIG_MMU
4840 + "get_unmapped_area %px\n"
4841 + #endif
4842 +@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
4843 + "tlb_flush_pending %d\n"
4844 + "def_flags: %#lx(%pGv)\n",
4845 +
4846 +- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
4847 ++ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
4848 + #ifdef CONFIG_MMU
4849 + mm->get_unmapped_area,
4850 + #endif
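
The dump_mm() change follows from the seqnum widening above: "%d" with a u64 argument is undefined behaviour in a varargs call, so the format becomes %llu with an explicit cast. The portable idiom in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t seqnum = 1ULL << 40;

        /* Print a fixed-width 64-bit value via %llu plus a cast. */
        printf("seqnum %llu\n", (unsigned long long)seqnum);
        return 0;
    }
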
4851 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4852 +index 7deb49f69e27..785252397e35 100644
4853 +--- a/mm/memory_hotplug.c
4854 ++++ b/mm/memory_hotplug.c
4855 +@@ -1341,7 +1341,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
4856 + if (__PageMovable(page))
4857 + return pfn;
4858 + if (PageHuge(page)) {
4859 +- if (page_huge_active(page))
4860 ++ if (hugepage_migration_supported(page_hstate(page)) &&
4861 ++ page_huge_active(page))
4862 + return pfn;
4863 + else
4864 + pfn = round_up(pfn + 1,
4865 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4866 +index 3222193c46c6..65f2e6481c99 100644
4867 +--- a/mm/page_alloc.c
4868 ++++ b/mm/page_alloc.c
4869 +@@ -7649,6 +7649,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
4870 + * handle each tail page individually in migration.
4871 + */
4872 + if (PageHuge(page)) {
4873 ++
4874 ++ if (!hugepage_migration_supported(page_hstate(page)))
4875 ++ goto unmovable;
4876 ++
4877 + iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
4878 + continue;
4879 + }
4880 +diff --git a/mm/vmacache.c b/mm/vmacache.c
4881 +index db7596eb6132..f1729617dc85 100644
4882 +--- a/mm/vmacache.c
4883 ++++ b/mm/vmacache.c
4884 +@@ -7,44 +7,6 @@
4885 + #include <linux/mm.h>
4886 + #include <linux/vmacache.h>
4887 +
4888 +-/*
4889 +- * Flush vma caches for threads that share a given mm.
4890 +- *
4891 +- * The operation is safe because the caller holds the mmap_sem
4892 +- * exclusively and other threads accessing the vma cache will
4893 +- * have mmap_sem held at least for read, so no extra locking
4894 +- * is required to maintain the vma cache.
4895 +- */
4896 +-void vmacache_flush_all(struct mm_struct *mm)
4897 +-{
4898 +- struct task_struct *g, *p;
4899 +-
4900 +- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
4901 +-
4902 +- /*
4903 +- * Single threaded tasks need not iterate the entire
4904 +- * list of process. We can avoid the flushing as well
4905 +- * since the mm's seqnum was increased and don't have
4906 +- * to worry about other threads' seqnum. Current's
4907 +- * flush will occur upon the next lookup.
4908 +- */
4909 +- if (atomic_read(&mm->mm_users) == 1)
4910 +- return;
4911 +-
4912 +- rcu_read_lock();
4913 +- for_each_process_thread(g, p) {
4914 +- /*
4915 +- * Only flush the vmacache pointers as the
4916 +- * mm seqnum is already set and curr's will
4917 +- * be set upon invalidation when the next
4918 +- * lookup is done.
4919 +- */
4920 +- if (mm == p->mm)
4921 +- vmacache_flush(p);
4922 +- }
4923 +- rcu_read_unlock();
4924 +-}
4925 +-
4926 + /*
4927 + * This task may be accessing a foreign mm via (for example)
4928 + * get_user_pages()->find_vma(). The vmacache is task-local and this
4929 +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
4930 +index 3bba8f4b08a9..253975cce943 100644
4931 +--- a/net/bluetooth/hidp/core.c
4932 ++++ b/net/bluetooth/hidp/core.c
4933 +@@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
4934 + hid->version = req->version;
4935 + hid->country = req->country;
4936 +
4937 +- strncpy(hid->name, req->name, sizeof(req->name) - 1);
4938 ++ strncpy(hid->name, req->name, sizeof(hid->name));
4939 +
4940 + snprintf(hid->phys, sizeof(hid->phys), "%pMR",
4941 + &l2cap_pi(session->ctrl_sock->sk)->chan->src);
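
The hidp fix re-bounds the copy by the destination buffer instead of the source: sizing strncpy() by sizeof(req->name) only works while the two arrays happen to match, and it tripped gcc 8's string-truncation warnings. Note that strncpy() still does not NUL-terminate when the source fills the buffer, so a defensive variant of the pattern looks like:

    #include <stdio.h>
    #include <string.h>

    struct hid_dev { char name[8];  };
    struct request { char name[32]; };

    int main(void)
    {
        struct hid_dev hid;
        struct request req = { "a-very-long-request-name" };

        /* Bound by the destination, then terminate explicitly. */
        strncpy(hid.name, req.name, sizeof(hid.name) - 1);
        hid.name[sizeof(hid.name) - 1] = '\0';
        printf("%s\n", hid.name);   /* "a-very-" */
        return 0;
    }
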
4942 +diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
4943 +index 2589a6b78aa1..013fdb6fa07a 100644
4944 +--- a/net/dcb/dcbnl.c
4945 ++++ b/net/dcb/dcbnl.c
4946 +@@ -1786,7 +1786,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
4947 + if (itr->app.selector == app->selector &&
4948 + itr->app.protocol == app->protocol &&
4949 + itr->ifindex == ifindex &&
4950 +- (!prio || itr->app.priority == prio))
4951 ++ ((prio == -1) || itr->app.priority == prio))
4952 + return itr;
4953 + }
4954 +
4955 +@@ -1821,7 +1821,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
4956 + u8 prio = 0;
4957 +
4958 + spin_lock_bh(&dcb_lock);
4959 +- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
4960 ++ itr = dcb_app_lookup(app, dev->ifindex, -1);
4961 ++ if (itr)
4962 + prio = itr->app.priority;
4963 + spin_unlock_bh(&dcb_lock);
4964 +
4965 +@@ -1849,7 +1850,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
4966 +
4967 + spin_lock_bh(&dcb_lock);
4968 + /* Search for existing match and replace */
4969 +- if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
4970 ++ itr = dcb_app_lookup(new, dev->ifindex, -1);
4971 ++ if (itr) {
4972 + if (new->priority)
4973 + itr->app.priority = new->priority;
4974 + else {
4975 +@@ -1882,7 +1884,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
4976 + u8 prio = 0;
4977 +
4978 + spin_lock_bh(&dcb_lock);
4979 +- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
4980 ++ itr = dcb_app_lookup(app, dev->ifindex, -1);
4981 ++ if (itr)
4982 + prio |= 1 << itr->app.priority;
4983 + spin_unlock_bh(&dcb_lock);
4984 +
4985 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4986 +index 932985ca4e66..3f80a5ca4050 100644
4987 +--- a/net/mac80211/rx.c
4988 ++++ b/net/mac80211/rx.c
4989 +@@ -1612,6 +1612,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
4990 + */
4991 + if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
4992 + !ieee80211_has_morefrags(hdr->frame_control) &&
4993 ++ !is_multicast_ether_addr(hdr->addr1) &&
4994 + (ieee80211_is_mgmt(hdr->frame_control) ||
4995 + ieee80211_is_data(hdr->frame_control)) &&
4996 + !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
4997 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
4998 +index 20a171ac4bb2..16849969c138 100644
4999 +--- a/sound/pci/hda/hda_codec.c
5000 ++++ b/sound/pci/hda/hda_codec.c
5001 +@@ -3910,7 +3910,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
5002 +
5003 + list_for_each_codec(codec, bus) {
5004 + /* FIXME: maybe a better way needed for forced reset */
5005 +- cancel_delayed_work_sync(&codec->jackpoll_work);
5006 ++ if (current_work() != &codec->jackpoll_work.work)
5007 ++ cancel_delayed_work_sync(&codec->jackpoll_work);
5008 + #ifdef CONFIG_PM
5009 + if (hda_codec_is_power_on(codec)) {
5010 + hda_call_codec_suspend(codec);
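
cancel_delayed_work_sync() waits for the work to finish, so calling it from inside the jackpoll work itself would deadlock waiting on itself; the current_work() test skips the cancel in exactly that case. A pthread analogue of "never synchronously wait for yourself":

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static pthread_t worker;
    static sem_t ready;

    static void reset_codecs(void)
    {
        /* Analogue of the current_work() check. */
        if (!pthread_equal(pthread_self(), worker))
            pthread_join(worker, NULL);
        else
            puts("called from the worker itself: skip the self-wait");
    }

    static void *jackpoll(void *arg)
    {
        sem_wait(&ready);   /* ensure 'worker' has been published */
        reset_codecs();
        return NULL;
    }

    int main(void)
    {
        sem_init(&ready, 0, 0);
        pthread_create(&worker, NULL, jackpoll, NULL);
        sem_post(&ready);
        pthread_join(worker, NULL);
        return 0;
    }
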
5011 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5012 +index f6af3e1c2b93..d14b05f68d6d 100644
5013 +--- a/sound/pci/hda/patch_realtek.c
5014 ++++ b/sound/pci/hda/patch_realtek.c
5015 +@@ -6530,6 +6530,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5016 + SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
5017 + SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
5018 + SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
5019 ++ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5020 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5021 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5022 + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5023 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
5024 +index 5feae9666822..55d6c9488d8e 100644
5025 +--- a/sound/soc/soc-pcm.c
5026 ++++ b/sound/soc/soc-pcm.c
5027 +@@ -1165,6 +1165,9 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
5028 + snd_pcm_sframes_t codec_delay = 0;
5029 + int i;
5030 +
5031 ++ /* clearing the previous total delay */
5032 ++ runtime->delay = 0;
5033 ++
5034 + for_each_rtdcom(rtd, rtdcom) {
5035 + component = rtdcom->component;
5036 +
5037 +@@ -1176,6 +1179,8 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
5038 + offset = component->driver->ops->pointer(substream);
5039 + break;
5040 + }
5041 ++ /* base delay if assigned in pointer callback */
5042 ++ delay = runtime->delay;
5043 +
5044 + if (cpu_dai->driver->ops->delay)
5045 + delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
5046 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
5047 +index f5a3b402589e..67b042738ed7 100644
5048 +--- a/tools/perf/Makefile.config
5049 ++++ b/tools/perf/Makefile.config
5050 +@@ -905,8 +905,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative))
5051 + mandir = share/man
5052 + infodir = share/info
5053 + perfexecdir = libexec/perf-core
5054 +-perf_include_dir = lib/include/perf
5055 +-perf_examples_dir = lib/examples/perf
5056 ++perf_include_dir = lib/perf/include
5057 ++perf_examples_dir = lib/perf/examples
5058 + sharedir = $(prefix)/share
5059 + template_dir = share/perf-core/templates
5060 + STRACE_GROUPS_DIR = share/perf-core/strace/groups
5061 +diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
5062 +index 6a8738f7ead3..eab66e3b0a19 100644
5063 +--- a/tools/perf/builtin-c2c.c
5064 ++++ b/tools/perf/builtin-c2c.c
5065 +@@ -2349,6 +2349,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
5066 + " s Toggle full length of symbol and source line columns \n"
5067 + " q Return back to cacheline list \n";
5068 +
5069 ++ if (!he)
5070 ++ return 0;
5071 ++
5072 + /* Display compact version first. */
5073 + c2c.symbol_full = false;
5074 +
5075 +diff --git a/tools/perf/perf.h b/tools/perf/perf.h
5076 +index d215714f48df..21bf7f5a3cf5 100644
5077 +--- a/tools/perf/perf.h
5078 ++++ b/tools/perf/perf.h
5079 +@@ -25,7 +25,9 @@ static inline unsigned long long rdclock(void)
5080 + return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
5081 + }
5082 +
5083 ++#ifndef MAX_NR_CPUS
5084 + #define MAX_NR_CPUS 1024
5085 ++#endif
5086 +
5087 + extern const char *input_name;
5088 + extern bool perf_host, perf_guest;
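
Wrapping the definition in #ifndef lets a build override the CPU limit with a compiler flag (e.g. -DMAX_NR_CPUS=4096) without patching the header:

    #include <stdio.h>

    #ifndef MAX_NR_CPUS
    #define MAX_NR_CPUS 1024   /* default unless the build overrides it */
    #endif

    int main(void)
    {
        printf("MAX_NR_CPUS = %d\n", MAX_NR_CPUS);
        return 0;
    }
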
5089 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
5090 +index 94fce4f537e9..0d5504751cc5 100644
5091 +--- a/tools/perf/util/evsel.c
5092 ++++ b/tools/perf/util/evsel.c
5093 +@@ -848,6 +848,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
5094 + }
5095 + }
5096 +
5097 ++static bool is_dummy_event(struct perf_evsel *evsel)
5098 ++{
5099 ++ return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
5100 ++ (evsel->attr.config == PERF_COUNT_SW_DUMMY);
5101 ++}
5102 ++
5103 + /*
5104 + * The enable_on_exec/disabled value strategy:
5105 + *
5106 +@@ -1086,6 +1092,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
5107 + else
5108 + perf_evsel__reset_sample_bit(evsel, PERIOD);
5109 + }
5110 ++
5111 ++ /*
5112 ++ * For initial_delay, a dummy event is added implicitly.
5113 ++ * The software event will trigger -EOPNOTSUPP error out,
5114 ++ * if BRANCH_STACK bit is set.
5115 ++ */
5116 ++ if (opts->initial_delay && is_dummy_event(evsel))
5117 ++ perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
5118 + }
5119 +
5120 + static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
5121 +diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
5122 +index b53596ad601b..2e7fd8227969 100644
5123 +--- a/tools/testing/nvdimm/pmem-dax.c
5124 ++++ b/tools/testing/nvdimm/pmem-dax.c
5125 +@@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
5126 + if (get_nfit_res(pmem->phys_addr + offset)) {
5127 + struct page *page;
5128 +
5129 +- *kaddr = pmem->virt_addr + offset;
5130 ++ if (kaddr)
5131 ++ *kaddr = pmem->virt_addr + offset;
5132 + page = vmalloc_to_page(pmem->virt_addr + offset);
5133 +- *pfn = page_to_pfn_t(page);
5134 ++ if (pfn)
5135 ++ *pfn = page_to_pfn_t(page);
5136 + pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
5137 + __func__, pmem, pgoff, page_to_pfn(page));
5138 +
5139 + return 1;
5140 + }
5141 +
5142 +- *kaddr = pmem->virt_addr + offset;
5143 +- *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
5144 ++ if (kaddr)
5145 ++ *kaddr = pmem->virt_addr + offset;
5146 ++ if (pfn)
5147 ++ *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
5148 +
5149 + /*
5150 + * If badblocks are present, limit known good range to the
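
The test stub is updated to match the real pmem driver, whose ->direct_access() callers may pass NULL for whichever of kaddr/pfn they do not need; each out-parameter is therefore checked before being written. The general pattern:

    #include <stdio.h>

    /* Optional out-parameters: only store through non-NULL pointers. */
    static long direct_access(char *base, long off, void **kaddr, unsigned long *pfn)
    {
        if (kaddr)
            *kaddr = base + off;
        if (pfn)
            *pfn = 0;   /* placeholder; the driver derives this from the phys addr */
        return 1;
    }

    int main(void)
    {
        char buf[64];
        void *k;

        direct_access(buf, 8, &k, NULL);   /* caller does not need the pfn */
        printf("kaddr ok: %d\n", k == buf + 8);
        return 0;
    }
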
5151 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
5152 +index 41106d9d5cc7..f9c856c8e472 100644
5153 +--- a/tools/testing/selftests/bpf/test_verifier.c
5154 ++++ b/tools/testing/selftests/bpf/test_verifier.c
5155 +@@ -6997,7 +6997,7 @@ static struct bpf_test tests[] = {
5156 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5157 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5158 + BPF_FUNC_map_lookup_elem),
5159 +- BPF_MOV64_REG(BPF_REG_0, 0),
5160 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
5161 + BPF_EXIT_INSN(),
5162 + },
5163 + .fixup_map_in_map = { 3 },
5164 +@@ -7020,7 +7020,7 @@ static struct bpf_test tests[] = {
5165 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5166 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5167 + BPF_FUNC_map_lookup_elem),
5168 +- BPF_MOV64_REG(BPF_REG_0, 0),
5169 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
5170 + BPF_EXIT_INSN(),
5171 + },
5172 + .fixup_map_in_map = { 3 },
5173 +@@ -7042,7 +7042,7 @@ static struct bpf_test tests[] = {
5174 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5175 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5176 + BPF_FUNC_map_lookup_elem),
5177 +- BPF_MOV64_REG(BPF_REG_0, 0),
5178 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
5179 + BPF_EXIT_INSN(),
5180 + },
5181 + .fixup_map_in_map = { 3 },
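
The three test fixes swap BPF_MOV64_REG for BPF_MOV64_IMM: with the register form, the literal 0 is decoded as source register r0, so the instruction is a no-op "r0 = r0" that leaves the map-lookup result in r0 instead of clearing it. A sketch of the two encodings, modeled on tools/include/linux/filter.h (assumes Linux UAPI headers):

    #include <stdio.h>
    #include <linux/bpf.h>

    #define BPF_MOV64_REG(DST, SRC)                        \
        ((struct bpf_insn) {                               \
            .code = BPF_ALU64 | BPF_MOV | BPF_X,           \
            .dst_reg = DST, .src_reg = SRC })

    #define BPF_MOV64_IMM(DST, IMM)                        \
        ((struct bpf_insn) {                               \
            .code = BPF_ALU64 | BPF_MOV | BPF_K,           \
            .dst_reg = DST, .imm = IMM })

    int main(void)
    {
        struct bpf_insn reg = BPF_MOV64_REG(BPF_REG_0, 0);
        struct bpf_insn imm = BPF_MOV64_IMM(BPF_REG_0, 0);

        printf("reg form: code=0x%02x src_reg=%d  -> r0 = r0\n",
               reg.code, reg.src_reg);
        printf("imm form: code=0x%02x imm=%d      -> r0 = 0\n",
               imm.code, imm.imm);
        return 0;
    }
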
5182 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
5183 +index 70952bd98ff9..13147a1f5731 100644
5184 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
5185 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
5186 +@@ -17,7 +17,7 @@
5187 + "cmdUnderTest": "$TC actions add action connmark",
5188 + "expExitCode": "0",
5189 + "verifyCmd": "$TC actions list action connmark",
5190 +- "matchPattern": "action order [0-9]+: connmark zone 0 pipe",
5191 ++ "matchPattern": "action order [0-9]+: connmark zone 0 pipe",
5192 + "matchCount": "1",
5193 + "teardown": [
5194 + "$TC actions flush action connmark"
5195 +@@ -41,7 +41,7 @@
5196 + "cmdUnderTest": "$TC actions add action connmark pass index 1",
5197 + "expExitCode": "0",
5198 + "verifyCmd": "$TC actions get action connmark index 1",
5199 +- "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref",
5200 ++ "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref",
5201 + "matchCount": "1",
5202 + "teardown": [
5203 + "$TC actions flush action connmark"
5204 +@@ -65,7 +65,7 @@
5205 + "cmdUnderTest": "$TC actions add action connmark drop index 100",
5206 + "expExitCode": "0",
5207 + "verifyCmd": "$TC actions get action connmark index 100",
5208 +- "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref",
5209 ++ "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref",
5210 + "matchCount": "1",
5211 + "teardown": [
5212 + "$TC actions flush action connmark"
5213 +@@ -89,7 +89,7 @@
5214 + "cmdUnderTest": "$TC actions add action connmark pipe index 455",
5215 + "expExitCode": "0",
5216 + "verifyCmd": "$TC actions get action connmark index 455",
5217 +- "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref",
5218 ++ "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref",
5219 + "matchCount": "1",
5220 + "teardown": [
5221 + "$TC actions flush action connmark"
5222 +@@ -113,7 +113,7 @@
5223 + "cmdUnderTest": "$TC actions add action connmark reclassify index 7",
5224 + "expExitCode": "0",
5225 + "verifyCmd": "$TC actions list action connmark",
5226 +- "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref",
5227 ++ "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref",
5228 + "matchCount": "1",
5229 + "teardown": [
5230 + "$TC actions flush action connmark"
5231 +@@ -137,7 +137,7 @@
5232 + "cmdUnderTest": "$TC actions add action connmark continue index 17",
5233 + "expExitCode": "0",
5234 + "verifyCmd": "$TC actions list action connmark",
5235 +- "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref",
5236 ++ "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref",
5237 + "matchCount": "1",
5238 + "teardown": [
5239 + "$TC actions flush action connmark"
5240 +@@ -161,7 +161,7 @@
5241 + "cmdUnderTest": "$TC actions add action connmark jump 10 index 17",
5242 + "expExitCode": "0",
5243 + "verifyCmd": "$TC actions list action connmark",
5244 +- "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref",
5245 ++ "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref",
5246 + "matchCount": "1",
5247 + "teardown": [
5248 + "$TC actions flush action connmark"
5249 +@@ -185,7 +185,7 @@
5250 + "cmdUnderTest": "$TC actions add action connmark zone 100 pipe index 1",
5251 + "expExitCode": "0",
5252 + "verifyCmd": "$TC actions get action connmark index 1",
5253 +- "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref",
5254 ++ "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref",
5255 + "matchCount": "1",
5256 + "teardown": [
5257 + "$TC actions flush action connmark"
5258 +@@ -209,7 +209,7 @@
5259 + "cmdUnderTest": "$TC actions add action connmark zone 65536 reclassify index 21",
5260 + "expExitCode": "255",
5261 + "verifyCmd": "$TC actions get action connmark index 1",
5262 +- "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref",
5263 ++ "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref",
5264 + "matchCount": "0",
5265 + "teardown": [
5266 + "$TC actions flush action connmark"
5267 +@@ -233,7 +233,7 @@
5268 + "cmdUnderTest": "$TC actions add action connmark zone 655 unsupp_arg pass index 2",
5269 + "expExitCode": "255",
5270 + "verifyCmd": "$TC actions get action connmark index 2",
5271 +- "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref",
5272 ++ "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref",
5273 + "matchCount": "0",
5274 + "teardown": [
5275 + "$TC actions flush action connmark"
5276 +@@ -258,7 +258,7 @@
5277 + "cmdUnderTest": "$TC actions replace action connmark zone 555 reclassify index 555",
5278 + "expExitCode": "0",
5279 + "verifyCmd": "$TC actions get action connmark index 555",
5280 +- "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref",
5281 ++ "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref",
5282 + "matchCount": "1",
5283 + "teardown": [
5284 + "$TC actions flush action connmark"
5285 +@@ -282,7 +282,7 @@
5286 + "cmdUnderTest": "$TC actions add action connmark zone 555 pipe index 5 cookie aabbccddeeff112233445566778800a1",
5287 + "expExitCode": "0",
5288 + "verifyCmd": "$TC actions get action connmark index 5",
5289 +- "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1",
5290 ++ "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1",
5291 + "matchCount": "1",
5292 + "teardown": [
5293 + "$TC actions flush action connmark"
5294 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
5295 +index 6e4edfae1799..db49fd0f8445 100644
5296 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
5297 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
5298 +@@ -44,7 +44,8 @@
5299 + "matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 2 ref",
5300 + "matchCount": "1",
5301 + "teardown": [
5302 +- "$TC actions flush action mirred"
5303 ++ "$TC actions flush action mirred",
5304 ++ "$TC actions flush action gact"
5305 + ]
5306 + },
5307 + {
5308 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
5309 +index c2b95a22959b..fd8c88463928 100644
5310 +--- a/virt/kvm/arm/mmu.c
5311 ++++ b/virt/kvm/arm/mmu.c
5312 +@@ -1831,13 +1831,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
5313 + void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
5314 + {
5315 + unsigned long end = hva + PAGE_SIZE;
5316 ++ kvm_pfn_t pfn = pte_pfn(pte);
5317 + pte_t stage2_pte;
5318 +
5319 + if (!kvm->arch.pgd)
5320 + return;
5321 +
5322 + trace_kvm_set_spte_hva(hva);
5323 +- stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
5324 ++
5325 ++ /*
5326 ++ * We've moved a page around, probably through CoW, so let's treat it
5327 ++ * just like a translation fault and clean the cache to the PoC.
5328 ++ */
5329 ++ clean_dcache_guest_page(pfn, PAGE_SIZE);
5330 ++ stage2_pte = pfn_pte(pfn, PAGE_S2);
5331 + handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
5332 + }
5333 +