Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.18 commit in: /
Date: Wed, 14 Nov 2018 11:38:01 +0000
Message-Id: 1542195384.8e059277ddb6ba638e6fbbc3f6f005fd4bdbac44.mpagano@gentoo
1 commit: 8e059277ddb6ba638e6fbbc3f6f005fd4bdbac44
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Sep 19 22:41:12 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Nov 14 11:36:24 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=8e059277
7
8 Linux patch 4.18.9
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1008_linux-4.18.9.patch | 5298 +++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5302 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 597262e..6534d27 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -75,6 +75,10 @@ Patch: 1007_linux-4.18.8.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.18.8
23
24 +Patch: 1008_linux-4.18.9.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.18.9
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1008_linux-4.18.9.patch b/1008_linux-4.18.9.patch
33 new file mode 100644
34 index 0000000..877b17a
35 --- /dev/null
36 +++ b/1008_linux-4.18.9.patch
37 @@ -0,0 +1,5298 @@
38 +diff --git a/Makefile b/Makefile
39 +index 0d73431f66cd..1178348fb9ca 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 18
46 +-SUBLEVEL = 8
47 ++SUBLEVEL = 9
48 + EXTRAVERSION =
49 + NAME = Merciless Moray
50 +
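
The Makefile hunk above is the entire point of a stable point release: bump SUBLEVEL so the tree identifies itself as 4.18.9. As a quick illustration of what that version triple becomes in code, here is a stand-alone sketch using the well-known KERNEL_VERSION() packing from <linux/version.h> (macro body reproduced from that era's header; values are illustrative):

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>

/* Same packing as the kernel's KERNEL_VERSION(): 8 bits each for
 * patchlevel and sublevel on kernels of this vintage. */
#define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

int main(void)
{
    unsigned int before = KERNEL_VERSION(4, 18, 8);
    unsigned int after  = KERNEL_VERSION(4, 18, 9); /* this patch */

    printf("4.18.8 -> 0x%06x\n", before);
    printf("4.18.9 -> 0x%06x\n", after);
    /* Monotonic packing is what makes version checks a plain compare. */
    printf("bump is %s\n", after > before ? "newer" : "not newer");
    return 0;
}
/* --- end sketch --- */
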
51 +diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
52 +index 47b74fbc403c..37bafd44e36d 100644
53 +--- a/arch/arc/boot/dts/axs10x_mb.dtsi
54 ++++ b/arch/arc/boot/dts/axs10x_mb.dtsi
55 +@@ -9,6 +9,10 @@
56 + */
57 +
58 + / {
59 ++ aliases {
60 ++ ethernet = &gmac;
61 ++ };
62 ++
63 + axs10x_mb {
64 + compatible = "simple-bus";
65 + #address-cells = <1>;
66 +@@ -68,7 +72,7 @@
67 + };
68 + };
69 +
70 +- ethernet@0x18000 {
71 ++ gmac: ethernet@0x18000 {
72 + #interrupt-cells = <1>;
73 + compatible = "snps,dwmac";
74 + reg = < 0x18000 0x2000 >;
75 +@@ -81,6 +85,7 @@
76 + max-speed = <100>;
77 + resets = <&creg_rst 5>;
78 + reset-names = "stmmaceth";
79 ++ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
80 + };
81 +
82 + ehci@0x40000 {
83 +diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
84 +index 006aa3de5348..d00f283094d3 100644
85 +--- a/arch/arc/boot/dts/hsdk.dts
86 ++++ b/arch/arc/boot/dts/hsdk.dts
87 +@@ -25,6 +25,10 @@
88 + bootargs = "earlycon=uart8250,mmio32,0xf0005000,115200n8 console=ttyS0,115200n8 debug print-fatal-signals=1";
89 + };
90 +
91 ++ aliases {
92 ++ ethernet = &gmac;
93 ++ };
94 ++
95 + cpus {
96 + #address-cells = <1>;
97 + #size-cells = <0>;
98 +@@ -163,7 +167,7 @@
99 + #clock-cells = <0>;
100 + };
101 +
102 +- ethernet@8000 {
103 ++ gmac: ethernet@8000 {
104 + #interrupt-cells = <1>;
105 + compatible = "snps,dwmac";
106 + reg = <0x8000 0x2000>;
107 +@@ -176,6 +180,7 @@
108 + phy-handle = <&phy0>;
109 + resets = <&cgu_rst HSDK_ETH_RESET>;
110 + reset-names = "stmmaceth";
111 ++ mac-address = [00 00 00 00 00 00]; /* Filled in by U-Boot */
112 +
113 + mdio {
114 + #address-cells = <1>;
115 +diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
116 +index a635ea972304..df848c44dacd 100644
117 +--- a/arch/arc/configs/axs101_defconfig
118 ++++ b/arch/arc/configs/axs101_defconfig
119 +@@ -1,5 +1,4 @@
120 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
121 +-# CONFIG_SWAP is not set
122 + CONFIG_SYSVIPC=y
123 + CONFIG_POSIX_MQUEUE=y
124 + # CONFIG_CROSS_MEMORY_ATTACH is not set
125 +diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
126 +index aa507e423075..bcbdc0494faa 100644
127 +--- a/arch/arc/configs/axs103_defconfig
128 ++++ b/arch/arc/configs/axs103_defconfig
129 +@@ -1,5 +1,4 @@
130 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
131 +-# CONFIG_SWAP is not set
132 + CONFIG_SYSVIPC=y
133 + CONFIG_POSIX_MQUEUE=y
134 + # CONFIG_CROSS_MEMORY_ATTACH is not set
135 +diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
136 +index eba07f468654..d145bce7ebdf 100644
137 +--- a/arch/arc/configs/axs103_smp_defconfig
138 ++++ b/arch/arc/configs/axs103_smp_defconfig
139 +@@ -1,5 +1,4 @@
140 + CONFIG_DEFAULT_HOSTNAME="ARCLinux"
141 +-# CONFIG_SWAP is not set
142 + CONFIG_SYSVIPC=y
143 + CONFIG_POSIX_MQUEUE=y
144 + # CONFIG_CROSS_MEMORY_ATTACH is not set
145 +diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
146 +index d496ef579859..ca46153d7915 100644
147 +--- a/arch/arm64/kvm/hyp/switch.c
148 ++++ b/arch/arm64/kvm/hyp/switch.c
149 +@@ -98,8 +98,10 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu)
150 + val = read_sysreg(cpacr_el1);
151 + val |= CPACR_EL1_TTA;
152 + val &= ~CPACR_EL1_ZEN;
153 +- if (!update_fp_enabled(vcpu))
154 ++ if (!update_fp_enabled(vcpu)) {
155 + val &= ~CPACR_EL1_FPEN;
156 ++ __activate_traps_fpsimd32(vcpu);
157 ++ }
158 +
159 + write_sysreg(val, cpacr_el1);
160 +
161 +@@ -114,8 +116,10 @@ static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
162 +
163 + val = CPTR_EL2_DEFAULT;
164 + val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
165 +- if (!update_fp_enabled(vcpu))
166 ++ if (!update_fp_enabled(vcpu)) {
167 + val |= CPTR_EL2_TFP;
168 ++ __activate_traps_fpsimd32(vcpu);
169 ++ }
170 +
171 + write_sysreg(val, cptr_el2);
172 + }
173 +@@ -129,7 +133,6 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
174 + if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
175 + write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
176 +
177 +- __activate_traps_fpsimd32(vcpu);
178 + if (has_vhe())
179 + activate_traps_vhe(vcpu);
180 + else
181 +diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
182 +index 4f33dbc67348..7096915f26e0 100644
183 +--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
184 ++++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
185 +@@ -184,7 +184,7 @@
186 + #address-cells = <1>;
187 + #size-cells = <0>;
188 + compatible = "mscc,ocelot-miim";
189 +- reg = <0x107009c 0x36>, <0x10700f0 0x8>;
190 ++ reg = <0x107009c 0x24>, <0x10700f0 0x8>;
191 + interrupts = <14>;
192 + status = "disabled";
193 +
194 +diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
195 +index 8505db478904..1d92efb82c37 100644
196 +--- a/arch/mips/cavium-octeon/octeon-platform.c
197 ++++ b/arch/mips/cavium-octeon/octeon-platform.c
198 +@@ -322,6 +322,7 @@ static int __init octeon_ehci_device_init(void)
199 + return 0;
200 +
201 + pd = of_find_device_by_node(ehci_node);
202 ++ of_node_put(ehci_node);
203 + if (!pd)
204 + return 0;
205 +
206 +@@ -384,6 +385,7 @@ static int __init octeon_ohci_device_init(void)
207 + return 0;
208 +
209 + pd = of_find_device_by_node(ohci_node);
210 ++ of_node_put(ohci_node);
211 + if (!pd)
212 + return 0;
213 +
214 +diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
215 +index 5ba6fcc26fa7..94a78dbbc91f 100644
216 +--- a/arch/mips/generic/init.c
217 ++++ b/arch/mips/generic/init.c
218 +@@ -204,6 +204,7 @@ void __init arch_init_irq(void)
219 + "mti,cpu-interrupt-controller");
220 + if (!cpu_has_veic && !intc_node)
221 + mips_cpu_irq_init();
222 ++ of_node_put(intc_node);
223 +
224 + irqchip_init();
225 + }
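
Both MIPS hunks above fix the same class of leak: of_find_device_by_node() and of_find_compatible_node() return a device-tree node with its reference count raised, and the caller must drop that reference with of_node_put() once done, even when the node was only needed for a test. A minimal user-space sketch of the get/put discipline (the node type and helpers are invented for illustration):

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>
#include <stdlib.h>

struct node {
    int refcount;
    const char *name;
};

static struct node *node_get(struct node *n)
{
    if (n)
        n->refcount++;
    return n;
}

static void node_put(struct node *n)
{
    if (n && --n->refcount == 0) {
        printf("freeing %s\n", n->name);
        free(n);
    }
}

/* Lookup helpers hand back a reference; forgetting the matching put
 * is exactly the leak the hunks above plug. */
static struct node *find_node(struct node *root)
{
    return node_get(root);
}

int main(void)
{
    struct node *root = malloc(sizeof(*root));

    root->refcount = 1;
    root->name = "intc";

    struct node *n = find_node(root);
    if (n)
        printf("found %s\n", n->name);
    node_put(n);    /* drop the lookup ref even though we only tested it */
    node_put(root); /* drop the original ref; the node is freed here */
    return 0;
}
/* --- end sketch --- */
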
226 +diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
227 +index cea8ad864b3f..57b34257be2b 100644
228 +--- a/arch/mips/include/asm/io.h
229 ++++ b/arch/mips/include/asm/io.h
230 +@@ -141,14 +141,14 @@ static inline void * phys_to_virt(unsigned long address)
231 + /*
232 + * ISA I/O bus memory addresses are 1:1 with the physical address.
233 + */
234 +-static inline unsigned long isa_virt_to_bus(volatile void * address)
235 ++static inline unsigned long isa_virt_to_bus(volatile void *address)
236 + {
237 +- return (unsigned long)address - PAGE_OFFSET;
238 ++ return virt_to_phys(address);
239 + }
240 +
241 +-static inline void * isa_bus_to_virt(unsigned long address)
242 ++static inline void *isa_bus_to_virt(unsigned long address)
243 + {
244 +- return (void *)(address + PAGE_OFFSET);
245 ++ return phys_to_virt(address);
246 + }
247 +
248 + #define isa_page_to_bus page_to_phys
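
The io.h hunk replaces the open-coded `address - PAGE_OFFSET` arithmetic with virt_to_phys()/phys_to_virt(), so the ISA-bus wrappers inherit whatever segment handling the central helpers do (the open-coded form was only valid for one kernel segment on MIPS). A toy stand-alone model of the same delegation, with a made-up PAGE_OFFSET that is not the real MIPS layout:

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET 0x80000000UL   /* toy linear-map base */

static uintptr_t virt_to_phys(volatile void *addr)
{
    return (uintptr_t)addr - PAGE_OFFSET;
}

static void *phys_to_virt(uintptr_t addr)
{
    return (void *)(addr + PAGE_OFFSET);
}

/* The patched wrapper just delegates, so any future fix to the core
 * translation automatically covers the ISA-bus helpers too. */
static uintptr_t isa_virt_to_bus(volatile void *addr)
{
    return virt_to_phys(addr);
}

int main(void)
{
    void *v = (void *)0x80001000UL;

    printf("virt %p -> bus 0x%lx\n", v, (unsigned long)isa_virt_to_bus(v));
    printf("round trip ok: %d\n", phys_to_virt(isa_virt_to_bus(v)) == v);
    return 0;
}
/* --- end sketch --- */
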
249 +diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
250 +index 019035d7225c..8f845f6e5f42 100644
251 +--- a/arch/mips/kernel/vdso.c
252 ++++ b/arch/mips/kernel/vdso.c
253 +@@ -13,6 +13,7 @@
254 + #include <linux/err.h>
255 + #include <linux/init.h>
256 + #include <linux/ioport.h>
257 ++#include <linux/kernel.h>
258 + #include <linux/mm.h>
259 + #include <linux/sched.h>
260 + #include <linux/slab.h>
261 +@@ -20,6 +21,7 @@
262 +
263 + #include <asm/abi.h>
264 + #include <asm/mips-cps.h>
265 ++#include <asm/page.h>
266 + #include <asm/vdso.h>
267 +
268 + /* Kernel-provided data used by the VDSO. */
269 +@@ -128,12 +130,30 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
270 + vvar_size = gic_size + PAGE_SIZE;
271 + size = vvar_size + image->size;
272 +
273 ++ /*
274 ++ * Find a region that's large enough for us to perform the
275 ++ * colour-matching alignment below.
276 ++ */
277 ++ if (cpu_has_dc_aliases)
278 ++ size += shm_align_mask + 1;
279 ++
280 + base = get_unmapped_area(NULL, 0, size, 0, 0);
281 + if (IS_ERR_VALUE(base)) {
282 + ret = base;
283 + goto out;
284 + }
285 +
286 ++ /*
287 ++ * If we suffer from dcache aliasing, ensure that the VDSO data page
288 ++ * mapping is coloured the same as the kernel's mapping of that memory.
289 ++ * This ensures that when the kernel updates the VDSO data userland
290 ++ * will observe it without requiring cache invalidations.
291 ++ */
292 ++ if (cpu_has_dc_aliases) {
293 ++ base = __ALIGN_MASK(base, shm_align_mask);
294 ++ base += ((unsigned long)&vdso_data - gic_size) & shm_align_mask;
295 ++ }
296 ++
297 + data_addr = base + gic_size;
298 + vdso_addr = data_addr + PAGE_SIZE;
299 +
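
The vdso.c hunk over-allocates the mapping by one alias window and then slides the base so the low "colour" bits of the data page agree with the kernel's own mapping of that memory, which is what lets userland observe updates without cache invalidation. __ALIGN_MASK() is the usual round-up `(x + mask) & ~mask`. Below is a simplified stand-alone sketch; the colour source and all numbers are made up (the real code derives the colour from &vdso_data minus the GIC offset):

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>

/* Round-up alignment, same shape as the kernel's __ALIGN_MASK(). */
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(unsigned long)(mask))

int main(void)
{
    unsigned long shm_align_mask = 0x3fffUL;   /* 16 KiB window - 1 */
    unsigned long kernel_addr = 0xc0002000UL;  /* colour source (made up) */
    unsigned long base = 0x77f1234UL;          /* from get_unmapped_area */

    base = __ALIGN_MASK(base, shm_align_mask); /* colour bits now zero */
    base += kernel_addr & shm_align_mask;      /* copy the colour bits in */

    printf("user base 0x%lx\n", base);
    printf("colours match: %d\n",
           (base & shm_align_mask) == (kernel_addr & shm_align_mask));
    return 0;
}
/* --- end sketch --- */
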
300 +diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
301 +index e12dfa48b478..a5893b2cdc0e 100644
302 +--- a/arch/mips/mm/c-r4k.c
303 ++++ b/arch/mips/mm/c-r4k.c
304 +@@ -835,7 +835,8 @@ static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
305 + static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
306 + {
307 + /* Catch bad driver code */
308 +- BUG_ON(size == 0);
309 ++ if (WARN_ON(size == 0))
310 ++ return;
311 +
312 + preempt_disable();
313 + if (cpu_has_inclusive_pcaches) {
314 +@@ -871,7 +872,8 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
315 + static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
316 + {
317 + /* Catch bad driver code */
318 +- BUG_ON(size == 0);
319 ++ if (WARN_ON(size == 0))
320 ++ return;
321 +
322 + preempt_disable();
323 + if (cpu_has_inclusive_pcaches) {
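
The c-r4k.c hunks demote BUG_ON() to WARN_ON() plus an early return: a zero-length DMA sync indicates a driver bug worth a loud backtrace, but not worth panicking the machine. A user-space sketch of the same "warn and bail" shape, with a stand-in WARN_ON():

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>

/* Minimal stand-in for the kernel's WARN_ON(): evaluates to the
 * condition so it can drive an early return, logging if it fired. */
#define WARN_ON(cond) \
    ((cond) ? (fprintf(stderr, "WARNING: %s at %s:%d\n", \
                       #cond, __FILE__, __LINE__), 1) : 0)

static void dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
    if (WARN_ON(size == 0))
        return;                 /* recover instead of BUG()ing out */
    printf("flushing 0x%lx..0x%lx\n", addr, addr + size - 1);
}

int main(void)
{
    dma_cache_wback_inv(0x1000, 64);  /* normal case */
    dma_cache_wback_inv(0x1000, 0);   /* buggy caller: warn, no crash */
    return 0;
}
/* --- end sketch --- */
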
324 +diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
325 +index 01ee40f11f3a..76234a14b97d 100644
326 +--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
327 ++++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
328 +@@ -9,6 +9,7 @@
329 +
330 + #include <linux/slab.h>
331 + #include <linux/cpumask.h>
332 ++#include <linux/kmemleak.h>
333 + #include <linux/percpu.h>
334 +
335 + struct vmemmap_backing {
336 +@@ -82,6 +83,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
337 +
338 + pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
339 + pgtable_gfp_flags(mm, GFP_KERNEL));
340 ++ /*
341 ++ * Don't scan the PGD for pointers, it contains references to PUDs but
342 ++ * those references are not full pointers and so can't be recognised by
343 ++ * kmemleak.
344 ++ */
345 ++ kmemleak_no_scan(pgd);
346 ++
347 + /*
348 + * With hugetlb, we don't clear the second half of the page table.
349 + * If we share the same slab cache with the pmd or pud level table,
350 +@@ -110,8 +118,19 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
351 +
352 + static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
353 + {
354 +- return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
355 +- pgtable_gfp_flags(mm, GFP_KERNEL));
356 ++ pud_t *pud;
357 ++
358 ++ pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
359 ++ pgtable_gfp_flags(mm, GFP_KERNEL));
360 ++ /*
361 ++ * Tell kmemleak to ignore the PUD, that means don't scan it for
362 ++ * pointers and don't consider it a leak. PUDs are typically only
363 ++ * referred to by their PGD, but kmemleak is not able to recognise those
364 ++ * as pointers, leading to false leak reports.
365 ++ */
366 ++ kmemleak_ignore(pud);
367 ++
368 ++ return pud;
369 + }
370 +
371 + static inline void pud_free(struct mm_struct *mm, pud_t *pud)
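
The comments added in this hunk carry the key idea: kmemleak discovers live objects by scanning memory for values that look like pointers to them, and powerpc page-table levels reference each other in a packed, non-pointer encoding, so unannotated PUDs would show up as false leaks. A stand-alone illustration of why an encoded reference defeats a naive pointer scan (scanner and encoding are toys):

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>

/* Scan a word array for an exact pointer value - roughly what a
 * conservative leak scanner does. */
static int scan_for(const uintptr_t *words, size_t n, const void *target)
{
    for (size_t i = 0; i < n; i++)
        if (words[i] == (uintptr_t)target)
            return 1;
    return 0;
}

int main(void)
{
    void *pud = malloc(64);            /* the "referenced" table */
    uintptr_t pgd[4];

    memset(pgd, 0, sizeof(pgd));
    /* Packed reference: shifted with flag bits OR'd in, so the stored
     * word no longer equals the raw pointer value. */
    pgd[0] = ((uintptr_t)pud >> 4) | 0x1;

    printf("raw pointer visible to scanner: %s\n",
           scan_for(pgd, 4, pud) ? "yes" : "no (reported as a false leak)");
    free(pud);
    return 0;
}
/* --- end sketch --- */
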
372 +diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
373 +index 176f911ee983..7efc42538ccf 100644
374 +--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
375 ++++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
376 +@@ -738,10 +738,10 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
377 + gpa, shift);
378 + kvmppc_radix_tlbie_page(kvm, gpa, shift);
379 + if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
380 +- unsigned long npages = 1;
381 ++ unsigned long psize = PAGE_SIZE;
382 + if (shift)
383 +- npages = 1ul << (shift - PAGE_SHIFT);
384 +- kvmppc_update_dirty_map(memslot, gfn, npages);
385 ++ psize = 1ul << shift;
386 ++ kvmppc_update_dirty_map(memslot, gfn, psize);
387 + }
388 + }
389 + return 0;
390 +diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c
391 +index 81b2cbce7df8..7c324eff2f22 100644
392 +--- a/arch/powerpc/platforms/4xx/msi.c
393 ++++ b/arch/powerpc/platforms/4xx/msi.c
394 +@@ -146,13 +146,19 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
395 + const u32 *sdr_addr;
396 + dma_addr_t msi_phys;
397 + void *msi_virt;
398 ++ int err;
399 +
400 + sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
401 + if (!sdr_addr)
402 +- return -1;
403 ++ return -EINVAL;
404 +
405 +- mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
406 +- mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
407 ++ msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
408 ++ if (!msi_data)
409 ++ return -EINVAL;
410 ++
411 ++ msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
412 ++ if (!msi_mask)
413 ++ return -EINVAL;
414 +
415 + msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
416 + if (!msi->msi_dev)
417 +@@ -160,30 +166,30 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
418 +
419 + msi->msi_regs = of_iomap(msi->msi_dev, 0);
420 + if (!msi->msi_regs) {
421 +- dev_err(&dev->dev, "of_iomap problem failed\n");
422 +- return -ENOMEM;
423 ++ dev_err(&dev->dev, "of_iomap failed\n");
424 ++ err = -ENOMEM;
425 ++ goto node_put;
426 + }
427 + dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
428 + (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
429 +
430 + msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
431 +- if (!msi_virt)
432 +- return -ENOMEM;
433 ++ if (!msi_virt) {
434 ++ err = -ENOMEM;
435 ++ goto iounmap;
436 ++ }
437 + msi->msi_addr_hi = upper_32_bits(msi_phys);
438 + msi->msi_addr_lo = lower_32_bits(msi_phys & 0xffffffff);
439 + dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
440 + msi->msi_addr_hi, msi->msi_addr_lo);
441 +
442 ++ mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
443 ++ mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
444 ++
445 + /* Progam the Interrupt handler Termination addr registers */
446 + out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
447 + out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
448 +
449 +- msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
450 +- if (!msi_data)
451 +- return -1;
452 +- msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
453 +- if (!msi_mask)
454 +- return -1;
455 + /* Program MSI Expected data and Mask bits */
456 + out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
457 + out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
458 +@@ -191,6 +197,12 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
459 + dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);
460 +
461 + return 0;
462 ++
463 ++iounmap:
464 ++ iounmap(msi->msi_regs);
465 ++node_put:
466 ++ of_node_put(msi->msi_dev);
467 ++ return err;
468 + }
469 +
470 + static int ppc4xx_of_msi_remove(struct platform_device *dev)
471 +@@ -209,7 +221,6 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev)
472 + msi_bitmap_free(&msi->bitmap);
473 + iounmap(msi->msi_regs);
474 + of_node_put(msi->msi_dev);
475 +- kfree(msi);
476 +
477 + return 0;
478 + }
479 +@@ -223,18 +234,16 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
480 +
481 + dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
482 +
483 +- msi = kzalloc(sizeof(*msi), GFP_KERNEL);
484 +- if (!msi) {
485 +- dev_err(&dev->dev, "No memory for MSI structure\n");
486 ++ msi = devm_kzalloc(&dev->dev, sizeof(*msi), GFP_KERNEL);
487 ++ if (!msi)
488 + return -ENOMEM;
489 +- }
490 + dev->dev.platform_data = msi;
491 +
492 + /* Get MSI ranges */
493 + err = of_address_to_resource(dev->dev.of_node, 0, &res);
494 + if (err) {
495 + dev_err(&dev->dev, "%pOF resource error!\n", dev->dev.of_node);
496 +- goto error_out;
497 ++ return err;
498 + }
499 +
500 + msi_irqs = of_irq_count(dev->dev.of_node);
501 +@@ -243,7 +252,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
502 +
503 + err = ppc4xx_setup_pcieh_hw(dev, res, msi);
504 + if (err)
505 +- goto error_out;
506 ++ return err;
507 +
508 + err = ppc4xx_msi_init_allocator(dev, msi);
509 + if (err) {
510 +@@ -256,7 +265,7 @@ static int ppc4xx_msi_probe(struct platform_device *dev)
511 + phb->controller_ops.setup_msi_irqs = ppc4xx_setup_msi_irqs;
512 + phb->controller_ops.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
513 + }
514 +- return err;
515 ++ return 0;
516 +
517 + error_out:
518 + ppc4xx_of_msi_remove(dev);
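
The 4xx/msi.c rework above is a textbook goto-based unwind: do all the "can this fail?" property lookups before touching hardware, acquire resources in order, and on failure jump to a label that releases only what is already held, in reverse order (the remaining cleanup moves to devm). A minimal sketch of the shape, with malloc() standing in for of_iomap() and dma_alloc_coherent():

/* --- begin illustrative sketch (not part of the patch) --- */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
    char *regs, *buf;
    int err;

    regs = malloc(32);              /* stand-in for of_iomap() */
    if (!regs) {
        err = -ENOMEM;
        goto out;
    }

    buf = malloc(64);               /* stand-in for dma_alloc_coherent() */
    if (!buf) {
        err = -ENOMEM;
        goto unmap;                 /* release only what we already hold */
    }

    printf("programming hardware with both resources held\n");
    free(buf);    /* scratch buffer, freed on success too (like msi_virt) */
    free(regs);   /* the driver instead keeps this mapped until remove() */
    return 0;

unmap:
    free(regs);
out:
    return err;
}

int main(void)
{
    return setup() ? 1 : 0;
}
/* --- end sketch --- */
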
519 +diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
520 +index 8cdf91f5d3a4..c773465b2c95 100644
521 +--- a/arch/powerpc/platforms/powernv/npu-dma.c
522 ++++ b/arch/powerpc/platforms/powernv/npu-dma.c
523 +@@ -437,8 +437,9 @@ static int get_mmio_atsd_reg(struct npu *npu)
524 + int i;
525 +
526 + for (i = 0; i < npu->mmio_atsd_count; i++) {
527 +- if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
528 +- return i;
529 ++ if (!test_bit(i, &npu->mmio_atsd_usage))
530 ++ if (!test_and_set_bit_lock(i, &npu->mmio_atsd_usage))
531 ++ return i;
532 + }
533 +
534 + return -ENOSPC;
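
The npu-dma.c change puts a cheap, unlocked test_bit() in front of the locked test_and_set_bit_lock(): contending CPUs can now skip obviously-busy bits with a plain shared read instead of bouncing the cacheline with an atomic read-modify-write, and only attempt the atomic claim when the bit looks free. A C11-atomics sketch of the same test-then-test-and-set pattern (single-threaded here, just to exercise the helper):

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdatomic.h>
#include <stdio.h>

#define NREGS 8

static atomic_ulong usage;          /* one in-use bit per register */

static int get_reg(void)
{
    for (int i = 0; i < NREGS; i++) {
        /* Cheap shared read first: skip clearly-busy bits without a
         * locked read-modify-write bouncing the cacheline around. */
        if (atomic_load_explicit(&usage, memory_order_relaxed) & (1UL << i))
            continue;
        /* Looks free: claim it atomically and re-check the old value. */
        if (!(atomic_fetch_or(&usage, 1UL << i) & (1UL << i)))
            return i;
    }
    return -1;                      /* the kernel code returns -ENOSPC */
}

int main(void)
{
    printf("got reg %d\n", get_reg());
    printf("got reg %d\n", get_reg());
    return 0;
}
/* --- end sketch --- */
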
535 +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
536 +index 8a4868a3964b..cb098e962ffe 100644
537 +--- a/arch/powerpc/platforms/pseries/setup.c
538 ++++ b/arch/powerpc/platforms/pseries/setup.c
539 +@@ -647,6 +647,15 @@ void of_pci_parse_iov_addrs(struct pci_dev *dev, const int *indexes)
540 + }
541 + }
542 +
543 ++static void pseries_disable_sriov_resources(struct pci_dev *pdev)
544 ++{
545 ++ int i;
546 ++
547 ++ pci_warn(pdev, "No hypervisor support for SR-IOV on this device, IOV BARs disabled.\n");
548 ++ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
549 ++ pdev->resource[i + PCI_IOV_RESOURCES].flags = 0;
550 ++}
551 ++
552 + static void pseries_pci_fixup_resources(struct pci_dev *pdev)
553 + {
554 + const int *indexes;
555 +@@ -654,10 +663,10 @@ static void pseries_pci_fixup_resources(struct pci_dev *pdev)
556 +
557 + /*Firmware must support open sriov otherwise dont configure*/
558 + indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
559 +- if (!indexes)
560 +- return;
561 +- /* Assign the addresses from device tree*/
562 +- of_pci_set_vf_bar_size(pdev, indexes);
563 ++ if (indexes)
564 ++ of_pci_set_vf_bar_size(pdev, indexes);
565 ++ else
566 ++ pseries_disable_sriov_resources(pdev);
567 + }
568 +
569 + static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
570 +@@ -669,10 +678,10 @@ static void pseries_pci_fixup_iov_resources(struct pci_dev *pdev)
571 + return;
572 + /*Firmware must support open sriov otherwise dont configure*/
573 + indexes = of_get_property(dn, "ibm,open-sriov-vf-bar-info", NULL);
574 +- if (!indexes)
575 +- return;
576 +- /* Assign the addresses from device tree*/
577 +- of_pci_parse_iov_addrs(pdev, indexes);
578 ++ if (indexes)
579 ++ of_pci_parse_iov_addrs(pdev, indexes);
580 ++ else
581 ++ pseries_disable_sriov_resources(pdev);
582 + }
583 +
584 + static resource_size_t pseries_pci_iov_resource_alignment(struct pci_dev *pdev,
585 +diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
586 +index 84c89cb9636f..cbdd8341f17e 100644
587 +--- a/arch/s390/kvm/vsie.c
588 ++++ b/arch/s390/kvm/vsie.c
589 +@@ -173,7 +173,8 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
590 + return set_validity_icpt(scb_s, 0x0039U);
591 +
592 + /* copy only the wrapping keys */
593 +- if (read_guest_real(vcpu, crycb_addr + 72, &vsie_page->crycb, 56))
594 ++ if (read_guest_real(vcpu, crycb_addr + 72,
595 ++ vsie_page->crycb.dea_wrapping_key_mask, 56))
596 + return set_validity_icpt(scb_s, 0x0035U);
597 +
598 + scb_s->ecb3 |= ecb3_flags;
599 +diff --git a/arch/x86/include/asm/kdebug.h b/arch/x86/include/asm/kdebug.h
600 +index 395c9631e000..75f1e35e7c15 100644
601 +--- a/arch/x86/include/asm/kdebug.h
602 ++++ b/arch/x86/include/asm/kdebug.h
603 +@@ -22,10 +22,20 @@ enum die_val {
604 + DIE_NMIUNKNOWN,
605 + };
606 +
607 ++enum show_regs_mode {
608 ++ SHOW_REGS_SHORT,
609 ++ /*
610 ++ * For when userspace crashed, but we don't think it's our fault, and
611 ++ * therefore don't print kernel registers.
612 ++ */
613 ++ SHOW_REGS_USER,
614 ++ SHOW_REGS_ALL
615 ++};
616 ++
617 + extern void die(const char *, struct pt_regs *,long);
618 + extern int __must_check __die(const char *, struct pt_regs *, long);
619 + extern void show_stack_regs(struct pt_regs *regs);
620 +-extern void __show_regs(struct pt_regs *regs, int all);
621 ++extern void __show_regs(struct pt_regs *regs, enum show_regs_mode);
622 + extern void show_iret_regs(struct pt_regs *regs);
623 + extern unsigned long oops_begin(void);
624 + extern void oops_end(unsigned long, struct pt_regs *, int signr);
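
The kdebug.h hunk replaces __show_regs()'s bare int flag with a three-state enum; the new SHOW_REGS_USER case (user FS/GS, but no kernel control registers) simply does not fit in a boolean, and the enum makes every call site self-describing. A compact sketch of the shape (output strings are placeholders):

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>

enum show_regs_mode { SHOW_REGS_SHORT, SHOW_REGS_USER, SHOW_REGS_ALL };

static void show_regs(enum show_regs_mode mode)
{
    printf("general-purpose registers\n");    /* always printed */
    if (mode == SHOW_REGS_SHORT)
        return;
    if (mode == SHOW_REGS_USER) {             /* the new middle ground */
        printf("user FS/GS only\n");
        return;
    }
    printf("CR0..CR4 and segment registers\n"); /* SHOW_REGS_ALL */
}

int main(void)
{
    show_regs(SHOW_REGS_USER);
    return 0;
}
/* --- end sketch --- */
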
625 +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
626 +index acebb808c4b5..0722b7745382 100644
627 +--- a/arch/x86/include/asm/kvm_host.h
628 ++++ b/arch/x86/include/asm/kvm_host.h
629 +@@ -1198,18 +1198,22 @@ enum emulation_result {
630 + #define EMULTYPE_NO_DECODE (1 << 0)
631 + #define EMULTYPE_TRAP_UD (1 << 1)
632 + #define EMULTYPE_SKIP (1 << 2)
633 +-#define EMULTYPE_RETRY (1 << 3)
634 +-#define EMULTYPE_NO_REEXECUTE (1 << 4)
635 +-#define EMULTYPE_NO_UD_ON_FAIL (1 << 5)
636 +-#define EMULTYPE_VMWARE (1 << 6)
637 ++#define EMULTYPE_ALLOW_RETRY (1 << 3)
638 ++#define EMULTYPE_NO_UD_ON_FAIL (1 << 4)
639 ++#define EMULTYPE_VMWARE (1 << 5)
640 + int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
641 + int emulation_type, void *insn, int insn_len);
642 +
643 + static inline int emulate_instruction(struct kvm_vcpu *vcpu,
644 + int emulation_type)
645 + {
646 +- return x86_emulate_instruction(vcpu, 0,
647 +- emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
648 ++ return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
649 ++}
650 ++
651 ++static inline int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
652 ++ void *insn, int insn_len)
653 ++{
654 ++ return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len);
655 + }
656 +
657 + void kvm_enable_efer_bits(u64);
658 +diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
659 +index c9b773401fd8..21d1fa5eaa5f 100644
660 +--- a/arch/x86/kernel/apic/vector.c
661 ++++ b/arch/x86/kernel/apic/vector.c
662 +@@ -422,7 +422,7 @@ static int activate_managed(struct irq_data *irqd)
663 + if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) {
664 + /* Something in the core code broke! Survive gracefully */
665 + pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq);
666 +- return EINVAL;
667 ++ return -EINVAL;
668 + }
669 +
670 + ret = assign_managed_vector(irqd, vector_searchmask);
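
The vector.c fix above is a one-character classic: in-kernel error returns are negative errno values, so the bare positive `EINVAL` would have sailed past every `ret < 0` check as a bogus success. A user-space demonstration of the convention:

/* --- begin illustrative sketch (not part of the patch) --- */
#include <errno.h>
#include <stdio.h>

static int activate(int broken)
{
    if (broken)
        return -EINVAL;     /* negative errno: callers test ret < 0 */
    return 0;
}

int main(void)
{
    int ret = activate(1);

    if (ret < 0)
        printf("failed with %d (EINVAL itself is %d)\n", ret, EINVAL);
    /* A bare 'return EINVAL;' is positive, passes every ret < 0 check,
     * and gets treated as success - exactly the bug fixed above. */
    return 0;
}
/* --- end sketch --- */
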
671 +diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
672 +index 0624957aa068..07b5fc00b188 100644
673 +--- a/arch/x86/kernel/cpu/microcode/amd.c
674 ++++ b/arch/x86/kernel/cpu/microcode/amd.c
675 +@@ -504,6 +504,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
676 + struct microcode_amd *mc_amd;
677 + struct ucode_cpu_info *uci;
678 + struct ucode_patch *p;
679 ++ enum ucode_state ret;
680 + u32 rev, dummy;
681 +
682 + BUG_ON(raw_smp_processor_id() != cpu);
683 +@@ -521,9 +522,8 @@ static enum ucode_state apply_microcode_amd(int cpu)
684 +
685 + /* need to apply patch? */
686 + if (rev >= mc_amd->hdr.patch_id) {
687 +- c->microcode = rev;
688 +- uci->cpu_sig.rev = rev;
689 +- return UCODE_OK;
690 ++ ret = UCODE_OK;
691 ++ goto out;
692 + }
693 +
694 + if (__apply_microcode_amd(mc_amd)) {
695 +@@ -531,13 +531,21 @@ static enum ucode_state apply_microcode_amd(int cpu)
696 + cpu, mc_amd->hdr.patch_id);
697 + return UCODE_ERROR;
698 + }
699 +- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
700 +- mc_amd->hdr.patch_id);
701 +
702 +- uci->cpu_sig.rev = mc_amd->hdr.patch_id;
703 +- c->microcode = mc_amd->hdr.patch_id;
704 ++ rev = mc_amd->hdr.patch_id;
705 ++ ret = UCODE_UPDATED;
706 ++
707 ++ pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
708 +
709 +- return UCODE_UPDATED;
710 ++out:
711 ++ uci->cpu_sig.rev = rev;
712 ++ c->microcode = rev;
713 ++
714 ++ /* Update boot_cpu_data's revision too, if we're on the BSP: */
715 ++ if (c->cpu_index == boot_cpu_data.cpu_index)
716 ++ boot_cpu_data.microcode = rev;
717 ++
718 ++ return ret;
719 + }
720 +
721 + static int install_equiv_cpu_table(const u8 *buf)
722 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
723 +index 97ccf4c3b45b..16936a24795c 100644
724 +--- a/arch/x86/kernel/cpu/microcode/intel.c
725 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
726 +@@ -795,6 +795,7 @@ static enum ucode_state apply_microcode_intel(int cpu)
727 + struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
728 + struct cpuinfo_x86 *c = &cpu_data(cpu);
729 + struct microcode_intel *mc;
730 ++ enum ucode_state ret;
731 + static int prev_rev;
732 + u32 rev;
733 +
734 +@@ -817,9 +818,8 @@ static enum ucode_state apply_microcode_intel(int cpu)
735 + */
736 + rev = intel_get_microcode_revision();
737 + if (rev >= mc->hdr.rev) {
738 +- uci->cpu_sig.rev = rev;
739 +- c->microcode = rev;
740 +- return UCODE_OK;
741 ++ ret = UCODE_OK;
742 ++ goto out;
743 + }
744 +
745 + /*
746 +@@ -848,10 +848,17 @@ static enum ucode_state apply_microcode_intel(int cpu)
747 + prev_rev = rev;
748 + }
749 +
750 ++ ret = UCODE_UPDATED;
751 ++
752 ++out:
753 + uci->cpu_sig.rev = rev;
754 +- c->microcode = rev;
755 ++ c->microcode = rev;
756 ++
757 ++ /* Update boot_cpu_data's revision too, if we're on the BSP: */
758 ++ if (c->cpu_index == boot_cpu_data.cpu_index)
759 ++ boot_cpu_data.microcode = rev;
760 +
761 +- return UCODE_UPDATED;
762 ++ return ret;
763 + }
764 +
765 + static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
766 +diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
767 +index 17b02adc79aa..0c5a9fc6e36d 100644
768 +--- a/arch/x86/kernel/dumpstack.c
769 ++++ b/arch/x86/kernel/dumpstack.c
770 +@@ -155,7 +155,7 @@ static void show_regs_if_on_stack(struct stack_info *info, struct pt_regs *regs,
771 + * they can be printed in the right context.
772 + */
773 + if (!partial && on_stack(info, regs, sizeof(*regs))) {
774 +- __show_regs(regs, 0);
775 ++ __show_regs(regs, SHOW_REGS_SHORT);
776 +
777 + } else if (partial && on_stack(info, (void *)regs + IRET_FRAME_OFFSET,
778 + IRET_FRAME_SIZE)) {
779 +@@ -353,7 +353,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
780 + oops_exit();
781 +
782 + /* Executive summary in case the oops scrolled away */
783 +- __show_regs(&exec_summary_regs, true);
784 ++ __show_regs(&exec_summary_regs, SHOW_REGS_ALL);
785 +
786 + if (!signr)
787 + return;
788 +@@ -416,14 +416,9 @@ void die(const char *str, struct pt_regs *regs, long err)
789 +
790 + void show_regs(struct pt_regs *regs)
791 + {
792 +- bool all = true;
793 +-
794 + show_regs_print_info(KERN_DEFAULT);
795 +
796 +- if (IS_ENABLED(CONFIG_X86_32))
797 +- all = !user_mode(regs);
798 +-
799 +- __show_regs(regs, all);
800 ++ __show_regs(regs, user_mode(regs) ? SHOW_REGS_USER : SHOW_REGS_ALL);
801 +
802 + /*
803 + * When in-kernel, we also print out the stack at the time of the fault..
804 +diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
805 +index 0ae659de21eb..666d1825390d 100644
806 +--- a/arch/x86/kernel/process_32.c
807 ++++ b/arch/x86/kernel/process_32.c
808 +@@ -59,7 +59,7 @@
809 + #include <asm/intel_rdt_sched.h>
810 + #include <asm/proto.h>
811 +
812 +-void __show_regs(struct pt_regs *regs, int all)
813 ++void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
814 + {
815 + unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
816 + unsigned long d0, d1, d2, d3, d6, d7;
817 +@@ -85,7 +85,7 @@ void __show_regs(struct pt_regs *regs, int all)
818 + printk(KERN_DEFAULT "DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x EFLAGS: %08lx\n",
819 + (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss, regs->flags);
820 +
821 +- if (!all)
822 ++ if (mode != SHOW_REGS_ALL)
823 + return;
824 +
825 + cr0 = read_cr0();
826 +diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
827 +index 4344a032ebe6..0091a733c1cf 100644
828 +--- a/arch/x86/kernel/process_64.c
829 ++++ b/arch/x86/kernel/process_64.c
830 +@@ -62,7 +62,7 @@
831 + __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
832 +
833 + /* Prints also some state that isn't saved in the pt_regs */
834 +-void __show_regs(struct pt_regs *regs, int all)
835 ++void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
836 + {
837 + unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
838 + unsigned long d0, d1, d2, d3, d6, d7;
839 +@@ -87,9 +87,17 @@ void __show_regs(struct pt_regs *regs, int all)
840 + printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
841 + regs->r13, regs->r14, regs->r15);
842 +
843 +- if (!all)
844 ++ if (mode == SHOW_REGS_SHORT)
845 + return;
846 +
847 ++ if (mode == SHOW_REGS_USER) {
848 ++ rdmsrl(MSR_FS_BASE, fs);
849 ++ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
850 ++ printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
851 ++ fs, shadowgs);
852 ++ return;
853 ++ }
854 ++
855 + asm("movl %%ds,%0" : "=r" (ds));
856 + asm("movl %%cs,%0" : "=r" (cs));
857 + asm("movl %%es,%0" : "=r" (es));
858 +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
859 +index 42f1ba92622a..97d41754769e 100644
860 +--- a/arch/x86/kvm/mmu.c
861 ++++ b/arch/x86/kvm/mmu.c
862 +@@ -4960,7 +4960,7 @@ static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
863 + int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
864 + void *insn, int insn_len)
865 + {
866 +- int r, emulation_type = EMULTYPE_RETRY;
867 ++ int r, emulation_type = 0;
868 + enum emulation_result er;
869 + bool direct = vcpu->arch.mmu.direct_map;
870 +
871 +@@ -4973,10 +4973,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
872 + r = RET_PF_INVALID;
873 + if (unlikely(error_code & PFERR_RSVD_MASK)) {
874 + r = handle_mmio_page_fault(vcpu, cr2, direct);
875 +- if (r == RET_PF_EMULATE) {
876 +- emulation_type = 0;
877 ++ if (r == RET_PF_EMULATE)
878 + goto emulate;
879 +- }
880 + }
881 +
882 + if (r == RET_PF_INVALID) {
883 +@@ -5003,8 +5001,19 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
884 + return 1;
885 + }
886 +
887 +- if (mmio_info_in_cache(vcpu, cr2, direct))
888 +- emulation_type = 0;
889 ++ /*
890 ++ * vcpu->arch.mmu.page_fault returned RET_PF_EMULATE, but we can still
891 ++ * optimistically try to just unprotect the page and let the processor
892 ++ * re-execute the instruction that caused the page fault. Do not allow
893 ++ * retrying MMIO emulation, as it's not only pointless but could also
894 ++ * cause us to enter an infinite loop because the processor will keep
895 ++ * faulting on the non-existent MMIO address. Retrying an instruction
896 ++ * from a nested guest is also pointless and dangerous as we are only
897 ++ * explicitly shadowing L1's page tables, i.e. unprotecting something
898 ++ * for L1 isn't going to magically fix whatever issue cause L2 to fail.
899 ++ */
900 ++ if (!mmio_info_in_cache(vcpu, cr2, direct) && !is_guest_mode(vcpu))
901 ++ emulation_type = EMULTYPE_ALLOW_RETRY;
902 + emulate:
903 + /*
904 + * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
905 +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
906 +index 9799f86388e7..ef772e5634d4 100644
907 +--- a/arch/x86/kvm/svm.c
908 ++++ b/arch/x86/kvm/svm.c
909 +@@ -3875,8 +3875,8 @@ static int emulate_on_interception(struct vcpu_svm *svm)
910 +
911 + static int rsm_interception(struct vcpu_svm *svm)
912 + {
913 +- return x86_emulate_instruction(&svm->vcpu, 0, 0,
914 +- rsm_ins_bytes, 2) == EMULATE_DONE;
915 ++ return kvm_emulate_instruction_from_buffer(&svm->vcpu,
916 ++ rsm_ins_bytes, 2) == EMULATE_DONE;
917 + }
918 +
919 + static int rdpmc_interception(struct vcpu_svm *svm)
920 +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
921 +index 9869bfd0c601..d0c3be353bb6 100644
922 +--- a/arch/x86/kvm/vmx.c
923 ++++ b/arch/x86/kvm/vmx.c
924 +@@ -7539,8 +7539,8 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
925 + if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
926 + return kvm_skip_emulated_instruction(vcpu);
927 + else
928 +- return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
929 +- NULL, 0) == EMULATE_DONE;
930 ++ return emulate_instruction(vcpu, EMULTYPE_SKIP) ==
931 ++ EMULATE_DONE;
932 + }
933 +
934 + return kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
935 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
936 +index 94cd63081471..97fcac34e007 100644
937 +--- a/arch/x86/kvm/x86.c
938 ++++ b/arch/x86/kvm/x86.c
939 +@@ -5810,7 +5810,10 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
940 + gpa_t gpa = cr2;
941 + kvm_pfn_t pfn;
942 +
943 +- if (emulation_type & EMULTYPE_NO_REEXECUTE)
944 ++ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
945 ++ return false;
946 ++
947 ++ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
948 + return false;
949 +
950 + if (!vcpu->arch.mmu.direct_map) {
951 +@@ -5898,7 +5901,10 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
952 + */
953 + vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
954 +
955 +- if (!(emulation_type & EMULTYPE_RETRY))
956 ++ if (!(emulation_type & EMULTYPE_ALLOW_RETRY))
957 ++ return false;
958 ++
959 ++ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
960 + return false;
961 +
962 + if (x86_page_table_writing_insn(ctxt))
963 +diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
964 +index d1f1612672c7..045338ac1667 100644
965 +--- a/arch/x86/mm/fault.c
966 ++++ b/arch/x86/mm/fault.c
967 +@@ -317,8 +317,6 @@ static noinline int vmalloc_fault(unsigned long address)
968 + if (!(address >= VMALLOC_START && address < VMALLOC_END))
969 + return -1;
970 +
971 +- WARN_ON_ONCE(in_nmi());
972 +-
973 + /*
974 + * Synchronize this task's top level page-table
975 + * with the 'reference' page table.
976 +diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
977 +index 58c6efa9f9a9..9fe5952d117d 100644
978 +--- a/block/bfq-cgroup.c
979 ++++ b/block/bfq-cgroup.c
980 +@@ -275,9 +275,9 @@ static void bfqg_and_blkg_get(struct bfq_group *bfqg)
981 +
982 + void bfqg_and_blkg_put(struct bfq_group *bfqg)
983 + {
984 +- bfqg_put(bfqg);
985 +-
986 + blkg_put(bfqg_to_blkg(bfqg));
987 ++
988 ++ bfqg_put(bfqg);
989 + }
990 +
991 + /* @stats = 0 */
992 +diff --git a/block/blk-core.c b/block/blk-core.c
993 +index 746a5eac4541..cbaca5a73f2e 100644
994 +--- a/block/blk-core.c
995 ++++ b/block/blk-core.c
996 +@@ -2161,9 +2161,12 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
997 + {
998 + const int op = bio_op(bio);
999 +
1000 +- if (part->policy && (op_is_write(op) && !op_is_flush(op))) {
1001 ++ if (part->policy && op_is_write(op)) {
1002 + char b[BDEVNAME_SIZE];
1003 +
1004 ++ if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
1005 ++ return false;
1006 ++
1007 + WARN_ONCE(1,
1008 + "generic_make_request: Trying to write "
1009 + "to read-only block-device %s (partno %d)\n",
1010 +diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
1011 +index d5f2c21d8531..816923bf874d 100644
1012 +--- a/block/blk-mq-tag.c
1013 ++++ b/block/blk-mq-tag.c
1014 +@@ -402,8 +402,6 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
1015 + if (tdepth <= tags->nr_reserved_tags)
1016 + return -EINVAL;
1017 +
1018 +- tdepth -= tags->nr_reserved_tags;
1019 +-
1020 + /*
1021 + * If we are allowed to grow beyond the original size, allocate
1022 + * a new set of tags before freeing the old one.
1023 +@@ -423,7 +421,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
1024 + if (tdepth > 16 * BLKDEV_MAX_RQ)
1025 + return -EINVAL;
1026 +
1027 +- new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
1028 ++ new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth,
1029 ++ tags->nr_reserved_tags);
1030 + if (!new)
1031 + return -ENOMEM;
1032 + ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
1033 +@@ -440,7 +439,8 @@ int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
1034 + * Don't need (or can't) update reserved tags here, they
1035 + * remain static and should never need resizing.
1036 + */
1037 +- sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
1038 ++ sbitmap_queue_resize(&tags->bitmap_tags,
1039 ++ tdepth - tags->nr_reserved_tags);
1040 + }
1041 +
1042 + return 0;
1043 +diff --git a/block/partitions/aix.c b/block/partitions/aix.c
1044 +index 007f95eea0e1..903f3ed175d0 100644
1045 +--- a/block/partitions/aix.c
1046 ++++ b/block/partitions/aix.c
1047 +@@ -178,7 +178,7 @@ int aix_partition(struct parsed_partitions *state)
1048 + u32 vgda_sector = 0;
1049 + u32 vgda_len = 0;
1050 + int numlvs = 0;
1051 +- struct pvd *pvd;
1052 ++ struct pvd *pvd = NULL;
1053 + struct lv_info {
1054 + unsigned short pps_per_lv;
1055 + unsigned short pps_found;
1056 +@@ -232,10 +232,11 @@ int aix_partition(struct parsed_partitions *state)
1057 + if (lvip[i].pps_per_lv)
1058 + foundlvs += 1;
1059 + }
1060 ++ /* pvd loops depend on n[].name and lvip[].pps_per_lv */
1061 ++ pvd = alloc_pvd(state, vgda_sector + 17);
1062 + }
1063 + put_dev_sector(sect);
1064 + }
1065 +- pvd = alloc_pvd(state, vgda_sector + 17);
1066 + if (pvd) {
1067 + int numpps = be16_to_cpu(pvd->pp_count);
1068 + int psn_part1 = be32_to_cpu(pvd->psn_part1);
1069 +@@ -282,10 +283,14 @@ int aix_partition(struct parsed_partitions *state)
1070 + next_lp_ix += 1;
1071 + }
1072 + for (i = 0; i < state->limit; i += 1)
1073 +- if (lvip[i].pps_found && !lvip[i].lv_is_contiguous)
1074 ++ if (lvip[i].pps_found && !lvip[i].lv_is_contiguous) {
1075 ++ char tmp[sizeof(n[i].name) + 1]; // null char
1076 ++
1077 ++ snprintf(tmp, sizeof(tmp), "%s", n[i].name);
1078 + pr_warn("partition %s (%u pp's found) is "
1079 + "not contiguous\n",
1080 +- n[i].name, lvip[i].pps_found);
1081 ++ tmp, lvip[i].pps_found);
1082 ++ }
1083 + kfree(pvd);
1084 + }
1085 + kfree(n);
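
The aix.c hunk deals with fixed-width on-disk LV names that need not be NUL-terminated: printing them through a bare %s can read past the field, so the fix copies the name into a buffer one byte larger first. In the kernel the snprintf() copy is bounded by the destination size; the portable user-space equivalent below uses an explicit precision so %s never needs a terminator (names and sizes are illustrative):

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>
#include <string.h>

struct lvname {
    char name[8];   /* fixed on-disk field, not necessarily NUL-terminated */
};

int main(void)
{
    struct lvname n;
    char tmp[sizeof(n.name) + 1];         /* +1 for the terminator */

    memcpy(n.name, "ROOTVG00", 8);        /* all 8 bytes used, no NUL */

    /* With an explicit precision, %s reads at most 8 bytes and never
     * requires a terminating NUL in the source array. */
    snprintf(tmp, sizeof(tmp), "%.*s", (int)sizeof(n.name), n.name);

    printf("partition %s is not contiguous\n", tmp);
    return 0;
}
/* --- end sketch --- */
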
1086 +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
1087 +index 9706613eecf9..bf64cfa30feb 100644
1088 +--- a/drivers/acpi/acpi_lpss.c
1089 ++++ b/drivers/acpi/acpi_lpss.c
1090 +@@ -879,7 +879,7 @@ static void acpi_lpss_dismiss(struct device *dev)
1091 + #define LPSS_GPIODEF0_DMA_LLP BIT(13)
1092 +
1093 + static DEFINE_MUTEX(lpss_iosf_mutex);
1094 +-static bool lpss_iosf_d3_entered;
1095 ++static bool lpss_iosf_d3_entered = true;
1096 +
1097 + static void lpss_iosf_enter_d3_state(void)
1098 + {
1099 +diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
1100 +index 2628806c64a2..3d5277a39097 100644
1101 +--- a/drivers/android/binder_alloc.c
1102 ++++ b/drivers/android/binder_alloc.c
1103 +@@ -327,6 +327,35 @@ err_no_vma:
1104 + return vma ? -ENOMEM : -ESRCH;
1105 + }
1106 +
1107 ++
1108 ++static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
1109 ++ struct vm_area_struct *vma)
1110 ++{
1111 ++ if (vma)
1112 ++ alloc->vma_vm_mm = vma->vm_mm;
1113 ++ /*
1114 ++ * If we see alloc->vma is not NULL, buffer data structures set up
1115 ++ * completely. Look at smp_rmb side binder_alloc_get_vma.
1116 ++ * We also want to guarantee new alloc->vma_vm_mm is always visible
1117 ++ * if alloc->vma is set.
1118 ++ */
1119 ++ smp_wmb();
1120 ++ alloc->vma = vma;
1121 ++}
1122 ++
1123 ++static inline struct vm_area_struct *binder_alloc_get_vma(
1124 ++ struct binder_alloc *alloc)
1125 ++{
1126 ++ struct vm_area_struct *vma = NULL;
1127 ++
1128 ++ if (alloc->vma) {
1129 ++ /* Look at description in binder_alloc_set_vma */
1130 ++ smp_rmb();
1131 ++ vma = alloc->vma;
1132 ++ }
1133 ++ return vma;
1134 ++}
1135 ++
1136 + static struct binder_buffer *binder_alloc_new_buf_locked(
1137 + struct binder_alloc *alloc,
1138 + size_t data_size,
1139 +@@ -343,7 +372,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
1140 + size_t size, data_offsets_size;
1141 + int ret;
1142 +
1143 +- if (alloc->vma == NULL) {
1144 ++ if (!binder_alloc_get_vma(alloc)) {
1145 + pr_err("%d: binder_alloc_buf, no vma\n",
1146 + alloc->pid);
1147 + return ERR_PTR(-ESRCH);
1148 +@@ -714,9 +743,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
1149 + buffer->free = 1;
1150 + binder_insert_free_buffer(alloc, buffer);
1151 + alloc->free_async_space = alloc->buffer_size / 2;
1152 +- barrier();
1153 +- alloc->vma = vma;
1154 +- alloc->vma_vm_mm = vma->vm_mm;
1155 ++ binder_alloc_set_vma(alloc, vma);
1156 + mmgrab(alloc->vma_vm_mm);
1157 +
1158 + return 0;
1159 +@@ -743,10 +770,10 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
1160 + int buffers, page_count;
1161 + struct binder_buffer *buffer;
1162 +
1163 +- BUG_ON(alloc->vma);
1164 +-
1165 + buffers = 0;
1166 + mutex_lock(&alloc->mutex);
1167 ++ BUG_ON(alloc->vma);
1168 ++
1169 + while ((n = rb_first(&alloc->allocated_buffers))) {
1170 + buffer = rb_entry(n, struct binder_buffer, rb_node);
1171 +
1172 +@@ -889,7 +916,7 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
1173 + */
1174 + void binder_alloc_vma_close(struct binder_alloc *alloc)
1175 + {
1176 +- WRITE_ONCE(alloc->vma, NULL);
1177 ++ binder_alloc_set_vma(alloc, NULL);
1178 + }
1179 +
1180 + /**
1181 +@@ -924,7 +951,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
1182 +
1183 + index = page - alloc->pages;
1184 + page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
1185 +- vma = alloc->vma;
1186 ++ vma = binder_alloc_get_vma(alloc);
1187 + if (vma) {
1188 + if (!mmget_not_zero(alloc->vma_vm_mm))
1189 + goto err_mmget;
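
The binder_alloc rework funnels all vma accesses through a publish/consume pair: the writer fills in vma_vm_mm, issues smp_wmb(), and only then stores alloc->vma; a reader that observes a non-NULL vma issues smp_rmb() before trusting the earlier stores. C11 release/acquire ordering expresses the same contract; a stand-alone sketch (types and values are invented; build with -pthread):

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>

struct alloc {
    int vma_vm_mm;                 /* written before publication */
    _Atomic(int *) vma;            /* publication point */
};

static struct alloc a;
static int the_vma = 42;

static void *writer(void *arg)
{
    (void)arg;
    a.vma_vm_mm = 1234;
    /* Release: everything above becomes visible before vma does -
     * the C11 analogue of the smp_wmb() in binder_alloc_set_vma(). */
    atomic_store_explicit(&a.vma, &the_vma, memory_order_release);
    return NULL;
}

static void *reader(void *arg)
{
    (void)arg;
    /* Acquire pairs with the release, like the smp_rmb() in
     * binder_alloc_get_vma(): a non-NULL vma guarantees vma_vm_mm. */
    int *vma = atomic_load_explicit(&a.vma, memory_order_acquire);

    if (vma)
        printf("vma=%d vma_vm_mm=%d\n", *vma, a.vma_vm_mm);
    else
        printf("not published yet\n");
    return NULL;
}

int main(void)
{
    pthread_t w, r;

    pthread_create(&w, NULL, writer, NULL);
    pthread_create(&r, NULL, reader, NULL);
    pthread_join(w, NULL);
    pthread_join(r, NULL);
    return 0;
}
/* --- end sketch --- */
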
1190 +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
1191 +index 09620c2ffa0f..704a761f94b2 100644
1192 +--- a/drivers/ata/libahci.c
1193 ++++ b/drivers/ata/libahci.c
1194 +@@ -2107,7 +2107,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1195 + struct ahci_host_priv *hpriv = ap->host->private_data;
1196 + void __iomem *port_mmio = ahci_port_base(ap);
1197 + struct ata_device *dev = ap->link.device;
1198 +- u32 devslp, dm, dito, mdat, deto;
1199 ++ u32 devslp, dm, dito, mdat, deto, dito_conf;
1200 + int rc;
1201 + unsigned int err_mask;
1202 +
1203 +@@ -2131,8 +2131,15 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1204 + return;
1205 + }
1206 +
1207 +- /* device sleep was already enabled */
1208 +- if (devslp & PORT_DEVSLP_ADSE)
1209 ++ dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
1210 ++ dito = devslp_idle_timeout / (dm + 1);
1211 ++ if (dito > 0x3ff)
1212 ++ dito = 0x3ff;
1213 ++
1214 ++ dito_conf = (devslp >> PORT_DEVSLP_DITO_OFFSET) & 0x3FF;
1215 ++
1216 ++ /* device sleep was already enabled and same dito */
1217 ++ if ((devslp & PORT_DEVSLP_ADSE) && (dito_conf == dito))
1218 + return;
1219 +
1220 + /* set DITO, MDAT, DETO and enable DevSlp, need to stop engine first */
1221 +@@ -2140,11 +2147,6 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1222 + if (rc)
1223 + return;
1224 +
1225 +- dm = (devslp & PORT_DEVSLP_DM_MASK) >> PORT_DEVSLP_DM_OFFSET;
1226 +- dito = devslp_idle_timeout / (dm + 1);
1227 +- if (dito > 0x3ff)
1228 +- dito = 0x3ff;
1229 +-
1230 + /* Use the nominal value 10 ms if the read MDAT is zero,
1231 + * the nominal value of DETO is 20 ms.
1232 + */
1233 +@@ -2162,6 +2164,8 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
1234 + deto = 20;
1235 + }
1236 +
1237 ++ /* Make dito, mdat, deto bits to 0s */
1238 ++ devslp &= ~GENMASK_ULL(24, 2);
1239 + devslp |= ((dito << PORT_DEVSLP_DITO_OFFSET) |
1240 + (mdat << PORT_DEVSLP_MDAT_OFFSET) |
1241 + (deto << PORT_DEVSLP_DETO_OFFSET) |
1242 +diff --git a/drivers/base/memory.c b/drivers/base/memory.c
1243 +index f5e560188a18..622ab8edc035 100644
1244 +--- a/drivers/base/memory.c
1245 ++++ b/drivers/base/memory.c
1246 +@@ -416,26 +416,24 @@ static ssize_t show_valid_zones(struct device *dev,
1247 + struct zone *default_zone;
1248 + int nid;
1249 +
1250 +- /*
1251 +- * The block contains more than one zone can not be offlined.
1252 +- * This can happen e.g. for ZONE_DMA and ZONE_DMA32
1253 +- */
1254 +- if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages, &valid_start_pfn, &valid_end_pfn))
1255 +- return sprintf(buf, "none\n");
1256 +-
1257 +- start_pfn = valid_start_pfn;
1258 +- nr_pages = valid_end_pfn - start_pfn;
1259 +-
1260 + /*
1261 + * Check the existing zone. Make sure that we do that only on the
1262 + * online nodes otherwise the page_zone is not reliable
1263 + */
1264 + if (mem->state == MEM_ONLINE) {
1265 ++ /*
1266 ++ * The block contains more than one zone can not be offlined.
1267 ++ * This can happen e.g. for ZONE_DMA and ZONE_DMA32
1268 ++ */
1269 ++ if (!test_pages_in_a_zone(start_pfn, start_pfn + nr_pages,
1270 ++ &valid_start_pfn, &valid_end_pfn))
1271 ++ return sprintf(buf, "none\n");
1272 ++ start_pfn = valid_start_pfn;
1273 + strcat(buf, page_zone(pfn_to_page(start_pfn))->name);
1274 + goto out;
1275 + }
1276 +
1277 +- nid = pfn_to_nid(start_pfn);
1278 ++ nid = mem->nid;
1279 + default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
1280 + strcat(buf, default_zone->name);
1281 +
1282 +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1283 +index 3fb95c8d9fd8..15a5ce5bba3d 100644
1284 +--- a/drivers/block/nbd.c
1285 ++++ b/drivers/block/nbd.c
1286 +@@ -1239,6 +1239,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1287 + case NBD_SET_SOCK:
1288 + return nbd_add_socket(nbd, arg, false);
1289 + case NBD_SET_BLKSIZE:
1290 ++ if (!arg || !is_power_of_2(arg) || arg < 512 ||
1291 ++ arg > PAGE_SIZE)
1292 ++ return -EINVAL;
1293 + nbd_size_set(nbd, arg,
1294 + div_s64(config->bytesize, arg));
1295 + return 0;
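
The nbd ioctl now validates the block size before using it as a divisor: it must be non-zero, a power of two, at least 512 and at most PAGE_SIZE. The `n & (n - 1)` trick behind is_power_of_2() clears the lowest set bit, so the result is zero exactly for powers of two. Sketch:

/* --- begin illustrative sketch (not part of the patch) --- */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Same test the kernel's is_power_of_2() performs. */
static bool is_power_of_2(unsigned long n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

static int set_blksize(unsigned long arg)
{
    if (!arg || !is_power_of_2(arg) || arg < 512 || arg > PAGE_SIZE)
        return -EINVAL;
    return 0;
}

int main(void)
{
    unsigned long tries[] = { 0, 100, 512, 1024, 3000, 8192 };

    for (int i = 0; i < 6; i++)
        printf("%5lu -> %d\n", tries[i], set_blksize(tries[i]));
    return 0;
}
/* --- end sketch --- */
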
1296 +diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
1297 +index b3f83cd96f33..01f59be71433 100644
1298 +--- a/drivers/block/pktcdvd.c
1299 ++++ b/drivers/block/pktcdvd.c
1300 +@@ -67,7 +67,7 @@
1301 + #include <scsi/scsi.h>
1302 + #include <linux/debugfs.h>
1303 + #include <linux/device.h>
1304 +-
1305 ++#include <linux/nospec.h>
1306 + #include <linux/uaccess.h>
1307 +
1308 + #define DRIVER_NAME "pktcdvd"
1309 +@@ -2231,6 +2231,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
1310 + {
1311 + if (dev_minor >= MAX_WRITERS)
1312 + return NULL;
1313 ++
1314 ++ dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
1315 + return pkt_devs[dev_minor];
1316 + }
1317 +
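
The pktcdvd change is Spectre-v1 hardening: even after the bounds check, the CPU may speculatively index pkt_devs[] with an out-of-range dev_minor, so array_index_nospec() clamps the index to zero without any branch the CPU could mispredict. The helper below mirrors the kernel's generic fallback mask (on x86 an equivalent cmp/sbb sequence is used instead); like the kernel, it relies on arithmetic right shift of a negative value:

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))
#define MAX_WRITERS 8

/* All-ones when index < size, all-zeroes otherwise, with no branch. */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
    return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
}

int main(void)
{
    for (unsigned long idx = 6; idx <= 9; idx++) {
        /* In-range indexes pass through; out-of-range ones collapse
         * to slot 0 even under speculative execution. */
        unsigned long safe = idx & index_mask(idx, MAX_WRITERS);

        printf("index %lu -> %lu\n", idx, safe);
    }
    return 0;
}
/* --- end sketch --- */
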
1318 +diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
1319 +index f3c643a0473c..5f953ca8ac5b 100644
1320 +--- a/drivers/bluetooth/Kconfig
1321 ++++ b/drivers/bluetooth/Kconfig
1322 +@@ -159,6 +159,7 @@ config BT_HCIUART_LL
1323 + config BT_HCIUART_3WIRE
1324 + bool "Three-wire UART (H5) protocol support"
1325 + depends on BT_HCIUART
1326 ++ depends on BT_HCIUART_SERDEV
1327 + help
1328 + The HCI Three-wire UART Transport Layer makes it possible to
1329 + user the Bluetooth HCI over a serial port interface. The HCI
1330 +diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
1331 +index 6116cd05e228..9086edc9066b 100644
1332 +--- a/drivers/char/tpm/tpm_i2c_infineon.c
1333 ++++ b/drivers/char/tpm/tpm_i2c_infineon.c
1334 +@@ -117,7 +117,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
1335 + /* Lock the adapter for the duration of the whole sequence. */
1336 + if (!tpm_dev.client->adapter->algo->master_xfer)
1337 + return -EOPNOTSUPP;
1338 +- i2c_lock_adapter(tpm_dev.client->adapter);
1339 ++ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1340 +
1341 + if (tpm_dev.chip_type == SLB9645) {
1342 + /* use a combined read for newer chips
1343 +@@ -192,7 +192,7 @@ static int iic_tpm_read(u8 addr, u8 *buffer, size_t len)
1344 + }
1345 +
1346 + out:
1347 +- i2c_unlock_adapter(tpm_dev.client->adapter);
1348 ++ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1349 + /* take care of 'guard time' */
1350 + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
1351 +
1352 +@@ -224,7 +224,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
1353 +
1354 + if (!tpm_dev.client->adapter->algo->master_xfer)
1355 + return -EOPNOTSUPP;
1356 +- i2c_lock_adapter(tpm_dev.client->adapter);
1357 ++ i2c_lock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1358 +
1359 + /* prepend the 'register address' to the buffer */
1360 + tpm_dev.buf[0] = addr;
1361 +@@ -243,7 +243,7 @@ static int iic_tpm_write_generic(u8 addr, u8 *buffer, size_t len,
1362 + usleep_range(sleep_low, sleep_hi);
1363 + }
1364 +
1365 +- i2c_unlock_adapter(tpm_dev.client->adapter);
1366 ++ i2c_unlock_bus(tpm_dev.client->adapter, I2C_LOCK_SEGMENT);
1367 + /* take care of 'guard time' */
1368 + usleep_range(SLEEP_DURATION_LOW, SLEEP_DURATION_HI);
1369 +
1370 +diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c
1371 +index 424ff2fde1f2..9914f6973463 100644
1372 +--- a/drivers/char/tpm/tpm_tis_spi.c
1373 ++++ b/drivers/char/tpm/tpm_tis_spi.c
1374 +@@ -199,6 +199,7 @@ static const struct tpm_tis_phy_ops tpm_spi_phy_ops = {
1375 + static int tpm_tis_spi_probe(struct spi_device *dev)
1376 + {
1377 + struct tpm_tis_spi_phy *phy;
1378 ++ int irq;
1379 +
1380 + phy = devm_kzalloc(&dev->dev, sizeof(struct tpm_tis_spi_phy),
1381 + GFP_KERNEL);
1382 +@@ -211,7 +212,13 @@ static int tpm_tis_spi_probe(struct spi_device *dev)
1383 + if (!phy->iobuf)
1384 + return -ENOMEM;
1385 +
1386 +- return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops,
1387 ++ /* If the SPI device has an IRQ then use that */
1388 ++ if (dev->irq > 0)
1389 ++ irq = dev->irq;
1390 ++ else
1391 ++ irq = -1;
1392 ++
1393 ++ return tpm_tis_core_init(&dev->dev, &phy->priv, irq, &tpm_spi_phy_ops,
1394 + NULL);
1395 + }
1396 +
1397 +diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
1398 +index bb2a6f2f5516..a985bf5e1ac6 100644
1399 +--- a/drivers/clk/clk-scmi.c
1400 ++++ b/drivers/clk/clk-scmi.c
1401 +@@ -38,7 +38,6 @@ static unsigned long scmi_clk_recalc_rate(struct clk_hw *hw,
1402 + static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1403 + unsigned long *parent_rate)
1404 + {
1405 +- int step;
1406 + u64 fmin, fmax, ftmp;
1407 + struct scmi_clk *clk = to_scmi_clk(hw);
1408 +
1409 +@@ -60,9 +59,9 @@ static long scmi_clk_round_rate(struct clk_hw *hw, unsigned long rate,
1410 +
1411 + ftmp = rate - fmin;
1412 + ftmp += clk->info->range.step_size - 1; /* to round up */
1413 +- step = do_div(ftmp, clk->info->range.step_size);
1414 ++ do_div(ftmp, clk->info->range.step_size);
1415 +
1416 +- return step * clk->info->range.step_size + fmin;
1417 ++ return ftmp * clk->info->range.step_size + fmin;
1418 + }
1419 +
1420 + static int scmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
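
The clk-scmi fix hinges on do_div() semantics: it divides its first argument in place and *returns the remainder*, so the old code multiplied the remainder back and returned a bogus rate; the fix uses the in-place quotient. The surrounding arithmetic is the standard round-up-to-the-next-step idiom, sketched below with plain 64-bit division standing in for do_div() (clock range values are made up):

/* --- begin illustrative sketch (not part of the patch) --- */
#include <stdio.h>
#include <stdint.h>

/* Round 'rate' up to the next frequency of the form fmin + k*step. */
static uint64_t round_rate_up(uint64_t rate, uint64_t fmin, uint64_t step)
{
    uint64_t ftmp = rate - fmin;

    ftmp += step - 1;        /* round up */
    ftmp /= step;            /* quotient k; do_div() returns the
                              * remainder and leaves k in ftmp - the
                              * buggy code used the remainder instead */
    return ftmp * step + fmin;
}

int main(void)
{
    uint64_t fmin = 100000000, step = 25000000;  /* 100 MHz, 25 MHz steps */

    printf("%llu\n", (unsigned long long)round_rate_up(110000000, fmin, step));
    printf("%llu\n", (unsigned long long)round_rate_up(150000000, fmin, step));
    return 0;
}
/* --- end sketch --- */
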
1421 +diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
1422 +index fd49b24fd6af..99e2aace8078 100644
1423 +--- a/drivers/dax/pmem.c
1424 ++++ b/drivers/dax/pmem.c
1425 +@@ -105,15 +105,19 @@ static int dax_pmem_probe(struct device *dev)
1426 + if (rc)
1427 + return rc;
1428 +
1429 +- rc = devm_add_action_or_reset(dev, dax_pmem_percpu_exit,
1430 +- &dax_pmem->ref);
1431 +- if (rc)
1432 ++ rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
1433 ++ if (rc) {
1434 ++ percpu_ref_exit(&dax_pmem->ref);
1435 + return rc;
1436 ++ }
1437 +
1438 + dax_pmem->pgmap.ref = &dax_pmem->ref;
1439 + addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
1440 +- if (IS_ERR(addr))
1441 ++ if (IS_ERR(addr)) {
1442 ++ devm_remove_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
1443 ++ percpu_ref_exit(&dax_pmem->ref);
1444 + return PTR_ERR(addr);
1445 ++ }
1446 +
1447 + rc = devm_add_action_or_reset(dev, dax_pmem_percpu_kill,
1448 + &dax_pmem->ref);
1449 +diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
1450 +index e9db895916c3..1aa67bb5d8c0 100644
1451 +--- a/drivers/firmware/google/vpd.c
1452 ++++ b/drivers/firmware/google/vpd.c
1453 +@@ -246,6 +246,7 @@ static int vpd_section_destroy(struct vpd_section *sec)
1454 + sysfs_remove_bin_file(vpd_kobj, &sec->bin_attr);
1455 + kfree(sec->raw_name);
1456 + memunmap(sec->baseaddr);
1457 ++ sec->enabled = false;
1458 + }
1459 +
1460 + return 0;
1461 +@@ -279,8 +280,10 @@ static int vpd_sections_init(phys_addr_t physaddr)
1462 + ret = vpd_section_init("rw", &rw_vpd,
1463 + physaddr + sizeof(struct vpd_cbmem) +
1464 + header.ro_size, header.rw_size);
1465 +- if (ret)
1466 ++ if (ret) {
1467 ++ vpd_section_destroy(&ro_vpd);
1468 + return ret;
1469 ++ }
1470 + }
1471 +
1472 + return 0;
1473 +diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
1474 +index b23d9a36be1f..51c7d1b84c2e 100644
1475 +--- a/drivers/gpio/gpio-ml-ioh.c
1476 ++++ b/drivers/gpio/gpio-ml-ioh.c
1477 +@@ -496,9 +496,10 @@ static int ioh_gpio_probe(struct pci_dev *pdev,
1478 + return 0;
1479 +
1480 + err_gpiochip_add:
1481 ++ chip = chip_save;
1482 + while (--i >= 0) {
1483 +- chip--;
1484 + gpiochip_remove(&chip->gpio);
1485 ++ chip++;
1486 + }
1487 + kfree(chip_save);
1488 +
1489 +diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
1490 +index 1e66f808051c..2e33fd552899 100644
1491 +--- a/drivers/gpio/gpio-pxa.c
1492 ++++ b/drivers/gpio/gpio-pxa.c
1493 +@@ -241,6 +241,17 @@ int pxa_irq_to_gpio(int irq)
1494 + return irq_gpio0;
1495 + }
1496 +
1497 ++static bool pxa_gpio_has_pinctrl(void)
1498 ++{
1499 ++ switch (gpio_type) {
1500 ++ case PXA3XX_GPIO:
1501 ++ return false;
1502 ++
1503 ++ default:
1504 ++ return true;
1505 ++ }
1506 ++}
1507 ++
1508 + static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
1509 + {
1510 + struct pxa_gpio_chip *pchip = chip_to_pxachip(chip);
1511 +@@ -255,9 +266,11 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
1512 + unsigned long flags;
1513 + int ret;
1514 +
1515 +- ret = pinctrl_gpio_direction_input(chip->base + offset);
1516 +- if (!ret)
1517 +- return 0;
1518 ++ if (pxa_gpio_has_pinctrl()) {
1519 ++ ret = pinctrl_gpio_direction_input(chip->base + offset);
1520 ++ if (!ret)
1521 ++ return 0;
1522 ++ }
1523 +
1524 + spin_lock_irqsave(&gpio_lock, flags);
1525 +
1526 +@@ -282,9 +295,11 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
1527 +
1528 + writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
1529 +
1530 +- ret = pinctrl_gpio_direction_output(chip->base + offset);
1531 +- if (ret)
1532 +- return ret;
1533 ++ if (pxa_gpio_has_pinctrl()) {
1534 ++ ret = pinctrl_gpio_direction_output(chip->base + offset);
1535 ++ if (ret)
1536 ++ return ret;
1537 ++ }
1538 +
1539 + spin_lock_irqsave(&gpio_lock, flags);
1540 +
1541 +@@ -348,8 +363,12 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio,
1542 + pchip->chip.set = pxa_gpio_set;
1543 + pchip->chip.to_irq = pxa_gpio_to_irq;
1544 + pchip->chip.ngpio = ngpio;
1545 +- pchip->chip.request = gpiochip_generic_request;
1546 +- pchip->chip.free = gpiochip_generic_free;
1547 ++
1548 ++ if (pxa_gpio_has_pinctrl()) {
1549 ++ pchip->chip.request = gpiochip_generic_request;
1550 ++ pchip->chip.free = gpiochip_generic_free;
1551 ++ }
1552 ++
1553 + #ifdef CONFIG_OF_GPIO
1554 + pchip->chip.of_node = np;
1555 + pchip->chip.of_xlate = pxa_gpio_of_xlate;
1556 +diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
1557 +index 94396caaca75..d5d79727c55d 100644
1558 +--- a/drivers/gpio/gpio-tegra.c
1559 ++++ b/drivers/gpio/gpio-tegra.c
1560 +@@ -720,4 +720,4 @@ static int __init tegra_gpio_init(void)
1561 + {
1562 + return platform_driver_register(&tegra_gpio_driver);
1563 + }
1564 +-postcore_initcall(tegra_gpio_init);
1565 ++subsys_initcall(tegra_gpio_init);
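
The one-line gpio-tegra change above only moves registration to a later boot stage. As a sketch (assuming the standard initcall ordering from include/linux/init.h, where levels fire in ascending order: core is 1, postcore is 2, arch is 3, subsys is 4):

        #include <linux/init.h>

        static int __init example_init(void)
        {
                /* platform_driver_register(&example_driver) in a real driver */
                return 0;
        }
        /* was: postcore_initcall(example_init); now registers two levels later */
        subsys_initcall(example_init);
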
1566 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
1567 +index a576b8bbb3cd..dea40b322191 100644
1568 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
1569 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
1570 +@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
1571 + }
1572 + }
1573 +
1574 +-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
1575 ++static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
1576 + struct dc_link *link,
1577 + struct psr_context *psr_context)
1578 + {
1579 +@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
1580 +
1581 + /* notifyDMCUMsg */
1582 + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
1583 ++
1584 ++ return true;
1585 + }
1586 +
1587 + static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
1588 +@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
1589 + * least a few frames. Should never hit the max retry assert below.
1590 + */
1591 + if (wait == true) {
1592 +- for (retryCount = 0; retryCount <= 1000; retryCount++) {
1593 +- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
1594 +- if (enable) {
1595 +- if (psr_state != 0)
1596 +- break;
1597 +- } else {
1598 +- if (psr_state == 0)
1599 +- break;
1600 ++ for (retryCount = 0; retryCount <= 1000; retryCount++) {
1601 ++ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
1602 ++ if (enable) {
1603 ++ if (psr_state != 0)
1604 ++ break;
1605 ++ } else {
1606 ++ if (psr_state == 0)
1607 ++ break;
1608 ++ }
1609 ++ udelay(500);
1610 + }
1611 +- udelay(500);
1612 +- }
1613 +
1614 +- /* assert if max retry hit */
1615 +- ASSERT(retryCount <= 1000);
1616 ++ /* assert if max retry hit */
1617 ++ if (retryCount >= 1000)
1618 ++ ASSERT(0);
1619 + }
1620 + }
1621 +
1622 +-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
1623 ++static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
1624 + struct dc_link *link,
1625 + struct psr_context *psr_context)
1626 + {
1627 +@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
1628 +
1629 + /* If microcontroller is not running, do nothing */
1630 + if (dmcu->dmcu_state != DMCU_RUNNING)
1631 +- return;
1632 ++ return false;
1633 +
1634 + link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
1635 + psr_context->psrExitLinkTrainingRequired);
1636 +@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
1637 +
1638 + /* notifyDMCUMsg */
1639 + REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
1640 ++
1641 ++ /* waitDMCUReadyForCmd */
1642 ++ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
1643 ++
1644 ++ return true;
1645 + }
1646 +
1647 + static void dcn10_psr_wait_loop(
1648 +diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
1649 +index de60f940030d..4550747fb61c 100644
1650 +--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
1651 ++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
1652 +@@ -48,7 +48,7 @@ struct dmcu_funcs {
1653 + const char *src,
1654 + unsigned int bytes);
1655 + void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
1656 +- void (*setup_psr)(struct dmcu *dmcu,
1657 ++ bool (*setup_psr)(struct dmcu *dmcu,
1658 + struct dc_link *link,
1659 + struct psr_context *psr_context);
1660 + void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
1661 +diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
1662 +index 48685cddbad1..c73bd003f845 100644
1663 +--- a/drivers/gpu/ipu-v3/ipu-common.c
1664 ++++ b/drivers/gpu/ipu-v3/ipu-common.c
1665 +@@ -1401,6 +1401,8 @@ static int ipu_probe(struct platform_device *pdev)
1666 + return -ENODEV;
1667 +
1668 + ipu->id = of_alias_get_id(np, "ipu");
1669 ++ if (ipu->id < 0)
1670 ++ ipu->id = 0;
1671 +
1672 + if (of_device_is_compatible(np, "fsl,imx6qp-ipu") &&
1673 + IS_ENABLED(CONFIG_DRM)) {
1674 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1675 +index c7981ddd8776..e80bcd71fe1e 100644
1676 +--- a/drivers/hid/hid-ids.h
1677 ++++ b/drivers/hid/hid-ids.h
1678 +@@ -528,6 +528,7 @@
1679 +
1680 + #define I2C_VENDOR_ID_RAYD 0x2386
1681 + #define I2C_PRODUCT_ID_RAYD_3118 0x3118
1682 ++#define I2C_PRODUCT_ID_RAYD_4B33 0x4B33
1683 +
1684 + #define USB_VENDOR_ID_HANWANG 0x0b57
1685 + #define USB_DEVICE_ID_HANWANG_TABLET_FIRST 0x5000
1686 +diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1687 +index ab93dd5927c3..b23c4b5854d8 100644
1688 +--- a/drivers/hid/hid-input.c
1689 ++++ b/drivers/hid/hid-input.c
1690 +@@ -1579,6 +1579,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
1691 + input_dev->dev.parent = &hid->dev;
1692 +
1693 + hidinput->input = input_dev;
1694 ++ hidinput->application = application;
1695 + list_add_tail(&hidinput->list, &hid->inputs);
1696 +
1697 + INIT_LIST_HEAD(&hidinput->reports);
1698 +@@ -1674,8 +1675,7 @@ static struct hid_input *hidinput_match_application(struct hid_report *report)
1699 + struct hid_input *hidinput;
1700 +
1701 + list_for_each_entry(hidinput, &hid->inputs, list) {
1702 +- if (hidinput->report &&
1703 +- hidinput->report->application == report->application)
1704 ++ if (hidinput->application == report->application)
1705 + return hidinput;
1706 + }
1707 +
1708 +@@ -1812,6 +1812,7 @@ void hidinput_disconnect(struct hid_device *hid)
1709 + input_unregister_device(hidinput->input);
1710 + else
1711 + input_free_device(hidinput->input);
1712 ++ kfree(hidinput->name);
1713 + kfree(hidinput);
1714 + }
1715 +
1716 +diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1717 +index 45968f7970f8..15c934ef6b18 100644
1718 +--- a/drivers/hid/hid-multitouch.c
1719 ++++ b/drivers/hid/hid-multitouch.c
1720 +@@ -1167,7 +1167,8 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
1721 + struct hid_usage *usage,
1722 + enum latency_mode latency,
1723 + bool surface_switch,
1724 +- bool button_switch)
1725 ++ bool button_switch,
1726 ++ bool *inputmode_found)
1727 + {
1728 + struct mt_device *td = hid_get_drvdata(hdev);
1729 + struct mt_class *cls = &td->mtclass;
1730 +@@ -1179,6 +1180,14 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
1731 +
1732 + switch (usage->hid) {
1733 + case HID_DG_INPUTMODE:
1734 ++ /*
1735 ++ * Some Elan panels wrongly declare 2 input mode features,
1736 ++ * and silently ignore the value we set in the second
1737 ++ * field. Skip the second feature and hope for the best.
1738 ++ */
1739 ++ if (*inputmode_found)
1740 ++ return false;
1741 ++
1742 + if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
1743 + report_len = hid_report_len(report);
1744 + buf = hid_alloc_report_buf(report, GFP_KERNEL);
1745 +@@ -1194,6 +1203,7 @@ static bool mt_need_to_apply_feature(struct hid_device *hdev,
1746 + }
1747 +
1748 + field->value[index] = td->inputmode_value;
1749 ++ *inputmode_found = true;
1750 + return true;
1751 +
1752 + case HID_DG_CONTACTMAX:
1753 +@@ -1231,6 +1241,7 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
1754 + struct hid_usage *usage;
1755 + int i, j;
1756 + bool update_report;
1757 ++ bool inputmode_found = false;
1758 +
1759 + rep_enum = &hdev->report_enum[HID_FEATURE_REPORT];
1760 + list_for_each_entry(rep, &rep_enum->report_list, list) {
1761 +@@ -1249,7 +1260,8 @@ static void mt_set_modes(struct hid_device *hdev, enum latency_mode latency,
1762 + usage,
1763 + latency,
1764 + surface_switch,
1765 +- button_switch))
1766 ++ button_switch,
1767 ++ &inputmode_found))
1768 + update_report = true;
1769 + }
1770 + }
1771 +@@ -1476,6 +1488,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
1772 + */
1773 + hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
1774 +
1775 ++ if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
1776 ++ hdev->quirks |= HID_QUIRK_MULTI_INPUT;
1777 ++
1778 + timer_setup(&td->release_timer, mt_expired_timeout, 0);
1779 +
1780 + ret = hid_parse(hdev);
1781 +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
1782 +index eae0cb3ddec6..5fd1159fc095 100644
1783 +--- a/drivers/hid/i2c-hid/i2c-hid.c
1784 ++++ b/drivers/hid/i2c-hid/i2c-hid.c
1785 +@@ -174,6 +174,8 @@ static const struct i2c_hid_quirks {
1786 + I2C_HID_QUIRK_RESEND_REPORT_DESCR },
1787 + { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
1788 + I2C_HID_QUIRK_RESEND_REPORT_DESCR },
1789 ++ { I2C_VENDOR_ID_RAYD, I2C_PRODUCT_ID_RAYD_4B33,
1790 ++ I2C_HID_QUIRK_RESEND_REPORT_DESCR },
1791 + { 0, 0 }
1792 + };
1793 +
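
For context, a compact sketch of how a zero-terminated quirk table like the one extended above is typically scanned (editor's illustration; the struct and function names are hypothetical and simplified from i2c-hid's actual lookup):

        #include <linux/bits.h>
        #include <linux/types.h>

        struct example_quirk {
                u16 vendor, product;
                u32 quirks;
        };

        static const struct example_quirk example_quirks[] = {
                { 0x2386 /* RAYD */, 0x4B33, BIT(0) /* resend report descr */ },
                { 0, 0 }        /* the { 0, 0 } sentinel ends the scan */
        };

        static u32 example_lookup_quirks(u16 vendor, u16 product)
        {
                u32 quirks = 0;
                int n;

                for (n = 0; example_quirks[n].vendor; n++) {
                        if (example_quirks[n].vendor != vendor ||
                            example_quirks[n].product != product)
                                continue;
                        quirks = example_quirks[n].quirks;
                        break;
                }
                return quirks;
        }
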
1794 +diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
1795 +index 658dc765753b..553adccb05d7 100644
1796 +--- a/drivers/hv/hv.c
1797 ++++ b/drivers/hv/hv.c
1798 +@@ -242,6 +242,10 @@ int hv_synic_alloc(void)
1799 +
1800 + return 0;
1801 + err:
1802 ++ /*
1803 ++ * Any memory allocations that succeeded will be freed when
1804 ++ * the caller cleans up by calling hv_synic_free()
1805 ++ */
1806 + return -ENOMEM;
1807 + }
1808 +
1809 +@@ -254,12 +258,10 @@ void hv_synic_free(void)
1810 + struct hv_per_cpu_context *hv_cpu
1811 + = per_cpu_ptr(hv_context.cpu_context, cpu);
1812 +
1813 +- if (hv_cpu->synic_event_page)
1814 +- free_page((unsigned long)hv_cpu->synic_event_page);
1815 +- if (hv_cpu->synic_message_page)
1816 +- free_page((unsigned long)hv_cpu->synic_message_page);
1817 +- if (hv_cpu->post_msg_page)
1818 +- free_page((unsigned long)hv_cpu->post_msg_page);
1819 ++ kfree(hv_cpu->clk_evt);
1820 ++ free_page((unsigned long)hv_cpu->synic_event_page);
1821 ++ free_page((unsigned long)hv_cpu->synic_message_page);
1822 ++ free_page((unsigned long)hv_cpu->post_msg_page);
1823 + }
1824 +
1825 + kfree(hv_context.hv_numa_map);
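
The NULL checks dropped above are safe to remove because both kfree() and free_page() are no-ops for a NULL (or zero) argument, which is also why the added kfree(hv_cpu->clk_evt) needs no guard. A minimal sketch:

        #include <linux/gfp.h>
        #include <linux/slab.h>

        static void example_free(void *obj, void *page)
        {
                kfree(obj);                     /* kfree(NULL) is a no-op */
                free_page((unsigned long)page); /* free_pages() ignores address 0 */
        }
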
1826 +diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
1827 +index 60e4d0e939a3..715b6fdb4989 100644
1828 +--- a/drivers/i2c/busses/i2c-aspeed.c
1829 ++++ b/drivers/i2c/busses/i2c-aspeed.c
1830 +@@ -868,7 +868,7 @@ static int aspeed_i2c_probe_bus(struct platform_device *pdev)
1831 + if (!match)
1832 + bus->get_clk_reg_val = aspeed_i2c_24xx_get_clk_reg_val;
1833 + else
1834 +- bus->get_clk_reg_val = match->data;
1835 ++ bus->get_clk_reg_val = (u32 (*)(u32))match->data;
1836 +
1837 + /* Initialize the I2C adapter */
1838 + spin_lock_init(&bus->lock);
1839 +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1840 +index aa726607645e..45fcf0c37a9e 100644
1841 +--- a/drivers/i2c/busses/i2c-i801.c
1842 ++++ b/drivers/i2c/busses/i2c-i801.c
1843 +@@ -139,6 +139,7 @@
1844 +
1845 + #define SBREG_BAR 0x10
1846 + #define SBREG_SMBCTRL 0xc6000c
1847 ++#define SBREG_SMBCTRL_DNV 0xcf000c
1848 +
1849 + /* Host status bits for SMBPCISTS */
1850 + #define SMBPCISTS_INTS BIT(3)
1851 +@@ -1396,7 +1397,11 @@ static void i801_add_tco(struct i801_priv *priv)
1852 + spin_unlock(&p2sb_spinlock);
1853 +
1854 + res = &tco_res[ICH_RES_MEM_OFF];
1855 +- res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
1856 ++ if (pci_dev->device == PCI_DEVICE_ID_INTEL_DNV_SMBUS)
1857 ++ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL_DNV;
1858 ++ else
1859 ++ res->start = (resource_size_t)base64_addr + SBREG_SMBCTRL;
1860 ++
1861 + res->end = res->start + 3;
1862 + res->flags = IORESOURCE_MEM;
1863 +
1864 +diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
1865 +index 9a71e50d21f1..0c51c0ffdda9 100644
1866 +--- a/drivers/i2c/busses/i2c-xiic.c
1867 ++++ b/drivers/i2c/busses/i2c-xiic.c
1868 +@@ -532,6 +532,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1869 + {
1870 + u8 rx_watermark;
1871 + struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
1872 ++ unsigned long flags;
1873 +
1874 + /* Clear and enable Rx full interrupt. */
1875 + xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);
1876 +@@ -547,6 +548,7 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1877 + rx_watermark = IIC_RX_FIFO_DEPTH;
1878 + xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);
1879 +
1880 ++ local_irq_save(flags);
1881 + if (!(msg->flags & I2C_M_NOSTART))
1882 + /* write the address */
1883 + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1884 +@@ -556,6 +558,8 @@ static void xiic_start_recv(struct xiic_i2c *i2c)
1885 +
1886 + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
1887 + msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
1888 ++ local_irq_restore(flags);
1889 ++
1890 + if (i2c->nmsgs == 1)
1891 + /* very last, enable bus not busy as well */
1892 + xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);
1893 +diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
1894 +index bff10ab141b0..dafcb6f019b3 100644
1895 +--- a/drivers/infiniband/core/cma.c
1896 ++++ b/drivers/infiniband/core/cma.c
1897 +@@ -1445,9 +1445,16 @@ static bool cma_match_net_dev(const struct rdma_cm_id *id,
1898 + (addr->src_addr.ss_family == AF_IB ||
1899 + rdma_protocol_roce(id->device, port_num));
1900 +
1901 +- return !addr->dev_addr.bound_dev_if ||
1902 +- (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1903 +- addr->dev_addr.bound_dev_if == net_dev->ifindex);
1904 ++ /*
1905 ++ * Net namespaces must match, and if the listener is listening
1906 ++ * on a specific netdevice then the netdevice must match as well.
1907 ++ */
1908 ++ if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1909 ++ (!!addr->dev_addr.bound_dev_if ==
1910 ++ (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
1911 ++ return true;
1912 ++ else
1913 ++ return false;
1914 + }
1915 +
1916 + static struct rdma_id_private *cma_find_listener(
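
The rewritten condition above is terse, so here is a stand-alone C sketch of its two cases (illustrative only). When the listener is unbound, bound_dev_if is 0, both sides of the comparison are false, and any netdevice in the matching namespace is accepted; when it is bound, the left side is 1 and the right side is 1 only for the matching ifindex:

        #include <stdbool.h>
        #include <stdio.h>

        static bool dev_if_matches(int bound_dev_if, int ifindex)
        {
                return !!bound_dev_if == (bound_dev_if == ifindex);
        }

        int main(void)
        {
                /* ifindex 0 never names a real device, so only these occur */
                printf("%d\n", dev_if_matches(0, 3)); /* 1: unbound matches all */
                printf("%d\n", dev_if_matches(3, 3)); /* 1: bound, same device  */
                printf("%d\n", dev_if_matches(3, 4)); /* 0: bound, other device */
                return 0;
        }
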
1917 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
1918 +index 63b5b3edabcb..8dc336a85128 100644
1919 +--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
1920 ++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
1921 +@@ -494,6 +494,9 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
1922 + step_idx = 1;
1923 + } else if (hop_num == HNS_ROCE_HOP_NUM_0) {
1924 + step_idx = 0;
1925 ++ } else {
1926 ++ ret = -EINVAL;
1927 ++ goto err_dma_alloc_l1;
1928 + }
1929 +
1930 + /* set HEM base address to hardware */
1931 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1932 +index a6e11be0ea0f..c00925ed9da8 100644
1933 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1934 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1935 +@@ -273,7 +273,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1936 + switch (wr->opcode) {
1937 + case IB_WR_SEND_WITH_IMM:
1938 + case IB_WR_RDMA_WRITE_WITH_IMM:
1939 +- ud_sq_wqe->immtdata = wr->ex.imm_data;
1940 ++ ud_sq_wqe->immtdata =
1941 ++ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
1942 + break;
1943 + default:
1944 + ud_sq_wqe->immtdata = 0;
1945 +@@ -371,7 +372,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
1946 + switch (wr->opcode) {
1947 + case IB_WR_SEND_WITH_IMM:
1948 + case IB_WR_RDMA_WRITE_WITH_IMM:
1949 +- rc_sq_wqe->immtdata = wr->ex.imm_data;
1950 ++ rc_sq_wqe->immtdata =
1951 ++ cpu_to_le32(be32_to_cpu(wr->ex.imm_data));
1952 + break;
1953 + case IB_WR_SEND_WITH_INV:
1954 + rc_sq_wqe->inv_key =
1955 +@@ -1931,7 +1933,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
1956 + case HNS_ROCE_V2_OPCODE_RDMA_WRITE_IMM:
1957 + wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
1958 + wc->wc_flags = IB_WC_WITH_IMM;
1959 +- wc->ex.imm_data = cqe->immtdata;
1960 ++ wc->ex.imm_data =
1961 ++ cpu_to_be32(le32_to_cpu(cqe->immtdata));
1962 + break;
1963 + case HNS_ROCE_V2_OPCODE_SEND:
1964 + wc->opcode = IB_WC_RECV;
1965 +@@ -1940,7 +1943,8 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
1966 + case HNS_ROCE_V2_OPCODE_SEND_WITH_IMM:
1967 + wc->opcode = IB_WC_RECV;
1968 + wc->wc_flags = IB_WC_WITH_IMM;
1969 +- wc->ex.imm_data = cqe->immtdata;
1970 ++ wc->ex.imm_data =
1971 ++ cpu_to_be32(le32_to_cpu(cqe->immtdata));
1972 + break;
1973 + case HNS_ROCE_V2_OPCODE_SEND_WITH_INV:
1974 + wc->opcode = IB_WC_RECV;
1975 +diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1976 +index d47675f365c7..7e2c740e0df5 100644
1977 +--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1978 ++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
1979 +@@ -768,7 +768,7 @@ struct hns_roce_v2_cqe {
1980 + __le32 byte_4;
1981 + union {
1982 + __le32 rkey;
1983 +- __be32 immtdata;
1984 ++ __le32 immtdata;
1985 + };
1986 + __le32 byte_12;
1987 + __le32 byte_16;
1988 +@@ -926,7 +926,7 @@ struct hns_roce_v2_cq_db {
1989 + struct hns_roce_v2_ud_send_wqe {
1990 + __le32 byte_4;
1991 + __le32 msg_len;
1992 +- __be32 immtdata;
1993 ++ __le32 immtdata;
1994 + __le32 byte_16;
1995 + __le32 byte_20;
1996 + __le32 byte_24;
1997 +@@ -1012,7 +1012,7 @@ struct hns_roce_v2_rc_send_wqe {
1998 + __le32 msg_len;
1999 + union {
2000 + __le32 inv_key;
2001 +- __be32 immtdata;
2002 ++ __le32 immtdata;
2003 + };
2004 + __le32 byte_16;
2005 + __le32 byte_20;
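
The type changes above (from __be32 to __le32) drive the conversions in the matching .c hunks: RDMA immediate data arrives in network byte order while the hardware descriptor field is little-endian, so the value has to pass through CPU order rather than being assigned across types. A minimal sketch (illustrative; the struct name is hypothetical):

        #include <linux/types.h>
        #include <asm/byteorder.h>

        struct example_wqe {
                __le32 immtdata;        /* hardware reads this little-endian */
        };

        static void example_set_imm(struct example_wqe *wqe, __be32 imm)
        {
                /* be32_to_cpu() undoes the wire order, cpu_to_le32() applies
                 * the descriptor order. A plain assignment would leave the
                 * bytes in wire order for the hardware to misinterpret, and
                 * sparse flags it as a type mismatch. */
                wqe->immtdata = cpu_to_le32(be32_to_cpu(imm));
        }

        static __be32 example_get_imm(const struct example_wqe *wqe)
        {
                /* completion path: the exact inverse */
                return cpu_to_be32(le32_to_cpu(wqe->immtdata));
        }
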
2006 +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
2007 +index 6709328d90f8..c7e034963738 100644
2008 +--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
2009 ++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
2010 +@@ -822,6 +822,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
2011 + if (neigh && list_empty(&neigh->list)) {
2012 + kref_get(&mcast->ah->ref);
2013 + neigh->ah = mcast->ah;
2014 ++ neigh->ah->valid = 1;
2015 + list_add_tail(&neigh->list, &mcast->neigh_list);
2016 + }
2017 + }
2018 +diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
2019 +index 54fe190fd4bc..48c5ccab00a0 100644
2020 +--- a/drivers/input/touchscreen/atmel_mxt_ts.c
2021 ++++ b/drivers/input/touchscreen/atmel_mxt_ts.c
2022 +@@ -1658,10 +1658,11 @@ static int mxt_parse_object_table(struct mxt_data *data,
2023 + break;
2024 + case MXT_TOUCH_MULTI_T9:
2025 + data->multitouch = MXT_TOUCH_MULTI_T9;
2026 ++ /* Only handle messages from first T9 instance */
2027 + data->T9_reportid_min = min_id;
2028 +- data->T9_reportid_max = max_id;
2029 +- data->num_touchids = object->num_report_ids
2030 +- * mxt_obj_instances(object);
2031 ++ data->T9_reportid_max = min_id +
2032 ++ object->num_report_ids - 1;
2033 ++ data->num_touchids = object->num_report_ids;
2034 + break;
2035 + case MXT_SPT_MESSAGECOUNT_T44:
2036 + data->T44_address = object->start_address;
2037 +diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
2038 +index 1d647104bccc..b73c6a7bf7f2 100644
2039 +--- a/drivers/iommu/arm-smmu-v3.c
2040 ++++ b/drivers/iommu/arm-smmu-v3.c
2041 +@@ -24,6 +24,7 @@
2042 + #include <linux/acpi_iort.h>
2043 + #include <linux/bitfield.h>
2044 + #include <linux/bitops.h>
2045 ++#include <linux/crash_dump.h>
2046 + #include <linux/delay.h>
2047 + #include <linux/dma-iommu.h>
2048 + #include <linux/err.h>
2049 +@@ -2211,8 +2212,12 @@ static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
2050 + reg &= ~clr;
2051 + reg |= set;
2052 + writel_relaxed(reg | GBPA_UPDATE, gbpa);
2053 +- return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2054 +- 1, ARM_SMMU_POLL_TIMEOUT_US);
2055 ++ ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2056 ++ 1, ARM_SMMU_POLL_TIMEOUT_US);
2057 ++
2058 ++ if (ret)
2059 ++ dev_err(smmu->dev, "GBPA not responding to update\n");
2060 ++ return ret;
2061 + }
2062 +
2063 + static void arm_smmu_free_msis(void *data)
2064 +@@ -2392,8 +2397,15 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
2065 +
2066 + /* Clear CR0 and sync (disables SMMU and queue processing) */
2067 + reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
2068 +- if (reg & CR0_SMMUEN)
2069 ++ if (reg & CR0_SMMUEN) {
2070 ++ if (is_kdump_kernel()) {
2071 ++ arm_smmu_update_gbpa(smmu, GBPA_ABORT, 0);
2072 ++ arm_smmu_device_disable(smmu);
2073 ++ return -EBUSY;
2074 ++ }
2075 ++
2076 + dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
2077 ++ }
2078 +
2079 + ret = arm_smmu_device_disable(smmu);
2080 + if (ret)
2081 +@@ -2491,10 +2503,8 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
2082 + enables |= CR0_SMMUEN;
2083 + } else {
2084 + ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
2085 +- if (ret) {
2086 +- dev_err(smmu->dev, "GBPA not responding to update\n");
2087 ++ if (ret)
2088 + return ret;
2089 +- }
2090 + }
2091 + ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2092 + ARM_SMMU_CR0ACK);
2093 +diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
2094 +index 09b47260c74b..feb1664815b7 100644
2095 +--- a/drivers/iommu/ipmmu-vmsa.c
2096 ++++ b/drivers/iommu/ipmmu-vmsa.c
2097 +@@ -73,7 +73,7 @@ struct ipmmu_vmsa_domain {
2098 + struct io_pgtable_ops *iop;
2099 +
2100 + unsigned int context_id;
2101 +- spinlock_t lock; /* Protects mappings */
2102 ++ struct mutex mutex; /* Protects mappings */
2103 + };
2104 +
2105 + static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
2106 +@@ -595,7 +595,7 @@ static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
2107 + if (!domain)
2108 + return NULL;
2109 +
2110 +- spin_lock_init(&domain->lock);
2111 ++ mutex_init(&domain->mutex);
2112 +
2113 + return &domain->io_domain;
2114 + }
2115 +@@ -641,7 +641,6 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
2116 + struct iommu_fwspec *fwspec = dev->iommu_fwspec;
2117 + struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
2118 + struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
2119 +- unsigned long flags;
2120 + unsigned int i;
2121 + int ret = 0;
2122 +
2123 +@@ -650,7 +649,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
2124 + return -ENXIO;
2125 + }
2126 +
2127 +- spin_lock_irqsave(&domain->lock, flags);
2128 ++ mutex_lock(&domain->mutex);
2129 +
2130 + if (!domain->mmu) {
2131 + /* The domain hasn't been used yet, initialize it. */
2132 +@@ -674,7 +673,7 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
2133 + } else
2134 + dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);
2135 +
2136 +- spin_unlock_irqrestore(&domain->lock, flags);
2137 ++ mutex_unlock(&domain->mutex);
2138 +
2139 + if (ret < 0)
2140 + return ret;
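
The spinlock-to-mutex switch above matters because the attach path can sleep (its context setup allocates memory), and sleeping while holding a spinlock taken with spin_lock_irqsave() is invalid. A minimal sketch of the resulting pattern (illustrative; assumes the critical section may sleep):

        #include <linux/errno.h>
        #include <linux/mutex.h>
        #include <linux/slab.h>

        struct example_domain {
                struct mutex mutex;     /* protects mappings; holders may sleep */
        };

        static int example_attach(struct example_domain *d)
        {
                void *ctx;

                mutex_lock(&d->mutex);          /* sleeping allowed while held */
                ctx = kzalloc(64, GFP_KERNEL);  /* may sleep: illegal under a
                                                 * spinlock with IRQs disabled */
                mutex_unlock(&d->mutex);
                if (!ctx)
                        return -ENOMEM;
                kfree(ctx);
                return 0;
        }
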
2141 +diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
2142 +index 25c1ce811053..1fdd09ebb3f1 100644
2143 +--- a/drivers/macintosh/via-pmu.c
2144 ++++ b/drivers/macintosh/via-pmu.c
2145 +@@ -534,8 +534,9 @@ init_pmu(void)
2146 + int timeout;
2147 + struct adb_request req;
2148 +
2149 +- out_8(&via[B], via[B] | TREQ); /* negate TREQ */
2150 +- out_8(&via[DIRB], (via[DIRB] | TREQ) & ~TACK); /* TACK in, TREQ out */
2151 ++ /* Negate TREQ. Set TACK to input and TREQ to output. */
2152 ++ out_8(&via[B], in_8(&via[B]) | TREQ);
2153 ++ out_8(&via[DIRB], (in_8(&via[DIRB]) | TREQ) & ~TACK);
2154 +
2155 + pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask);
2156 + timeout = 100000;
2157 +@@ -1418,8 +1419,8 @@ pmu_sr_intr(void)
2158 + struct adb_request *req;
2159 + int bite = 0;
2160 +
2161 +- if (via[B] & TREQ) {
2162 +- printk(KERN_ERR "PMU: spurious SR intr (%x)\n", via[B]);
2163 ++ if (in_8(&via[B]) & TREQ) {
2164 ++ printk(KERN_ERR "PMU: spurious SR intr (%x)\n", in_8(&via[B]));
2165 + out_8(&via[IFR], SR_INT);
2166 + return NULL;
2167 + }
2168 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
2169 +index ce14a3d1f609..44df244807e5 100644
2170 +--- a/drivers/md/dm-cache-target.c
2171 ++++ b/drivers/md/dm-cache-target.c
2172 +@@ -2250,7 +2250,7 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2173 + {0, 2, "Invalid number of cache feature arguments"},
2174 + };
2175 +
2176 +- int r;
2177 ++ int r, mode_ctr = 0;
2178 + unsigned argc;
2179 + const char *arg;
2180 + struct cache_features *cf = &ca->features;
2181 +@@ -2264,14 +2264,20 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2182 + while (argc--) {
2183 + arg = dm_shift_arg(as);
2184 +
2185 +- if (!strcasecmp(arg, "writeback"))
2186 ++ if (!strcasecmp(arg, "writeback")) {
2187 + cf->io_mode = CM_IO_WRITEBACK;
2188 ++ mode_ctr++;
2189 ++ }
2190 +
2191 +- else if (!strcasecmp(arg, "writethrough"))
2192 ++ else if (!strcasecmp(arg, "writethrough")) {
2193 + cf->io_mode = CM_IO_WRITETHROUGH;
2194 ++ mode_ctr++;
2195 ++ }
2196 +
2197 +- else if (!strcasecmp(arg, "passthrough"))
2198 ++ else if (!strcasecmp(arg, "passthrough")) {
2199 + cf->io_mode = CM_IO_PASSTHROUGH;
2200 ++ mode_ctr++;
2201 ++ }
2202 +
2203 + else if (!strcasecmp(arg, "metadata2"))
2204 + cf->metadata_version = 2;
2205 +@@ -2282,6 +2288,11 @@ static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
2206 + }
2207 + }
2208 +
2209 ++ if (mode_ctr > 1) {
2210 ++ *error = "Duplicate cache io_mode features requested";
2211 ++ return -EINVAL;
2212 ++ }
2213 ++
2214 + return 0;
2215 + }
2216 +
2217 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2218 +index 2031506a0ecd..49107c52c8e6 100644
2219 +--- a/drivers/md/raid5.c
2220 ++++ b/drivers/md/raid5.c
2221 +@@ -4521,6 +4521,12 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
2222 + s->failed++;
2223 + if (rdev && !test_bit(Faulty, &rdev->flags))
2224 + do_recovery = 1;
2225 ++ else if (!rdev) {
2226 ++ rdev = rcu_dereference(
2227 ++ conf->disks[i].replacement);
2228 ++ if (rdev && !test_bit(Faulty, &rdev->flags))
2229 ++ do_recovery = 1;
2230 ++ }
2231 + }
2232 +
2233 + if (test_bit(R5_InJournal, &dev->flags))
2234 +diff --git a/drivers/media/dvb-frontends/helene.c b/drivers/media/dvb-frontends/helene.c
2235 +index a0d0b53c91d7..a5de65dcf784 100644
2236 +--- a/drivers/media/dvb-frontends/helene.c
2237 ++++ b/drivers/media/dvb-frontends/helene.c
2238 +@@ -897,7 +897,10 @@ static int helene_x_pon(struct helene_priv *priv)
2239 + helene_write_regs(priv, 0x99, cdata, sizeof(cdata));
2240 +
2241 + /* 0x81 - 0x94 */
2242 +- data[0] = 0x18; /* xtal 24 MHz */
2243 ++ if (priv->xtal == SONY_HELENE_XTAL_16000)
2244 ++ data[0] = 0x10; /* xtal 16 MHz */
2245 ++ else
2246 ++ data[0] = 0x18; /* xtal 24 MHz */
2247 + data[1] = (uint8_t)(0x80 | (0x04 & 0x1F)); /* 4 x 25 = 100uA */
2248 + data[2] = (uint8_t)(0x80 | (0x26 & 0x7F)); /* 38 x 0.25 = 9.5pF */
2249 + data[3] = 0x80; /* REFOUT signal output 500mVpp */
2250 +diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
2251 +index 7be636237acf..0f324055cc9f 100644
2252 +--- a/drivers/media/platform/davinci/vpif_display.c
2253 ++++ b/drivers/media/platform/davinci/vpif_display.c
2254 +@@ -1114,6 +1114,14 @@ vpif_init_free_channel_objects:
2255 + return err;
2256 + }
2257 +
2258 ++static void free_vpif_objs(void)
2259 ++{
2260 ++ int i;
2261 ++
2262 ++ for (i = 0; i < VPIF_DISPLAY_MAX_DEVICES; i++)
2263 ++ kfree(vpif_obj.dev[i]);
2264 ++}
2265 ++
2266 + static int vpif_async_bound(struct v4l2_async_notifier *notifier,
2267 + struct v4l2_subdev *subdev,
2268 + struct v4l2_async_subdev *asd)
2269 +@@ -1255,11 +1263,6 @@ static __init int vpif_probe(struct platform_device *pdev)
2270 + return -EINVAL;
2271 + }
2272 +
2273 +- if (!pdev->dev.platform_data) {
2274 +- dev_warn(&pdev->dev, "Missing platform data. Giving up.\n");
2275 +- return -EINVAL;
2276 +- }
2277 +-
2278 + vpif_dev = &pdev->dev;
2279 + err = initialize_vpif();
2280 +
2281 +@@ -1271,7 +1274,7 @@ static __init int vpif_probe(struct platform_device *pdev)
2282 + err = v4l2_device_register(vpif_dev, &vpif_obj.v4l2_dev);
2283 + if (err) {
2284 + v4l2_err(vpif_dev->driver, "Error registering v4l2 device\n");
2285 +- return err;
2286 ++ goto vpif_free;
2287 + }
2288 +
2289 + while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, res_idx))) {
2290 +@@ -1314,7 +1317,10 @@ static __init int vpif_probe(struct platform_device *pdev)
2291 + if (vpif_obj.sd[i])
2292 + vpif_obj.sd[i]->grp_id = 1 << i;
2293 + }
2294 +- vpif_probe_complete();
2295 ++ err = vpif_probe_complete();
2296 ++ if (err) {
2297 ++ goto probe_subdev_out;
2298 ++ }
2299 + } else {
2300 + vpif_obj.notifier.subdevs = vpif_obj.config->asd;
2301 + vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
2302 +@@ -1334,6 +1340,8 @@ probe_subdev_out:
2303 + kfree(vpif_obj.sd);
2304 + vpif_unregister:
2305 + v4l2_device_unregister(&vpif_obj.v4l2_dev);
2306 ++vpif_free:
2307 ++ free_vpif_objs();
2308 +
2309 + return err;
2310 + }
2311 +@@ -1355,8 +1363,8 @@ static int vpif_remove(struct platform_device *device)
2312 + ch = vpif_obj.dev[i];
2313 + /* Unregister video device */
2314 + video_unregister_device(&ch->video_dev);
2315 +- kfree(vpif_obj.dev[i]);
2316 + }
2317 ++ free_vpif_objs();
2318 +
2319 + return 0;
2320 + }
2321 +diff --git a/drivers/media/platform/qcom/camss-8x16/camss-csid.c b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
2322 +index 226f36ef7419..2bf65805f2c1 100644
2323 +--- a/drivers/media/platform/qcom/camss-8x16/camss-csid.c
2324 ++++ b/drivers/media/platform/qcom/camss-8x16/camss-csid.c
2325 +@@ -392,9 +392,6 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
2326 + !media_entity_remote_pad(&csid->pads[MSM_CSID_PAD_SINK]))
2327 + return -ENOLINK;
2328 +
2329 +- dt = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SRC].code)->
2330 +- data_type;
2331 +-
2332 + if (tg->enabled) {
2333 + /* Config Test Generator */
2334 + struct v4l2_mbus_framefmt *f =
2335 +@@ -416,6 +413,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
2336 + writel_relaxed(val, csid->base +
2337 + CAMSS_CSID_TG_DT_n_CGG_0(0));
2338 +
2339 ++ dt = csid_get_fmt_entry(
2340 ++ csid->fmt[MSM_CSID_PAD_SRC].code)->data_type;
2341 ++
2342 + /* 5:0 data type */
2343 + val = dt;
2344 + writel_relaxed(val, csid->base +
2345 +@@ -425,6 +425,9 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
2346 + val = tg->payload_mode;
2347 + writel_relaxed(val, csid->base +
2348 + CAMSS_CSID_TG_DT_n_CGG_2(0));
2349 ++
2350 ++ df = csid_get_fmt_entry(
2351 ++ csid->fmt[MSM_CSID_PAD_SRC].code)->decode_format;
2352 + } else {
2353 + struct csid_phy_config *phy = &csid->phy;
2354 +
2355 +@@ -439,13 +442,16 @@ static int csid_set_stream(struct v4l2_subdev *sd, int enable)
2356 +
2357 + writel_relaxed(val,
2358 + csid->base + CAMSS_CSID_CORE_CTRL_1);
2359 ++
2360 ++ dt = csid_get_fmt_entry(
2361 ++ csid->fmt[MSM_CSID_PAD_SINK].code)->data_type;
2362 ++ df = csid_get_fmt_entry(
2363 ++ csid->fmt[MSM_CSID_PAD_SINK].code)->decode_format;
2364 + }
2365 +
2366 + /* Config LUT */
2367 +
2368 + dt_shift = (cid % 4) * 8;
2369 +- df = csid_get_fmt_entry(csid->fmt[MSM_CSID_PAD_SINK].code)->
2370 +- decode_format;
2371 +
2372 + val = readl_relaxed(csid->base + CAMSS_CSID_CID_LUT_VC_n(vc));
2373 + val &= ~(0xff << dt_shift);
2374 +diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
2375 +index daef72d410a3..dc5ae8025832 100644
2376 +--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
2377 ++++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
2378 +@@ -339,6 +339,7 @@ enum rcar_csi2_pads {
2379 +
2380 + struct rcar_csi2_info {
2381 + int (*init_phtw)(struct rcar_csi2 *priv, unsigned int mbps);
2382 ++ int (*confirm_start)(struct rcar_csi2 *priv);
2383 + const struct rcsi2_mbps_reg *hsfreqrange;
2384 + unsigned int csi0clkfreqrange;
2385 + bool clear_ulps;
2386 +@@ -545,6 +546,13 @@ static int rcsi2_start(struct rcar_csi2 *priv)
2387 + if (ret)
2388 + return ret;
2389 +
2390 ++ /* Confirm start */
2391 ++ if (priv->info->confirm_start) {
2392 ++ ret = priv->info->confirm_start(priv);
2393 ++ if (ret)
2394 ++ return ret;
2395 ++ }
2396 ++
2397 + /* Clear Ultra Low Power interrupt. */
2398 + if (priv->info->clear_ulps)
2399 + rcsi2_write(priv, INTSTATE_REG,
2400 +@@ -880,6 +888,11 @@ static int rcsi2_init_phtw_h3_v3h_m3n(struct rcar_csi2 *priv, unsigned int mbps)
2401 + }
2402 +
2403 + static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
2404 ++{
2405 ++ return rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
2406 ++}
2407 ++
2408 ++static int rcsi2_confirm_start_v3m_e3(struct rcar_csi2 *priv)
2409 + {
2410 + static const struct phtw_value step1[] = {
2411 + { .data = 0xed, .code = 0x34 },
2412 +@@ -890,12 +903,6 @@ static int rcsi2_init_phtw_v3m_e3(struct rcar_csi2 *priv, unsigned int mbps)
2413 + { /* sentinel */ },
2414 + };
2415 +
2416 +- int ret;
2417 +-
2418 +- ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3m_e3, 0x44);
2419 +- if (ret)
2420 +- return ret;
2421 +-
2422 + return rcsi2_phtw_write_array(priv, step1);
2423 + }
2424 +
2425 +@@ -949,6 +956,7 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77965 = {
2426 +
2427 + static const struct rcar_csi2_info rcar_csi2_info_r8a77970 = {
2428 + .init_phtw = rcsi2_init_phtw_v3m_e3,
2429 ++ .confirm_start = rcsi2_confirm_start_v3m_e3,
2430 + };
2431 +
2432 + static const struct of_device_id rcar_csi2_of_table[] = {
2433 +diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
2434 +index a80251ed3143..780548dd650e 100644
2435 +--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
2436 ++++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
2437 +@@ -254,24 +254,24 @@ static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
2438 + static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
2439 + {
2440 + struct s5p_mfc_dev *dev = ctx->dev;
2441 +- struct s5p_mfc_buf *dst_buf, *src_buf;
2442 +- size_t dec_y_addr;
2443 ++ struct s5p_mfc_buf *dst_buf, *src_buf;
2444 ++ u32 dec_y_addr;
2445 + unsigned int frame_type;
2446 +
2447 + /* Make sure we actually have a new frame before continuing. */
2448 + frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
2449 + if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED)
2450 + return;
2451 +- dec_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
2452 ++ dec_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dec_y_adr, dev);
2453 +
2454 + /* Copy timestamp / timecode from decoded src to dst and set
2455 + appropriate flags. */
2456 + src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
2457 + list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
2458 +- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
2459 +- == dec_y_addr) {
2460 +- dst_buf->b->timecode =
2461 +- src_buf->b->timecode;
2462 ++ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
2463 ++
2464 ++ if (addr == dec_y_addr) {
2465 ++ dst_buf->b->timecode = src_buf->b->timecode;
2466 + dst_buf->b->vb2_buf.timestamp =
2467 + src_buf->b->vb2_buf.timestamp;
2468 + dst_buf->b->flags &=
2469 +@@ -307,10 +307,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
2470 + {
2471 + struct s5p_mfc_dev *dev = ctx->dev;
2472 + struct s5p_mfc_buf *dst_buf;
2473 +- size_t dspl_y_addr;
2474 ++ u32 dspl_y_addr;
2475 + unsigned int frame_type;
2476 +
2477 +- dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
2478 ++ dspl_y_addr = (u32)s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
2479 + if (IS_MFCV6_PLUS(dev))
2480 + frame_type = s5p_mfc_hw_call(dev->mfc_ops,
2481 + get_disp_frame_type, ctx);
2482 +@@ -329,9 +329,10 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
2483 + /* The MFC returns address of the buffer, now we have to
2484 + * check which videobuf does it correspond to */
2485 + list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
2486 ++ u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
2487 ++
2488 + /* Check if this is the buffer we're looking for */
2489 +- if (vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0)
2490 +- == dspl_y_addr) {
2491 ++ if (addr == dspl_y_addr) {
2492 + list_del(&dst_buf->list);
2493 + ctx->dst_queue_cnt--;
2494 + dst_buf->b->sequence = ctx->sequence;
2495 +diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
2496 +index 0d4fdd34a710..9ce8b4d79d1f 100644
2497 +--- a/drivers/media/usb/dvb-usb/dw2102.c
2498 ++++ b/drivers/media/usb/dvb-usb/dw2102.c
2499 +@@ -2101,14 +2101,12 @@ static struct dvb_usb_device_properties s6x0_properties = {
2500 + }
2501 + };
2502 +
2503 +-static struct dvb_usb_device_properties *p1100;
2504 + static const struct dvb_usb_device_description d1100 = {
2505 + "Prof 1100 USB ",
2506 + {&dw2102_table[PROF_1100], NULL},
2507 + {NULL},
2508 + };
2509 +
2510 +-static struct dvb_usb_device_properties *s660;
2511 + static const struct dvb_usb_device_description d660 = {
2512 + "TeVii S660 USB",
2513 + {&dw2102_table[TEVII_S660], NULL},
2514 +@@ -2127,14 +2125,12 @@ static const struct dvb_usb_device_description d480_2 = {
2515 + {NULL},
2516 + };
2517 +
2518 +-static struct dvb_usb_device_properties *p7500;
2519 + static const struct dvb_usb_device_description d7500 = {
2520 + "Prof 7500 USB DVB-S2",
2521 + {&dw2102_table[PROF_7500], NULL},
2522 + {NULL},
2523 + };
2524 +
2525 +-static struct dvb_usb_device_properties *s421;
2526 + static const struct dvb_usb_device_description d421 = {
2527 + "TeVii S421 PCI",
2528 + {&dw2102_table[TEVII_S421], NULL},
2529 +@@ -2334,6 +2330,11 @@ static int dw2102_probe(struct usb_interface *intf,
2530 + const struct usb_device_id *id)
2531 + {
2532 + int retval = -ENOMEM;
2533 ++ struct dvb_usb_device_properties *p1100;
2534 ++ struct dvb_usb_device_properties *s660;
2535 ++ struct dvb_usb_device_properties *p7500;
2536 ++ struct dvb_usb_device_properties *s421;
2537 ++
2538 + p1100 = kmemdup(&s6x0_properties,
2539 + sizeof(struct dvb_usb_device_properties), GFP_KERNEL);
2540 + if (!p1100)
2541 +@@ -2402,8 +2403,16 @@ static int dw2102_probe(struct usb_interface *intf,
2542 + 0 == dvb_usb_device_init(intf, &t220_properties,
2543 + THIS_MODULE, NULL, adapter_nr) ||
2544 + 0 == dvb_usb_device_init(intf, &tt_s2_4600_properties,
2545 +- THIS_MODULE, NULL, adapter_nr))
2546 ++ THIS_MODULE, NULL, adapter_nr)) {
2547 ++
2548 ++ /* clean up copied properties */
2549 ++ kfree(s421);
2550 ++ kfree(p7500);
2551 ++ kfree(s660);
2552 ++ kfree(p1100);
2553 ++
2554 + return 0;
2555 ++ }
2556 +
2557 + retval = -ENODEV;
2558 + kfree(s421);
2559 +diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
2560 +index 6c8438311d3b..ff5e41ac4723 100644
2561 +--- a/drivers/media/usb/em28xx/em28xx-cards.c
2562 ++++ b/drivers/media/usb/em28xx/em28xx-cards.c
2563 +@@ -3376,7 +3376,9 @@ void em28xx_free_device(struct kref *ref)
2564 + if (!dev->disconnected)
2565 + em28xx_release_resources(dev);
2566 +
2567 +- kfree(dev->alt_max_pkt_size_isoc);
2568 ++ if (dev->ts == PRIMARY_TS)
2569 ++ kfree(dev->alt_max_pkt_size_isoc);
2570 ++
2571 + kfree(dev);
2572 + }
2573 + EXPORT_SYMBOL_GPL(em28xx_free_device);
2574 +diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
2575 +index f70845e7d8c6..45b24776a695 100644
2576 +--- a/drivers/media/usb/em28xx/em28xx-core.c
2577 ++++ b/drivers/media/usb/em28xx/em28xx-core.c
2578 +@@ -655,12 +655,12 @@ int em28xx_capture_start(struct em28xx *dev, int start)
2579 + rc = em28xx_write_reg_bits(dev,
2580 + EM2874_R5F_TS_ENABLE,
2581 + start ? EM2874_TS1_CAPTURE_ENABLE : 0x00,
2582 +- EM2874_TS1_CAPTURE_ENABLE);
2583 ++ EM2874_TS1_CAPTURE_ENABLE | EM2874_TS1_FILTER_ENABLE | EM2874_TS1_NULL_DISCARD);
2584 + else
2585 + rc = em28xx_write_reg_bits(dev,
2586 + EM2874_R5F_TS_ENABLE,
2587 + start ? EM2874_TS2_CAPTURE_ENABLE : 0x00,
2588 +- EM2874_TS2_CAPTURE_ENABLE);
2589 ++ EM2874_TS2_CAPTURE_ENABLE | EM2874_TS2_FILTER_ENABLE | EM2874_TS2_NULL_DISCARD);
2590 + } else {
2591 + /* FIXME: which is the best order? */
2592 + /* video registers are sampled by VREF */
2593 +diff --git a/drivers/media/usb/em28xx/em28xx-dvb.c b/drivers/media/usb/em28xx/em28xx-dvb.c
2594 +index b778d8a1983e..a73faf12f7e4 100644
2595 +--- a/drivers/media/usb/em28xx/em28xx-dvb.c
2596 ++++ b/drivers/media/usb/em28xx/em28xx-dvb.c
2597 +@@ -218,7 +218,9 @@ static int em28xx_start_streaming(struct em28xx_dvb *dvb)
2598 + dvb_alt = dev->dvb_alt_isoc;
2599 + }
2600 +
2601 +- usb_set_interface(udev, dev->ifnum, dvb_alt);
2602 ++ if (!dev->board.has_dual_ts)
2603 ++ usb_set_interface(udev, dev->ifnum, dvb_alt);
2604 ++
2605 + rc = em28xx_set_mode(dev, EM28XX_DIGITAL_MODE);
2606 + if (rc < 0)
2607 + return rc;
2608 +diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
2609 +index 31112f622b88..475e5b3790ed 100644
2610 +--- a/drivers/memory/ti-aemif.c
2611 ++++ b/drivers/memory/ti-aemif.c
2612 +@@ -411,7 +411,7 @@ static int aemif_probe(struct platform_device *pdev)
2613 + if (ret < 0)
2614 + goto error;
2615 + }
2616 +- } else {
2617 ++ } else if (pdata) {
2618 + for (i = 0; i < pdata->num_sub_devices; i++) {
2619 + pdata->sub_devices[i].dev.parent = dev;
2620 + ret = platform_device_register(&pdata->sub_devices[i]);
2621 +diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
2622 +index 36dcd98977d6..4f545fdc6ebc 100644
2623 +--- a/drivers/mfd/rave-sp.c
2624 ++++ b/drivers/mfd/rave-sp.c
2625 +@@ -776,6 +776,13 @@ static int rave_sp_probe(struct serdev_device *serdev)
2626 + return ret;
2627 +
2628 + serdev_device_set_baudrate(serdev, baud);
2629 ++ serdev_device_set_flow_control(serdev, false);
2630 ++
2631 ++ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
2632 ++ if (ret) {
2633 ++ dev_err(dev, "Failed to set parity\n");
2634 ++ return ret;
2635 ++ }
2636 +
2637 + ret = rave_sp_get_status(sp);
2638 + if (ret) {
2639 +diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
2640 +index 47012c0899cd..7a30546880a4 100644
2641 +--- a/drivers/mfd/ti_am335x_tscadc.c
2642 ++++ b/drivers/mfd/ti_am335x_tscadc.c
2643 +@@ -209,14 +209,13 @@ static int ti_tscadc_probe(struct platform_device *pdev)
2644 + * The TSC_ADC_SS controller design assumes the OCP clock is
2645 + * at least 6x faster than the ADC clock.
2646 + */
2647 +- clk = clk_get(&pdev->dev, "adc_tsc_fck");
2648 ++ clk = devm_clk_get(&pdev->dev, "adc_tsc_fck");
2649 + if (IS_ERR(clk)) {
2650 + dev_err(&pdev->dev, "failed to get TSC fck\n");
2651 + err = PTR_ERR(clk);
2652 + goto err_disable_clk;
2653 + }
2654 + clock_rate = clk_get_rate(clk);
2655 +- clk_put(clk);
2656 + tscadc->clk_div = clock_rate / ADC_CLK;
2657 +
2658 + /* TSCADC_CLKDIV needs to be configured to the value minus 1 */
2659 +diff --git a/drivers/misc/mic/scif/scif_api.c b/drivers/misc/mic/scif/scif_api.c
2660 +index 7b2dddcdd46d..42f7a12894d6 100644
2661 +--- a/drivers/misc/mic/scif/scif_api.c
2662 ++++ b/drivers/misc/mic/scif/scif_api.c
2663 +@@ -370,11 +370,10 @@ int scif_bind(scif_epd_t epd, u16 pn)
2664 + goto scif_bind_exit;
2665 + }
2666 + } else {
2667 +- pn = scif_get_new_port();
2668 +- if (!pn) {
2669 +- ret = -ENOSPC;
2670 ++ ret = scif_get_new_port();
2671 ++ if (ret < 0)
2672 + goto scif_bind_exit;
2673 +- }
2674 ++ pn = ret;
2675 + }
2676 +
2677 + ep->state = SCIFEP_BOUND;
2678 +@@ -648,13 +647,12 @@ int __scif_connect(scif_epd_t epd, struct scif_port_id *dst, bool non_block)
2679 + err = -EISCONN;
2680 + break;
2681 + case SCIFEP_UNBOUND:
2682 +- ep->port.port = scif_get_new_port();
2683 +- if (!ep->port.port) {
2684 +- err = -ENOSPC;
2685 +- } else {
2686 +- ep->port.node = scif_info.nodeid;
2687 +- ep->conn_async_state = ASYNC_CONN_IDLE;
2688 +- }
2689 ++ err = scif_get_new_port();
2690 ++ if (err < 0)
2691 ++ break;
2692 ++ ep->port.port = err;
2693 ++ ep->port.node = scif_info.nodeid;
2694 ++ ep->conn_async_state = ASYNC_CONN_IDLE;
2695 + /* Fall through */
2696 + case SCIFEP_BOUND:
2697 + /*
2698 +diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
2699 +index 5ec3f5a43718..14a5e9da32bd 100644
2700 +--- a/drivers/misc/ti-st/st_kim.c
2701 ++++ b/drivers/misc/ti-st/st_kim.c
2702 +@@ -756,14 +756,14 @@ static int kim_probe(struct platform_device *pdev)
2703 + err = gpio_request(kim_gdata->nshutdown, "kim");
2704 + if (unlikely(err)) {
2705 + pr_err(" gpio %d request failed ", kim_gdata->nshutdown);
2706 +- return err;
2707 ++ goto err_sysfs_group;
2708 + }
2709 +
2710 + /* Configure nShutdown GPIO as output=0 */
2711 + err = gpio_direction_output(kim_gdata->nshutdown, 0);
2712 + if (unlikely(err)) {
2713 + pr_err(" unable to configure gpio %d", kim_gdata->nshutdown);
2714 +- return err;
2715 ++ goto err_sysfs_group;
2716 + }
2717 + /* get reference of pdev for request_firmware
2718 + */
2719 +diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
2720 +index b01d15ec4c56..3e3e6a8f1abc 100644
2721 +--- a/drivers/mtd/nand/raw/nand_base.c
2722 ++++ b/drivers/mtd/nand/raw/nand_base.c
2723 +@@ -2668,8 +2668,8 @@ static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2724 + return subop && instr_idx < subop->ninstrs;
2725 + }
2726 +
2727 +-static int nand_subop_get_start_off(const struct nand_subop *subop,
2728 +- unsigned int instr_idx)
2729 ++static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2730 ++ unsigned int instr_idx)
2731 + {
2732 + if (instr_idx)
2733 + return 0;
2734 +@@ -2688,12 +2688,12 @@ static int nand_subop_get_start_off(const struct nand_subop *subop,
2735 + *
2736 + * Given an address instruction, returns the offset of the first cycle to issue.
2737 + */
2738 +-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2739 +- unsigned int instr_idx)
2740 ++unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
2741 ++ unsigned int instr_idx)
2742 + {
2743 +- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2744 +- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2745 +- return -EINVAL;
2746 ++ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2747 ++ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2748 ++ return 0;
2749 +
2750 + return nand_subop_get_start_off(subop, instr_idx);
2751 + }
2752 +@@ -2710,14 +2710,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2753 + *
2754 + * Given an address instruction, returns the number of address cycle to issue.
2755 + */
2756 +-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2757 +- unsigned int instr_idx)
2758 ++unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
2759 ++ unsigned int instr_idx)
2760 + {
2761 + int start_off, end_off;
2762 +
2763 +- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2764 +- subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR)
2765 +- return -EINVAL;
2766 ++ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2767 ++ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
2768 ++ return 0;
2769 +
2770 + start_off = nand_subop_get_addr_start_off(subop, instr_idx);
2771 +
2772 +@@ -2742,12 +2742,12 @@ EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2773 + *
2774 + * Given a data instruction, returns the offset to start from.
2775 + */
2776 +-int nand_subop_get_data_start_off(const struct nand_subop *subop,
2777 +- unsigned int instr_idx)
2778 ++unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
2779 ++ unsigned int instr_idx)
2780 + {
2781 +- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2782 +- !nand_instr_is_data(&subop->instrs[instr_idx]))
2783 +- return -EINVAL;
2784 ++ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2785 ++ !nand_instr_is_data(&subop->instrs[instr_idx])))
2786 ++ return 0;
2787 +
2788 + return nand_subop_get_start_off(subop, instr_idx);
2789 + }
2790 +@@ -2764,14 +2764,14 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2791 + *
2792 + * Returns the length of the chunk of data to send/receive.
2793 + */
2794 +-int nand_subop_get_data_len(const struct nand_subop *subop,
2795 +- unsigned int instr_idx)
2796 ++unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
2797 ++ unsigned int instr_idx)
2798 + {
2799 + int start_off = 0, end_off;
2800 +
2801 +- if (!nand_subop_instr_is_valid(subop, instr_idx) ||
2802 +- !nand_instr_is_data(&subop->instrs[instr_idx]))
2803 +- return -EINVAL;
2804 ++ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
2805 ++ !nand_instr_is_data(&subop->instrs[instr_idx])))
2806 ++ return 0;
2807 +
2808 + start_off = nand_subop_get_data_start_off(subop, instr_idx);
2809 +
2810 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
2811 +index 82ac1d10f239..b4253d0e056b 100644
2812 +--- a/drivers/net/ethernet/marvell/mvneta.c
2813 ++++ b/drivers/net/ethernet/marvell/mvneta.c
2814 +@@ -3196,7 +3196,6 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
2815 +
2816 + on_each_cpu(mvneta_percpu_enable, pp, true);
2817 + mvneta_start_dev(pp);
2818 +- mvneta_port_up(pp);
2819 +
2820 + netdev_update_features(dev);
2821 +
2822 +diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
2823 +index 0c5b68e7da51..9b3167054843 100644
2824 +--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
2825 ++++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
2826 +@@ -22,7 +22,7 @@
2827 + #include <linux/mdio-mux.h>
2828 + #include <linux/delay.h>
2829 +
2830 +-#define MDIO_PARAM_OFFSET 0x00
2831 ++#define MDIO_PARAM_OFFSET 0x23c
2832 + #define MDIO_PARAM_MIIM_CYCLE 29
2833 + #define MDIO_PARAM_INTERNAL_SEL 25
2834 + #define MDIO_PARAM_BUS_ID 22
2835 +@@ -30,20 +30,22 @@
2836 + #define MDIO_PARAM_PHY_ID 16
2837 + #define MDIO_PARAM_PHY_DATA 0
2838 +
2839 +-#define MDIO_READ_OFFSET 0x04
2840 ++#define MDIO_READ_OFFSET 0x240
2841 + #define MDIO_READ_DATA_MASK 0xffff
2842 +-#define MDIO_ADDR_OFFSET 0x08
2843 ++#define MDIO_ADDR_OFFSET 0x244
2844 +
2845 +-#define MDIO_CTRL_OFFSET 0x0C
2846 ++#define MDIO_CTRL_OFFSET 0x248
2847 + #define MDIO_CTRL_WRITE_OP 0x1
2848 + #define MDIO_CTRL_READ_OP 0x2
2849 +
2850 +-#define MDIO_STAT_OFFSET 0x10
2851 ++#define MDIO_STAT_OFFSET 0x24c
2852 + #define MDIO_STAT_DONE 1
2853 +
2854 + #define BUS_MAX_ADDR 32
2855 + #define EXT_BUS_START_ADDR 16
2856 +
2857 ++#define MDIO_REG_ADDR_SPACE_SIZE 0x250
2858 ++
2859 + struct iproc_mdiomux_desc {
2860 + void *mux_handle;
2861 + void __iomem *base;
2862 +@@ -169,6 +171,14 @@ static int mdio_mux_iproc_probe(struct platform_device *pdev)
2863 + md->dev = &pdev->dev;
2864 +
2865 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2866 ++ if (res->start & 0xfff) {
2867 ++ /* For backward compatibility in case the
2868 ++ * base address is specified with an offset.
2869 ++ */
2870 ++ dev_info(&pdev->dev, "fix base address in dt-blob\n");
2871 ++ res->start &= ~0xfff;
2872 ++ res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
2873 ++ }
2874 + md->base = devm_ioremap_resource(&pdev->dev, res);
2875 + if (IS_ERR(md->base)) {
2876 + dev_err(&pdev->dev, "failed to ioremap register\n");
2877 +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2878 +index 836e0a47b94a..747c6951b5c1 100644
2879 +--- a/drivers/net/wireless/ath/ath10k/mac.c
2880 ++++ b/drivers/net/wireless/ath/ath10k/mac.c
2881 +@@ -3085,6 +3085,13 @@ static int ath10k_update_channel_list(struct ath10k *ar)
2882 + passive = channel->flags & IEEE80211_CHAN_NO_IR;
2883 + ch->passive = passive;
2884 +
2885 ++ /* the firmware is ignoring the "radar" flag of the
2886 ++ * channel and is scanning actively using Probe Requests
2887 ++ * on "Radar detection"/DFS channels which are not
2888 ++ * marked as "available"
2889 ++ */
2890 ++ ch->passive |= ch->chan_radar;
2891 ++
2892 + ch->freq = channel->center_freq;
2893 + ch->band_center_freq1 = channel->center_freq;
2894 + ch->min_power = 0;
2895 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2896 +index 8c49a26fc571..21eb3a598a86 100644
2897 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2898 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2899 +@@ -1584,6 +1584,11 @@ static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
2900 + cfg->keep_alive_pattern_size = __cpu_to_le32(0);
2901 + cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
2902 + cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
2903 ++ cfg->wmi_send_separate = __cpu_to_le32(0);
2904 ++ cfg->num_ocb_vdevs = __cpu_to_le32(0);
2905 ++ cfg->num_ocb_channels = __cpu_to_le32(0);
2906 ++ cfg->num_ocb_schedules = __cpu_to_le32(0);
2907 ++ cfg->host_capab = __cpu_to_le32(0);
2908 +
2909 + ath10k_wmi_put_host_mem_chunks(ar, chunks);
2910 +
2911 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2912 +index 3e1e340cd834..1cb93d09b8a9 100644
2913 +--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2914 ++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
2915 +@@ -1670,6 +1670,11 @@ struct wmi_tlv_resource_config {
2916 + __le32 keep_alive_pattern_size;
2917 + __le32 max_tdls_concurrent_sleep_sta;
2918 + __le32 max_tdls_concurrent_buffer_sta;
2919 ++ __le32 wmi_send_separate;
2920 ++ __le32 num_ocb_vdevs;
2921 ++ __le32 num_ocb_channels;
2922 ++ __le32 num_ocb_schedules;
2923 ++ __le32 host_capab;
2924 + } __packed;
2925 +
2926 + struct wmi_tlv_init_cmd {
2927 +diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
2928 +index e60bea4604e4..fcd9d5eeae72 100644
2929 +--- a/drivers/net/wireless/ath/ath9k/hw.c
2930 ++++ b/drivers/net/wireless/ath/ath9k/hw.c
2931 +@@ -2942,16 +2942,19 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
2932 + struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
2933 + struct ieee80211_channel *channel;
2934 + int chan_pwr, new_pwr;
2935 ++ u16 ctl = NO_CTL;
2936 +
2937 + if (!chan)
2938 + return;
2939 +
2940 ++ if (!test)
2941 ++ ctl = ath9k_regd_get_ctl(reg, chan);
2942 ++
2943 + channel = chan->chan;
2944 + chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
2945 + new_pwr = min_t(int, chan_pwr, reg->power_limit);
2946 +
2947 +- ah->eep_ops->set_txpower(ah, chan,
2948 +- ath9k_regd_get_ctl(reg, chan),
2949 ++ ah->eep_ops->set_txpower(ah, chan, ctl,
2950 + get_antenna_gain(ah, chan), new_pwr, test);
2951 + }
2952 +
2953 +diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2954 +index 7fdb152be0bb..a249ee747dc9 100644
2955 +--- a/drivers/net/wireless/ath/ath9k/xmit.c
2956 ++++ b/drivers/net/wireless/ath/ath9k/xmit.c
2957 +@@ -86,7 +86,8 @@ static void ath_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
2958 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2959 + struct ieee80211_sta *sta = info->status.status_driver_data[0];
2960 +
2961 +- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
2962 ++ if (info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
2963 ++ IEEE80211_TX_STATUS_EOSP)) {
2964 + ieee80211_tx_status(hw, skb);
2965 + return;
2966 + }
2967 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2968 +index 8520523b91b4..d8d8443c1c93 100644
2969 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2970 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2971 +@@ -1003,6 +1003,10 @@ static int iwl_pci_resume(struct device *device)
2972 + if (!trans->op_mode)
2973 + return 0;
2974 +
2975 ++ /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
2976 ++ if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
2977 ++ return 0;
2978 ++
2979 + /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
2980 + iwl_pcie_conf_msix_hw(trans_pcie);
2981 +
2982 +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2983 +index 7229991ae70d..a2a98087eb41 100644
2984 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2985 ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2986 +@@ -1539,18 +1539,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
2987 +
2988 + iwl_pcie_enable_rx_wake(trans, true);
2989 +
2990 +- /*
2991 +- * Reconfigure IVAR table in case of MSIX or reset ict table in
2992 +- * MSI mode since HW reset erased it.
2993 +- * Also enables interrupts - none will happen as
2994 +- * the device doesn't know we're waking it up, only when
2995 +- * the opmode actually tells it after this call.
2996 +- */
2997 +- iwl_pcie_conf_msix_hw(trans_pcie);
2998 +- if (!trans_pcie->msix_enabled)
2999 +- iwl_pcie_reset_ict(trans);
3000 +- iwl_enable_interrupts(trans);
3001 +-
3002 + iwl_set_bit(trans, CSR_GP_CNTRL,
3003 + BIT(trans->cfg->csr->flag_mac_access_req));
3004 + iwl_set_bit(trans, CSR_GP_CNTRL,
3005 +@@ -1568,6 +1556,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
3006 + return ret;
3007 + }
3008 +
3009 ++ /*
3010 ++ * Reconfigure IVAR table in case of MSIX or reset ict table in
3011 ++ * MSI mode since HW reset erased it.
3012 ++ * Also enables interrupts - none will happen as
3013 ++ * the device doesn't know we're waking it up, only when
3014 ++ * the opmode actually tells it after this call.
3015 ++ */
3016 ++ iwl_pcie_conf_msix_hw(trans_pcie);
3017 ++ if (!trans_pcie->msix_enabled)
3018 ++ iwl_pcie_reset_ict(trans);
3019 ++ iwl_enable_interrupts(trans);
3020 ++
3021 + iwl_pcie_set_pwr(trans, false);
3022 +
3023 + if (!reset) {
3024 +diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
3025 +index 0f15696195f8..078a4940bc5c 100644
3026 +--- a/drivers/net/wireless/ti/wlcore/rx.c
3027 ++++ b/drivers/net/wireless/ti/wlcore/rx.c
3028 +@@ -59,7 +59,7 @@ static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
3029 + static void wl1271_rx_status(struct wl1271 *wl,
3030 + struct wl1271_rx_descriptor *desc,
3031 + struct ieee80211_rx_status *status,
3032 +- u8 beacon)
3033 ++ u8 beacon, u8 probe_rsp)
3034 + {
3035 + memset(status, 0, sizeof(struct ieee80211_rx_status));
3036 +
3037 +@@ -106,6 +106,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
3038 + }
3039 + }
3040 +
3041 ++ if (beacon || probe_rsp)
3042 ++ status->boottime_ns = ktime_get_boot_ns();
3043 ++
3044 + if (beacon)
3045 + wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
3046 + status->band);
3047 +@@ -191,7 +194,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
3048 + if (ieee80211_is_data_present(hdr->frame_control))
3049 + is_data = 1;
3050 +
3051 +- wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
3052 ++ wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
3053 ++ ieee80211_is_probe_resp(hdr->frame_control));
3054 + wlcore_hw_set_rx_csum(wl, desc, skb);
3055 +
3056 + seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
3057 +diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
3058 +index cf0aa7cee5b0..a939e8d31735 100644
3059 +--- a/drivers/pci/controller/pcie-mobiveil.c
3060 ++++ b/drivers/pci/controller/pcie-mobiveil.c
3061 +@@ -23,6 +23,8 @@
3062 + #include <linux/platform_device.h>
3063 + #include <linux/slab.h>
3064 +
3065 ++#include "../pci.h"
3066 ++
3067 + /* register offsets and bit positions */
3068 +
3069 + /*
3070 +@@ -130,7 +132,7 @@ struct mobiveil_pcie {
3071 + void __iomem *config_axi_slave_base; /* endpoint config base */
3072 + void __iomem *csr_axi_slave_base; /* root port config base */
3073 + void __iomem *apb_csr_base; /* MSI register base */
3074 +- void __iomem *pcie_reg_base; /* Physical PCIe Controller Base */
3075 ++ phys_addr_t pcie_reg_base; /* Physical PCIe Controller Base */
3076 + struct irq_domain *intx_domain;
3077 + raw_spinlock_t intx_mask_lock;
3078 + int irq;
3079 +diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
3080 +index 47cd0c037433..f96af1467984 100644
3081 +--- a/drivers/pci/switch/switchtec.c
3082 ++++ b/drivers/pci/switch/switchtec.c
3083 +@@ -14,6 +14,8 @@
3084 + #include <linux/poll.h>
3085 + #include <linux/wait.h>
3086 +
3087 ++#include <linux/nospec.h>
3088 ++
3089 + MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver");
3090 + MODULE_VERSION("0.1");
3091 + MODULE_LICENSE("GPL");
3092 +@@ -909,6 +911,8 @@ static int ioctl_port_to_pff(struct switchtec_dev *stdev,
3093 + default:
3094 + if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
3095 + return -EINVAL;
3096 ++ p.port = array_index_nospec(p.port,
3097 ++ ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
3098 + p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
3099 + break;
3100 + }
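
The hunk above is the standard Spectre-v1 hardening pattern: a user-controlled index is bounds-checked and then clamped with array_index_nospec() so a mispredicted branch cannot be used to read out of bounds speculatively. A minimal sketch (demo names, not the switchtec structures), assuming the same 1-based port numbering as ioctl_port_to_pff():

	#include <linux/kernel.h>
	#include <linux/nospec.h>

	struct demo_cfg {
		u32 inst_id[48];
	};

	static u32 demo_lookup(struct demo_cfg *cfg, unsigned int port)
	{
		if (port == 0 || port > ARRAY_SIZE(cfg->inst_id))
			return 0;
		/* Clamp before use; "+ 1" because port is 1-based. */
		port = array_index_nospec(port, ARRAY_SIZE(cfg->inst_id) + 1);
		return cfg->inst_id[port - 1];
	}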
3101 +diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
3102 +index d6d183e9db17..b5903fffb3d0 100644
3103 +--- a/drivers/pinctrl/berlin/berlin.c
3104 ++++ b/drivers/pinctrl/berlin/berlin.c
3105 +@@ -216,10 +216,8 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
3106 + }
3107 +
3108 + /* we will reallocate later */
3109 +- pctrl->functions = devm_kcalloc(&pdev->dev,
3110 +- max_functions,
3111 +- sizeof(*pctrl->functions),
3112 +- GFP_KERNEL);
3113 ++ pctrl->functions = kcalloc(max_functions,
3114 ++ sizeof(*pctrl->functions), GFP_KERNEL);
3115 + if (!pctrl->functions)
3116 + return -ENOMEM;
3117 +
3118 +@@ -257,8 +255,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
3119 + function++;
3120 + }
3121 +
3122 +- if (!found)
3123 ++ if (!found) {
3124 ++ kfree(pctrl->functions);
3125 + return -EINVAL;
3126 ++ }
3127 +
3128 + if (!function->groups) {
3129 + function->groups =
3130 +@@ -267,8 +267,10 @@ static int berlin_pinctrl_build_state(struct platform_device *pdev)
3131 + sizeof(char *),
3132 + GFP_KERNEL);
3133 +
3134 +- if (!function->groups)
3135 ++ if (!function->groups) {
3136 ++ kfree(pctrl->functions);
3137 + return -ENOMEM;
3138 ++ }
3139 + }
3140 +
3141 + groups = function->groups;
3142 +diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
3143 +index 1c6bb15579e1..b04edc22dad7 100644
3144 +--- a/drivers/pinctrl/freescale/pinctrl-imx.c
3145 ++++ b/drivers/pinctrl/freescale/pinctrl-imx.c
3146 +@@ -383,7 +383,7 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
3147 + const char *name;
3148 + int i, ret;
3149 +
3150 +- if (group > pctldev->num_groups)
3151 ++ if (group >= pctldev->num_groups)
3152 + return;
3153 +
3154 + seq_puts(s, "\n");
3155 +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
3156 +index 04ae139671c8..b91db89eb924 100644
3157 +--- a/drivers/pinctrl/pinctrl-amd.c
3158 ++++ b/drivers/pinctrl/pinctrl-amd.c
3159 +@@ -552,7 +552,8 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id)
3160 + /* Each status bit covers four pins */
3161 + for (i = 0; i < 4; i++) {
3162 + regval = readl(regs + i);
3163 +- if (!(regval & PIN_IRQ_PENDING))
3164 ++ if (!(regval & PIN_IRQ_PENDING) ||
3165 ++ !(regval & BIT(INTERRUPT_MASK_OFF)))
3166 + continue;
3167 + irq = irq_find_mapping(gc->irq.domain, irqnr + i);
3168 + generic_handle_irq(irq);
3169 +diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
3170 +index fc12badf3805..d84fab616abf 100644
3171 +--- a/drivers/regulator/tps65217-regulator.c
3172 ++++ b/drivers/regulator/tps65217-regulator.c
3173 +@@ -232,6 +232,8 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
3174 + tps->strobes = devm_kcalloc(&pdev->dev,
3175 + TPS65217_NUM_REGULATOR, sizeof(u8),
3176 + GFP_KERNEL);
3177 ++ if (!tps->strobes)
3178 ++ return -ENOMEM;
3179 +
3180 + platform_set_drvdata(pdev, tps);
3181 +
3182 +diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
3183 +index b714a543a91d..8122807db380 100644
3184 +--- a/drivers/rpmsg/rpmsg_core.c
3185 ++++ b/drivers/rpmsg/rpmsg_core.c
3186 +@@ -15,6 +15,7 @@
3187 + #include <linux/module.h>
3188 + #include <linux/rpmsg.h>
3189 + #include <linux/of_device.h>
3190 ++#include <linux/pm_domain.h>
3191 + #include <linux/slab.h>
3192 +
3193 + #include "rpmsg_internal.h"
3194 +@@ -449,6 +450,10 @@ static int rpmsg_dev_probe(struct device *dev)
3195 + struct rpmsg_endpoint *ept = NULL;
3196 + int err;
3197 +
3198 ++ err = dev_pm_domain_attach(dev, true);
3199 ++ if (err)
3200 ++ goto out;
3201 ++
3202 + if (rpdrv->callback) {
3203 + strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
3204 + chinfo.src = rpdev->src;
3205 +@@ -490,6 +495,8 @@ static int rpmsg_dev_remove(struct device *dev)
3206 +
3207 + rpdrv->remove(rpdev);
3208 +
3209 ++ dev_pm_domain_detach(dev, true);
3210 ++
3211 + if (rpdev->ept)
3212 + rpmsg_destroy_ept(rpdev->ept);
3213 +
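
The two hunks above keep probe and remove symmetric: the rpmsg device is attached to its PM domain (and powered on) before the driver callback runs, and detached again on removal. A hedged sketch of the same pairing for a generic driver (demo names):

	#include <linux/device.h>
	#include <linux/pm_domain.h>

	static int demo_probe(struct device *dev)
	{
		/* Power the device via its genpd before touching it. */
		int err = dev_pm_domain_attach(dev, true);

		if (err)
			return err;
		/* ... driver setup that accesses the hardware ... */
		return 0;
	}

	static void demo_remove(struct device *dev)
	{
		/* ... driver teardown ... */
		dev_pm_domain_detach(dev, true);	/* power off */
	}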
3214 +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
3215 +index 99ba4a770406..27521fc3ef5a 100644
3216 +--- a/drivers/scsi/3w-9xxx.c
3217 ++++ b/drivers/scsi/3w-9xxx.c
3218 +@@ -2038,6 +2038,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3219 +
3220 + if (twa_initialize_device_extension(tw_dev)) {
3221 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
3222 ++ retval = -ENOMEM;
3223 + goto out_free_device_extension;
3224 + }
3225 +
3226 +@@ -2060,6 +2061,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3227 + tw_dev->base_addr = ioremap(mem_addr, mem_len);
3228 + if (!tw_dev->base_addr) {
3229 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
3230 ++ retval = -ENOMEM;
3231 + goto out_release_mem_region;
3232 + }
3233 +
3234 +@@ -2067,8 +2069,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3235 + TW_DISABLE_INTERRUPTS(tw_dev);
3236 +
3237 + /* Initialize the card */
3238 +- if (twa_reset_sequence(tw_dev, 0))
3239 ++ if (twa_reset_sequence(tw_dev, 0)) {
3240 ++ retval = -ENOMEM;
3241 + goto out_iounmap;
3242 ++ }
3243 +
3244 + /* Set host specific parameters */
3245 + if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
3246 +diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
3247 +index cf9f2a09b47d..40c1e6e64f58 100644
3248 +--- a/drivers/scsi/3w-sas.c
3249 ++++ b/drivers/scsi/3w-sas.c
3250 +@@ -1594,6 +1594,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3251 +
3252 + if (twl_initialize_device_extension(tw_dev)) {
3253 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
3254 ++ retval = -ENOMEM;
3255 + goto out_free_device_extension;
3256 + }
3257 +
3258 +@@ -1608,6 +1609,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3259 + tw_dev->base_addr = pci_iomap(pdev, 1, 0);
3260 + if (!tw_dev->base_addr) {
3261 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
3262 ++ retval = -ENOMEM;
3263 + goto out_release_mem_region;
3264 + }
3265 +
3266 +@@ -1617,6 +1619,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3267 + /* Initialize the card */
3268 + if (twl_reset_sequence(tw_dev, 0)) {
3269 + TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
3270 ++ retval = -ENOMEM;
3271 + goto out_iounmap;
3272 + }
3273 +
3274 +diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
3275 +index f6179e3d6953..961ea6f7def8 100644
3276 +--- a/drivers/scsi/3w-xxxx.c
3277 ++++ b/drivers/scsi/3w-xxxx.c
3278 +@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3279 +
3280 + if (tw_initialize_device_extension(tw_dev)) {
3281 + printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
3282 ++ retval = -ENOMEM;
3283 + goto out_free_device_extension;
3284 + }
3285 +
3286 +@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
3287 + tw_dev->base_addr = pci_resource_start(pdev, 0);
3288 + if (!tw_dev->base_addr) {
3289 + printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
3290 ++ retval = -ENOMEM;
3291 + goto out_release_mem_region;
3292 + }
3293 +
3294 +diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
3295 +index 20b249a649dd..902004dc8dc7 100644
3296 +--- a/drivers/scsi/lpfc/lpfc.h
3297 ++++ b/drivers/scsi/lpfc/lpfc.h
3298 +@@ -672,7 +672,7 @@ struct lpfc_hba {
3299 + #define LS_NPIV_FAB_SUPPORTED 0x2 /* Fabric supports NPIV */
3300 + #define LS_IGNORE_ERATT 0x4 /* intr handler should ignore ERATT */
3301 + #define LS_MDS_LINK_DOWN 0x8 /* MDS Diagnostics Link Down */
3302 +-#define LS_MDS_LOOPBACK 0x16 /* MDS Diagnostics Link Up (Loopback) */
3303 ++#define LS_MDS_LOOPBACK 0x10 /* MDS Diagnostics Link Up (Loopback) */
3304 +
3305 + uint32_t hba_flag; /* hba generic flags */
3306 + #define HBA_ERATT_HANDLED 0x1 /* This flag is set when eratt handled */
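
The one-liner above matters because these LS_* values share a single bitmask: each must be a distinct power of two, but 0x16 == 0x10 | 0x04 | 0x02 overlapped LS_IGNORE_ERATT and LS_NPIV_FAB_SUPPORTED. A small illustration (generic variable name):

	#include <linux/types.h>

	static bool mds_loopback_old(u32 link_flag)
	{
		return link_flag & 0x16; /* also true if only 0x2 or 0x4 is set */
	}

	static bool mds_loopback_fixed(u32 link_flag)
	{
		return link_flag & 0x10; /* true only for the loopback bit */
	}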
3307 +diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
3308 +index 76a5a99605aa..d723fd1d7b26 100644
3309 +--- a/drivers/scsi/lpfc/lpfc_nvme.c
3310 ++++ b/drivers/scsi/lpfc/lpfc_nvme.c
3311 +@@ -2687,7 +2687,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3312 + struct lpfc_nvme_rport *oldrport;
3313 + struct nvme_fc_remote_port *remote_port;
3314 + struct nvme_fc_port_info rpinfo;
3315 +- struct lpfc_nodelist *prev_ndlp;
3316 ++ struct lpfc_nodelist *prev_ndlp = NULL;
3317 +
3318 + lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
3319 + "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
3320 +@@ -2736,23 +2736,29 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3321 + spin_unlock_irq(&vport->phba->hbalock);
3322 + rport = remote_port->private;
3323 + if (oldrport) {
3324 ++ /* A new remoteport record does not guarantee a valid
3325 ++ * host private memory area.
3326 ++ */
3327 ++ prev_ndlp = oldrport->ndlp;
3328 + if (oldrport == remote_port->private) {
3329 +- /* Same remoteport. Just reuse. */
3330 ++ /* Same remoteport - ndlp should match.
3331 ++ * Just reuse.
3332 ++ */
3333 + lpfc_printf_vlog(ndlp->vport, KERN_INFO,
3334 + LOG_NVME_DISC,
3335 + "6014 Rebinding lport to "
3336 + "remoteport %p wwpn 0x%llx, "
3337 +- "Data: x%x x%x %p x%x x%06x\n",
3338 ++ "Data: x%x x%x %p %p x%x x%06x\n",
3339 + remote_port,
3340 + remote_port->port_name,
3341 + remote_port->port_id,
3342 + remote_port->port_role,
3343 ++ prev_ndlp,
3344 + ndlp,
3345 + ndlp->nlp_type,
3346 + ndlp->nlp_DID);
3347 + return 0;
3348 + }
3349 +- prev_ndlp = rport->ndlp;
3350 +
3351 + /* Sever the ndlp<->rport association
3352 + * before dropping the ndlp ref from
3353 +@@ -2786,13 +2792,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3354 + lpfc_printf_vlog(vport, KERN_INFO,
3355 + LOG_NVME_DISC | LOG_NODE,
3356 + "6022 Binding new rport to "
3357 +- "lport %p Remoteport %p WWNN 0x%llx, "
3358 ++ "lport %p Remoteport %p rport %p WWNN 0x%llx, "
3359 + "Rport WWPN 0x%llx DID "
3360 +- "x%06x Role x%x, ndlp %p\n",
3361 +- lport, remote_port,
3362 ++ "x%06x Role x%x, ndlp %p prev_ndlp %p\n",
3363 ++ lport, remote_port, rport,
3364 + rpinfo.node_name, rpinfo.port_name,
3365 + rpinfo.port_id, rpinfo.port_role,
3366 +- ndlp);
3367 ++ ndlp, prev_ndlp);
3368 + } else {
3369 + lpfc_printf_vlog(vport, KERN_ERR,
3370 + LOG_NVME_DISC | LOG_NODE,
3371 +diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
3372 +index ec550ee0108e..75d34def2361 100644
3373 +--- a/drivers/scsi/qla2xxx/qla_init.c
3374 ++++ b/drivers/scsi/qla2xxx/qla_init.c
3375 +@@ -1074,9 +1074,12 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
3376 + case PDS_PLOGI_COMPLETE:
3377 + case PDS_PRLI_PENDING:
3378 + case PDS_PRLI2_PENDING:
3379 +- ql_dbg(ql_dbg_disc, vha, 0x20d5, "%s %d %8phC relogin needed\n",
3380 +- __func__, __LINE__, fcport->port_name);
3381 +- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3382 ++ /* Set discovery state back to GNL to trigger a relogin attempt */
3383 ++ if (qla_dual_mode_enabled(vha) ||
3384 ++ qla_ini_mode_enabled(vha)) {
3385 ++ fcport->disc_state = DSC_GNL;
3386 ++ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
3387 ++ }
3388 + return;
3389 + case PDS_LOGO_PENDING:
3390 + case PDS_PORT_UNAVAILABLE:
3391 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
3392 +index 1027b0cb7fa3..6dc1b1bd8069 100644
3393 +--- a/drivers/scsi/qla2xxx/qla_target.c
3394 ++++ b/drivers/scsi/qla2xxx/qla_target.c
3395 +@@ -982,8 +982,9 @@ void qlt_free_session_done(struct work_struct *work)
3396 +
3397 + logo.id = sess->d_id;
3398 + logo.cmd_count = 0;
3399 ++ if (!own)
3400 ++ qlt_send_first_logo(vha, &logo);
3401 + sess->send_els_logo = 0;
3402 +- qlt_send_first_logo(vha, &logo);
3403 + }
3404 +
3405 + if (sess->logout_on_delete && sess->loop_id != FC_NO_LOOP_ID) {
3406 +diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
3407 +index 731ca0d8520a..9f3c263756a8 100644
3408 +--- a/drivers/scsi/qla2xxx/qla_tmpl.c
3409 ++++ b/drivers/scsi/qla2xxx/qla_tmpl.c
3410 +@@ -571,6 +571,15 @@ qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
3411 + }
3412 + break;
3413 +
3414 ++ case T268_BUF_TYPE_REQ_MIRROR:
3415 ++ case T268_BUF_TYPE_RSP_MIRROR:
3416 ++ /*
3417 ++ * Mirror pointers are not implemented in the
3418 ++ * driver; shadow pointers are used by the
3419 ++ * driver instead. Skip these entries.
3420 ++ */
3421 ++ qla27xx_skip_entry(ent, buf);
3422 ++ break;
3423 + default:
3424 + ql_dbg(ql_dbg_async, vha, 0xd02b,
3425 + "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
3426 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3427 +index ee5081ba5313..1fc87a3260cc 100644
3428 +--- a/drivers/target/target_core_transport.c
3429 ++++ b/drivers/target/target_core_transport.c
3430 +@@ -316,6 +316,7 @@ void __transport_register_session(
3431 + {
3432 + const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
3433 + unsigned char buf[PR_REG_ISID_LEN];
3434 ++ unsigned long flags;
3435 +
3436 + se_sess->se_tpg = se_tpg;
3437 + se_sess->fabric_sess_ptr = fabric_sess_ptr;
3438 +@@ -352,7 +353,7 @@ void __transport_register_session(
3439 + se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
3440 + }
3441 +
3442 +- spin_lock_irq(&se_nacl->nacl_sess_lock);
3443 ++ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
3444 + /*
3445 + * The se_nacl->nacl_sess pointer will be set to the
3446 + * last active I_T Nexus for each struct se_node_acl.
3447 +@@ -361,7 +362,7 @@ void __transport_register_session(
3448 +
3449 + list_add_tail(&se_sess->sess_acl_list,
3450 + &se_nacl->acl_sess_list);
3451 +- spin_unlock_irq(&se_nacl->nacl_sess_lock);
3452 ++ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
3453 + }
3454 + list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
3455 +
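
The switch from spin_lock_irq() to spin_lock_irqsave() above preserves the caller's interrupt state: spin_unlock_irq() re-enables interrupts unconditionally, which is wrong if __transport_register_session() is reached with interrupts already disabled. A minimal sketch of the pattern:

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(demo_lock);

	/* Safe whether or not the caller already disabled interrupts:
	 * the saved flags restore the previous IRQ state on unlock
	 * instead of forcing IRQs back on. */
	static void demo_update(int *counter)
	{
		unsigned long flags;

		spin_lock_irqsave(&demo_lock, flags);
		(*counter)++;
		spin_unlock_irqrestore(&demo_lock, flags);
	}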
3456 +diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
3457 +index d8dc3d22051f..b8dc5efc606b 100644
3458 +--- a/drivers/target/target_core_user.c
3459 ++++ b/drivers/target/target_core_user.c
3460 +@@ -1745,9 +1745,11 @@ static int tcmu_configure_device(struct se_device *dev)
3461 +
3462 + info = &udev->uio_info;
3463 +
3464 ++ mutex_lock(&udev->cmdr_lock);
3465 + udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks),
3466 + sizeof(unsigned long),
3467 + GFP_KERNEL);
3468 ++ mutex_unlock(&udev->cmdr_lock);
3469 + if (!udev->data_bitmap) {
3470 + ret = -ENOMEM;
3471 + goto err_bitmap_alloc;
3472 +@@ -1957,7 +1959,7 @@ static match_table_t tokens = {
3473 + {Opt_hw_block_size, "hw_block_size=%u"},
3474 + {Opt_hw_max_sectors, "hw_max_sectors=%u"},
3475 + {Opt_nl_reply_supported, "nl_reply_supported=%d"},
3476 +- {Opt_max_data_area_mb, "max_data_area_mb=%u"},
3477 ++ {Opt_max_data_area_mb, "max_data_area_mb=%d"},
3478 + {Opt_err, NULL}
3479 + };
3480 +
3481 +@@ -1985,13 +1987,48 @@ static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
3482 + return 0;
3483 + }
3484 +
3485 ++static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
3486 ++{
3487 ++ int val, ret;
3488 ++
3489 ++ ret = match_int(arg, &val);
3490 ++ if (ret < 0) {
3491 ++ pr_err("match_int() failed for max_data_area_mb=. Error %d.\n",
3492 ++ ret);
3493 ++ return ret;
3494 ++ }
3495 ++
3496 ++ if (val <= 0) {
3497 ++ pr_err("Invalid max_data_area %d.\n", val);
3498 ++ return -EINVAL;
3499 ++ }
3500 ++
3501 ++ mutex_lock(&udev->cmdr_lock);
3502 ++ if (udev->data_bitmap) {
3503 ++ pr_err("Cannot set max_data_area_mb after it has been enabled.\n");
3504 ++ ret = -EINVAL;
3505 ++ goto unlock;
3506 ++ }
3507 ++
3508 ++ udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
3509 ++ if (udev->max_blocks > tcmu_global_max_blocks) {
3510 ++ pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
3511 ++ val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
3512 ++ udev->max_blocks = tcmu_global_max_blocks;
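
The #ifdef-to-IS_ENABLED() change above is the generic fix for tristate options: CONFIG_SUNRPC_XPRT_RDMA=m defines CONFIG_SUNRPC_XPRT_RDMA_MODULE rather than CONFIG_SUNRPC_XPRT_RDMA, so a plain #ifdef silently compiled the RDMA path out of modular builds. Schematically:

	#ifdef CONFIG_SUNRPC_XPRT_RDMA
		/* compiled only when the option is built in (=y) */
	#endif

	#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
		/* compiled for both built-in (=y) and modular (=m) */
	#endif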
3513 ++ }
3514 ++
3515 ++unlock:
3516 ++ mutex_unlock(&udev->cmdr_lock);
3517 ++ return ret;
3518 ++}
3519 ++
3520 + static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
3521 + const char *page, ssize_t count)
3522 + {
3523 + struct tcmu_dev *udev = TCMU_DEV(dev);
3524 + char *orig, *ptr, *opts, *arg_p;
3525 + substring_t args[MAX_OPT_ARGS];
3526 +- int ret = 0, token, tmpval;
3527 ++ int ret = 0, token;
3528 +
3529 + opts = kstrdup(page, GFP_KERNEL);
3530 + if (!opts)
3531 +@@ -2044,37 +2081,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
3532 + pr_err("kstrtoint() failed for nl_reply_supported=\n");
3533 + break;
3534 + case Opt_max_data_area_mb:
3535 +- if (dev->export_count) {
3536 +- pr_err("Unable to set max_data_area_mb while exports exist\n");
3537 +- ret = -EINVAL;
3538 +- break;
3539 +- }
3540 +-
3541 +- arg_p = match_strdup(&args[0]);
3542 +- if (!arg_p) {
3543 +- ret = -ENOMEM;
3544 +- break;
3545 +- }
3546 +- ret = kstrtoint(arg_p, 0, &tmpval);
3547 +- kfree(arg_p);
3548 +- if (ret < 0) {
3549 +- pr_err("kstrtoint() failed for max_data_area_mb=\n");
3550 +- break;
3551 +- }
3552 +-
3553 +- if (tmpval <= 0) {
3554 +- pr_err("Invalid max_data_area %d\n", tmpval);
3555 +- ret = -EINVAL;
3556 +- break;
3557 +- }
3558 +-
3559 +- udev->max_blocks = TCMU_MBS_TO_BLOCKS(tmpval);
3560 +- if (udev->max_blocks > tcmu_global_max_blocks) {
3561 +- pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
3562 +- tmpval,
3563 +- TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
3564 +- udev->max_blocks = tcmu_global_max_blocks;
3565 +- }
3566 ++ ret = tcmu_set_max_blocks_param(udev, &args[0]);
3567 + break;
3568 + default:
3569 + break;
3570 +diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
3571 +index 45fb284d4c11..e77e63070e99 100644
3572 +--- a/drivers/thermal/rcar_thermal.c
3573 ++++ b/drivers/thermal/rcar_thermal.c
3574 +@@ -598,7 +598,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
3575 + enr_bits |= 3 << (i * 8);
3576 + }
3577 +
3578 +- if (enr_bits)
3579 ++ if (common->base && enr_bits)
3580 + rcar_thermal_common_write(common, ENR, enr_bits);
3581 +
3582 + dev_info(dev, "%d sensor probed\n", i);
3583 +diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
3584 +index 11278836ed12..0bd47007c57f 100644
3585 +--- a/drivers/thermal/thermal_hwmon.c
3586 ++++ b/drivers/thermal/thermal_hwmon.c
3587 +@@ -142,6 +142,7 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
3588 +
3589 + INIT_LIST_HEAD(&hwmon->tz_list);
3590 + strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
3591 ++ strreplace(hwmon->type, '-', '_');
3592 + hwmon->device = hwmon_device_register_with_info(NULL, hwmon->type,
3593 + hwmon, NULL, NULL);
3594 + if (IS_ERR(hwmon->device)) {
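
The added strreplace() call works around the hwmon core's refusal to register device names containing '-', which many device-tree thermal zone types (e.g. "cpu-thermal") would otherwise trip over. A sketch, assuming a name buffer sized like THERMAL_NAME_LENGTH:

	#include <linux/string.h>

	static void demo_hwmon_name(char *dst, const char *tz_type, size_t len)
	{
		strlcpy(dst, tz_type, len);
		strreplace(dst, '-', '_');	/* "cpu-thermal" -> "cpu_thermal" */
	}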
3595 +diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
3596 +index bdd17d2aaafd..b121d8f8f3d7 100644
3597 +--- a/drivers/tty/rocket.c
3598 ++++ b/drivers/tty/rocket.c
3599 +@@ -1881,7 +1881,7 @@ static __init int register_PCI(int i, struct pci_dev *dev)
3600 + ByteIO_t UPCIRingInd = 0;
3601 +
3602 + if (!dev || !pci_match_id(rocket_pci_ids, dev) ||
3603 +- pci_enable_device(dev))
3604 ++ pci_enable_device(dev) || i >= NUM_BOARDS)
3605 + return 0;
3606 +
3607 + rcktpt_io_addr[i] = pci_resource_start(dev, 0);
3608 +diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
3609 +index f68c1121fa7c..6c58ad1abd7e 100644
3610 +--- a/drivers/uio/uio.c
3611 ++++ b/drivers/uio/uio.c
3612 +@@ -622,6 +622,12 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
3613 + ssize_t retval;
3614 + s32 irq_on;
3615 +
3616 ++ if (count != sizeof(s32))
3617 ++ return -EINVAL;
3618 ++
3619 ++ if (copy_from_user(&irq_on, buf, count))
3620 ++ return -EFAULT;
3621 ++
3622 + mutex_lock(&idev->info_lock);
3623 + if (!idev->info) {
3624 + retval = -EINVAL;
3625 +@@ -633,21 +639,11 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
3626 + goto out;
3627 + }
3628 +
3629 +- if (count != sizeof(s32)) {
3630 +- retval = -EINVAL;
3631 +- goto out;
3632 +- }
3633 +-
3634 + if (!idev->info->irqcontrol) {
3635 + retval = -ENOSYS;
3636 + goto out;
3637 + }
3638 +
3639 +- if (copy_from_user(&irq_on, buf, count)) {
3640 +- retval = -EFAULT;
3641 +- goto out;
3642 +- }
3643 +-
3644 + retval = idev->info->irqcontrol(idev->info, irq_on);
3645 +
3646 + out:
3647 +@@ -955,8 +951,6 @@ int __uio_register_device(struct module *owner,
3648 + if (ret)
3649 + goto err_uio_dev_add_attributes;
3650 +
3651 +- info->uio_dev = idev;
3652 +-
3653 + if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
3654 + /*
3655 + * Note that we deliberately don't use devm_request_irq
3656 +@@ -972,6 +966,7 @@ int __uio_register_device(struct module *owner,
3657 + goto err_request_irq;
3658 + }
3659 +
3660 ++ info->uio_dev = idev;
3661 + return 0;
3662 +
3663 + err_request_irq:
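
Both uio hunks above apply the same ordering discipline. In uio_write(), the size check and copy_from_user() now run before the mutex is taken, so the locked region never touches user memory and the early error paths need no unlock; in __uio_register_device(), info->uio_dev is published only after request_irq() succeeds, so no interrupt handler can observe a half-initialised device. A sketch of the write-path shape (demo names, not the uio API):

	#include <linux/fs.h>
	#include <linux/mutex.h>
	#include <linux/uaccess.h>

	static DEFINE_MUTEX(demo_lock);

	/* Hypothetical stand-in for idev->info->irqcontrol(). */
	static int demo_irqcontrol(s32 irq_on)
	{
		return 0;
	}

	static ssize_t demo_write(struct file *filep, const char __user *buf,
				  size_t count, loff_t *ppos)
	{
		s32 irq_on;
		int ret;

		/* Validate and copy user data up front, unlocked. */
		if (count != sizeof(s32))
			return -EINVAL;
		if (copy_from_user(&irq_on, buf, count))
			return -EFAULT;

		mutex_lock(&demo_lock);
		ret = demo_irqcontrol(irq_on);
		mutex_unlock(&demo_lock);

		return ret ? ret : sizeof(s32);
	}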
3664 +diff --git a/fs/autofs/autofs_i.h b/fs/autofs/autofs_i.h
3665 +index 9400a9f6318a..5057b9f0f846 100644
3666 +--- a/fs/autofs/autofs_i.h
3667 ++++ b/fs/autofs/autofs_i.h
3668 +@@ -26,6 +26,7 @@
3669 + #include <linux/list.h>
3670 + #include <linux/completion.h>
3671 + #include <linux/file.h>
3672 ++#include <linux/magic.h>
3673 +
3674 + /* This is the range of ioctl() numbers we claim as ours */
3675 + #define AUTOFS_IOC_FIRST AUTOFS_IOC_READY
3676 +@@ -124,7 +125,8 @@ struct autofs_sb_info {
3677 +
3678 + static inline struct autofs_sb_info *autofs_sbi(struct super_block *sb)
3679 + {
3680 +- return (struct autofs_sb_info *)(sb->s_fs_info);
3681 ++ return sb->s_magic != AUTOFS_SUPER_MAGIC ?
3682 ++ NULL : (struct autofs_sb_info *)(sb->s_fs_info);
3683 + }
3684 +
3685 + static inline struct autofs_info *autofs_dentry_ino(struct dentry *dentry)
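
The reworked autofs_sbi() refuses to reinterpret sb->s_fs_info unless the superblock's magic proves it is an autofs mount, which protects the dev ioctls from being pointed at a file descriptor on some other filesystem. The same guard, sketched for a hypothetical filesystem:

	#include <linux/fs.h>

	#define DEMO_SUPER_MAGIC 0x64656d6f	/* hypothetical magic */

	struct demo_sb_info;

	static inline struct demo_sb_info *demo_sbi(struct super_block *sb)
	{
		/* Only trust s_fs_info after the magic check. */
		return sb->s_magic == DEMO_SUPER_MAGIC ? sb->s_fs_info : NULL;
	}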
3686 +diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
3687 +index b51980fc274e..846c052569dd 100644
3688 +--- a/fs/autofs/inode.c
3689 ++++ b/fs/autofs/inode.c
3690 +@@ -10,7 +10,6 @@
3691 + #include <linux/seq_file.h>
3692 + #include <linux/pagemap.h>
3693 + #include <linux/parser.h>
3694 +-#include <linux/magic.h>
3695 +
3696 + #include "autofs_i.h"
3697 +
3698 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3699 +index 53cac20650d8..4ab0bccfa281 100644
3700 +--- a/fs/btrfs/extent-tree.c
3701 ++++ b/fs/btrfs/extent-tree.c
3702 +@@ -5935,7 +5935,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
3703 + * root: the root of the parent directory
3704 + * rsv: block reservation
3705 + * items: the number of items that we need do reservation
3706 +- * qgroup_reserved: used to return the reserved size in qgroup
3707 ++ * use_global_rsv: allow fallback to the global block reservation
3708 + *
3709 + * This function is used to reserve the space for snapshot/subvolume
3710 + * creation and deletion. Those operations are different with the
3711 +@@ -5945,10 +5945,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
3712 + * the space reservation mechanism in start_transaction().
3713 + */
3714 + int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
3715 +- struct btrfs_block_rsv *rsv,
3716 +- int items,
3717 ++ struct btrfs_block_rsv *rsv, int items,
3718 + bool use_global_rsv)
3719 + {
3720 ++ u64 qgroup_num_bytes = 0;
3721 + u64 num_bytes;
3722 + int ret;
3723 + struct btrfs_fs_info *fs_info = root->fs_info;
3724 +@@ -5956,12 +5956,11 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
3725 +
3726 + if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
3727 + /* One for parent inode, two for dir entries */
3728 +- num_bytes = 3 * fs_info->nodesize;
3729 +- ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true);
3730 ++ qgroup_num_bytes = 3 * fs_info->nodesize;
3731 ++ ret = btrfs_qgroup_reserve_meta_prealloc(root,
3732 ++ qgroup_num_bytes, true);
3733 + if (ret)
3734 + return ret;
3735 +- } else {
3736 +- num_bytes = 0;
3737 + }
3738 +
3739 + num_bytes = btrfs_calc_trans_metadata_size(fs_info, items);
3740 +@@ -5973,8 +5972,8 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
3741 + if (ret == -ENOSPC && use_global_rsv)
3742 + ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
3743 +
3744 +- if (ret && num_bytes)
3745 +- btrfs_qgroup_free_meta_prealloc(root, num_bytes);
3746 ++ if (ret && qgroup_num_bytes)
3747 ++ btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes);
3748 +
3749 + return ret;
3750 + }
3751 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3752 +index b077544b5232..f3d6be0c657b 100644
3753 +--- a/fs/btrfs/ioctl.c
3754 ++++ b/fs/btrfs/ioctl.c
3755 +@@ -3463,6 +3463,25 @@ static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 olen,
3756 +
3757 + same_lock_start = min_t(u64, loff, dst_loff);
3758 + same_lock_len = max_t(u64, loff, dst_loff) + len - same_lock_start;
3759 ++ } else {
3760 ++ /*
3761 ++ * If the source and destination inodes are different, the
3762 ++ * source's range end offset matches the source's i_size, that
3763 ++ * i_size is not a multiple of the sector size, and the
3764 ++ * destination range does not go past the destination's i_size,
3765 ++ * we must round down the length to the nearest sector size
3766 ++ * multiple. If we don't do this adjustment we end up replacing
3767 ++ * with zeroes the bytes in the range that starts at the
3768 ++ * deduplication range's end offset and ends at the next sector
3769 ++ * size multiple.
3770 ++ */
3771 ++ if (loff + olen == i_size_read(src) &&
3772 ++ dst_loff + len < i_size_read(dst)) {
3773 ++ const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
3774 ++
3775 ++ len = round_down(i_size_read(src), sz) - loff;
3776 ++ olen = len;
3777 ++ }
3778 + }
3779 +
3780 + again:
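
A worked example of the rounding above, with hypothetical numbers and 4 KiB sectors: src i_size = 10000, loff = 0, olen = 10000, dst i_size = 1 MiB, dst_loff = 0. Then

	round_down(10000, 4096) = 8192  =>  len = olen = 8192

so only the two complete sectors are deduplicated, and the 1808-byte source tail is left alone instead of being matched against its zero padding and zeroing live bytes in the destination file.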
3781 +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3782 +index 9d02563b2147..44043f809a3c 100644
3783 +--- a/fs/cifs/connect.c
3784 ++++ b/fs/cifs/connect.c
3785 +@@ -2523,7 +2523,7 @@ cifs_setup_ipc(struct cifs_ses *ses, struct smb_vol *volume_info)
3786 + if (tcon == NULL)
3787 + return -ENOMEM;
3788 +
3789 +- snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->serverName);
3790 ++ snprintf(unc, sizeof(unc), "\\\\%s\\IPC$", ses->server->hostname);
3791 +
3792 + /* cannot fail */
3793 + nls_codepage = load_nls_default();
3794 +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3795 +index 9051b9dfd590..d279fa5472db 100644
3796 +--- a/fs/cifs/inode.c
3797 ++++ b/fs/cifs/inode.c
3798 +@@ -469,6 +469,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const char *path,
3799 + oparms.cifs_sb = cifs_sb;
3800 + oparms.desired_access = GENERIC_READ;
3801 + oparms.create_options = CREATE_NOT_DIR;
3802 ++ if (backup_cred(cifs_sb))
3803 ++ oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
3804 + oparms.disposition = FILE_OPEN;
3805 + oparms.path = path;
3806 + oparms.fid = &fid;
3807 +diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3808 +index ee6c4a952ce9..5ecbc99f46e4 100644
3809 +--- a/fs/cifs/smb2ops.c
3810 ++++ b/fs/cifs/smb2ops.c
3811 +@@ -626,7 +626,10 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
3812 + oparms.tcon = tcon;
3813 + oparms.desired_access = FILE_READ_ATTRIBUTES;
3814 + oparms.disposition = FILE_OPEN;
3815 +- oparms.create_options = 0;
3816 ++ if (backup_cred(cifs_sb))
3817 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3818 ++ else
3819 ++ oparms.create_options = 0;
3820 + oparms.fid = &fid;
3821 + oparms.reconnect = false;
3822 +
3823 +@@ -775,7 +778,10 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon,
3824 + oparms.tcon = tcon;
3825 + oparms.desired_access = FILE_READ_EA;
3826 + oparms.disposition = FILE_OPEN;
3827 +- oparms.create_options = 0;
3828 ++ if (backup_cred(cifs_sb))
3829 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3830 ++ else
3831 ++ oparms.create_options = 0;
3832 + oparms.fid = &fid;
3833 + oparms.reconnect = false;
3834 +
3835 +@@ -854,7 +860,10 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
3836 + oparms.tcon = tcon;
3837 + oparms.desired_access = FILE_WRITE_EA;
3838 + oparms.disposition = FILE_OPEN;
3839 +- oparms.create_options = 0;
3840 ++ if (backup_cred(cifs_sb))
3841 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3842 ++ else
3843 ++ oparms.create_options = 0;
3844 + oparms.fid = &fid;
3845 + oparms.reconnect = false;
3846 +
3847 +@@ -1460,7 +1469,10 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
3848 + oparms.tcon = tcon;
3849 + oparms.desired_access = FILE_READ_ATTRIBUTES | FILE_READ_DATA;
3850 + oparms.disposition = FILE_OPEN;
3851 +- oparms.create_options = 0;
3852 ++ if (backup_cred(cifs_sb))
3853 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3854 ++ else
3855 ++ oparms.create_options = 0;
3856 + oparms.fid = fid;
3857 + oparms.reconnect = false;
3858 +
3859 +@@ -1735,7 +1747,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
3860 + oparms.tcon = tcon;
3861 + oparms.desired_access = FILE_READ_ATTRIBUTES;
3862 + oparms.disposition = FILE_OPEN;
3863 +- oparms.create_options = 0;
3864 ++ if (backup_cred(cifs_sb))
3865 ++ oparms.create_options = CREATE_OPEN_BACKUP_INTENT;
3866 ++ else
3867 ++ oparms.create_options = 0;
3868 + oparms.fid = &fid;
3869 + oparms.reconnect = false;
3870 +
3871 +@@ -3463,7 +3478,7 @@ struct smb_version_values smb21_values = {
3872 + struct smb_version_values smb3any_values = {
3873 + .version_string = SMB3ANY_VERSION_STRING,
3874 + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
3875 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3876 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3877 + .large_lock_type = 0,
3878 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3879 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3880 +@@ -3484,7 +3499,7 @@ struct smb_version_values smb3any_values = {
3881 + struct smb_version_values smbdefault_values = {
3882 + .version_string = SMBDEFAULT_VERSION_STRING,
3883 + .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
3884 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3885 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3886 + .large_lock_type = 0,
3887 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3888 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3889 +@@ -3505,7 +3520,7 @@ struct smb_version_values smbdefault_values = {
3890 + struct smb_version_values smb30_values = {
3891 + .version_string = SMB30_VERSION_STRING,
3892 + .protocol_id = SMB30_PROT_ID,
3893 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3894 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3895 + .large_lock_type = 0,
3896 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3897 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3898 +@@ -3526,7 +3541,7 @@ struct smb_version_values smb30_values = {
3899 + struct smb_version_values smb302_values = {
3900 + .version_string = SMB302_VERSION_STRING,
3901 + .protocol_id = SMB302_PROT_ID,
3902 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3903 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3904 + .large_lock_type = 0,
3905 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3906 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3907 +@@ -3548,7 +3563,7 @@ struct smb_version_values smb302_values = {
3908 + struct smb_version_values smb311_values = {
3909 + .version_string = SMB311_VERSION_STRING,
3910 + .protocol_id = SMB311_PROT_ID,
3911 +- .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
3912 ++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION | SMB2_GLOBAL_CAP_DIRECTORY_LEASING,
3913 + .large_lock_type = 0,
3914 + .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
3915 + .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
3916 +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3917 +index 44e511a35559..82be1dfeca33 100644
3918 +--- a/fs/cifs/smb2pdu.c
3919 ++++ b/fs/cifs/smb2pdu.c
3920 +@@ -2179,6 +2179,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
3921 + if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
3922 + *oplock == SMB2_OPLOCK_LEVEL_NONE)
3923 + req->RequestedOplockLevel = *oplock;
3924 ++ else if (!(server->capabilities & SMB2_GLOBAL_CAP_DIRECTORY_LEASING) &&
3925 ++ (oparms->create_options & CREATE_NOT_FILE))
3926 ++ req->RequestedOplockLevel = *oplock; /* no srv lease support */
3927 + else {
3928 + rc = add_lease_context(server, iov, &n_iov,
3929 + oparms->fid->lease_key, oplock);
3930 +diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
3931 +index 4d8b1de83143..b6f2dc8163e1 100644
3932 +--- a/fs/f2fs/f2fs.h
3933 ++++ b/fs/f2fs/f2fs.h
3934 +@@ -1680,18 +1680,20 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
3935 + sbi->total_valid_block_count -= diff;
3936 + if (!*count) {
3937 + spin_unlock(&sbi->stat_lock);
3938 +- percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
3939 + goto enospc;
3940 + }
3941 + }
3942 + spin_unlock(&sbi->stat_lock);
3943 +
3944 +- if (unlikely(release))
3945 ++ if (unlikely(release)) {
3946 ++ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
3947 + dquot_release_reservation_block(inode, release);
3948 ++ }
3949 + f2fs_i_blocks_write(inode, *count, true, true);
3950 + return 0;
3951 +
3952 + enospc:
3953 ++ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
3954 + dquot_release_reservation_block(inode, release);
3955 + return -ENOSPC;
3956 + }
3957 +@@ -1954,8 +1956,13 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
3958 + pgoff_t index, bool for_write)
3959 + {
3960 + #ifdef CONFIG_F2FS_FAULT_INJECTION
3961 +- struct page *page = find_lock_page(mapping, index);
3962 ++ struct page *page;
3963 +
3964 ++ if (!for_write)
3965 ++ page = find_get_page_flags(mapping, index,
3966 ++ FGP_LOCK | FGP_ACCESSED);
3967 ++ else
3968 ++ page = find_lock_page(mapping, index);
3969 + if (page)
3970 + return page;
3971 +
3972 +@@ -2812,7 +2819,7 @@ int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
3973 + int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
3974 + struct writeback_control *wbc,
3975 + bool do_balance, enum iostat_type io_type);
3976 +-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3977 ++int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
3978 + bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
3979 + void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
3980 + void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
3981 +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
3982 +index 3ffa341cf586..4c9f9bcbd2d9 100644
3983 +--- a/fs/f2fs/file.c
3984 ++++ b/fs/f2fs/file.c
3985 +@@ -1882,7 +1882,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
3986 + struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3987 + struct super_block *sb = sbi->sb;
3988 + __u32 in;
3989 +- int ret;
3990 ++ int ret = 0;
3991 +
3992 + if (!capable(CAP_SYS_ADMIN))
3993 + return -EPERM;
3994 +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
3995 +index 9093be6e7a7d..37ab2d10a872 100644
3996 +--- a/fs/f2fs/gc.c
3997 ++++ b/fs/f2fs/gc.c
3998 +@@ -986,7 +986,13 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
3999 + goto next;
4000 +
4001 + sum = page_address(sum_page);
4002 +- f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
4003 ++ if (type != GET_SUM_TYPE((&sum->footer))) {
4004 ++ f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) "
4005 ++ "type [%d, %d] in SSA and SIT",
4006 ++ segno, type, GET_SUM_TYPE((&sum->footer)));
4007 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
4008 ++ goto next;
4009 ++ }
4010 +
4011 + /*
4012 + * this is to avoid deadlock:
4013 +diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
4014 +index 043830be5662..2bcb2d36f024 100644
4015 +--- a/fs/f2fs/inline.c
4016 ++++ b/fs/f2fs/inline.c
4017 +@@ -130,6 +130,16 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
4018 + if (err)
4019 + return err;
4020 +
4021 ++ if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
4022 ++ f2fs_put_dnode(dn);
4023 ++ set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
4024 ++ f2fs_msg(fio.sbi->sb, KERN_WARNING,
4025 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
4026 ++ "run fsck to fix.",
4027 ++ __func__, dn->inode->i_ino, dn->data_blkaddr);
4028 ++ return -EINVAL;
4029 ++ }
4030 ++
4031 + f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
4032 +
4033 + f2fs_do_read_inline_data(page, dn->inode_page);
4034 +@@ -363,6 +373,17 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
4035 + if (err)
4036 + goto out;
4037 +
4038 ++ if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
4039 ++ f2fs_put_dnode(&dn);
4040 ++ set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
4041 ++ f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING,
4042 ++ "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, "
4043 ++ "run fsck to fix.",
4044 ++ __func__, dir->i_ino, dn.data_blkaddr);
4045 ++ err = -EINVAL;
4046 ++ goto out;
4047 ++ }
4048 ++
4049 + f2fs_wait_on_page_writeback(page, DATA, true);
4050 +
4051 + dentry_blk = page_address(page);
4052 +@@ -477,6 +498,7 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
4053 + return 0;
4054 + recover:
4055 + lock_page(ipage);
4056 ++ f2fs_wait_on_page_writeback(ipage, NODE, true);
4057 + memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
4058 + f2fs_i_depth_write(dir, 0);
4059 + f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
4060 +diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
4061 +index f121c864f4c0..cf0f944fcaea 100644
4062 +--- a/fs/f2fs/inode.c
4063 ++++ b/fs/f2fs/inode.c
4064 +@@ -197,6 +197,16 @@ static bool sanity_check_inode(struct inode *inode)
4065 + __func__, inode->i_ino);
4066 + return false;
4067 + }
4068 ++
4069 ++ if (f2fs_has_extra_attr(inode) &&
4070 ++ !f2fs_sb_has_extra_attr(sbi->sb)) {
4071 ++ set_sbi_flag(sbi, SBI_NEED_FSCK);
4072 ++ f2fs_msg(sbi->sb, KERN_WARNING,
4073 ++ "%s: inode (ino=%lx) is with extra_attr, "
4074 ++ "but extra_attr feature is off",
4075 ++ __func__, inode->i_ino);
4076 ++ return false;
4077 ++ }
4078 + return true;
4079 + }
4080 +
4081 +@@ -249,6 +259,11 @@ static int do_read_inode(struct inode *inode)
4082 +
4083 + get_inline_info(inode, ri);
4084 +
4085 ++ if (!sanity_check_inode(inode)) {
4086 ++ f2fs_put_page(node_page, 1);
4087 ++ return -EINVAL;
4088 ++ }
4089 ++
4090 + fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
4091 + le16_to_cpu(ri->i_extra_isize) : 0;
4092 +
4093 +@@ -330,10 +345,6 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
4094 + ret = do_read_inode(inode);
4095 + if (ret)
4096 + goto bad_inode;
4097 +- if (!sanity_check_inode(inode)) {
4098 +- ret = -EINVAL;
4099 +- goto bad_inode;
4100 +- }
4101 + make_now:
4102 + if (ino == F2FS_NODE_INO(sbi)) {
4103 + inode->i_mapping->a_ops = &f2fs_node_aops;
4104 +diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
4105 +index 10643b11bd59..52ed02b0327c 100644
4106 +--- a/fs/f2fs/node.c
4107 ++++ b/fs/f2fs/node.c
4108 +@@ -1633,7 +1633,9 @@ next_step:
4109 + !is_cold_node(page)))
4110 + continue;
4111 + lock_node:
4112 +- if (!trylock_page(page))
4113 ++ if (wbc->sync_mode == WB_SYNC_ALL)
4114 ++ lock_page(page);
4115 ++ else if (!trylock_page(page))
4116 + continue;
4117 +
4118 + if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
4119 +@@ -1968,7 +1970,7 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
4120 + kmem_cache_free(free_nid_slab, i);
4121 + }
4122 +
4123 +-static void scan_nat_page(struct f2fs_sb_info *sbi,
4124 ++static int scan_nat_page(struct f2fs_sb_info *sbi,
4125 + struct page *nat_page, nid_t start_nid)
4126 + {
4127 + struct f2fs_nm_info *nm_i = NM_I(sbi);
4128 +@@ -1986,7 +1988,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
4129 + break;
4130 +
4131 + blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
4132 +- f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
4133 ++
4134 ++ if (blk_addr == NEW_ADDR)
4135 ++ return -EINVAL;
4136 ++
4137 + if (blk_addr == NULL_ADDR) {
4138 + add_free_nid(sbi, start_nid, true, true);
4139 + } else {
4140 +@@ -1995,6 +2000,8 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
4141 + spin_unlock(&NM_I(sbi)->nid_list_lock);
4142 + }
4143 + }
4144 ++
4145 ++ return 0;
4146 + }
4147 +
4148 + static void scan_curseg_cache(struct f2fs_sb_info *sbi)
4149 +@@ -2050,11 +2057,11 @@ out:
4150 + up_read(&nm_i->nat_tree_lock);
4151 + }
4152 +
4153 +-static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4154 ++static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4155 + bool sync, bool mount)
4156 + {
4157 + struct f2fs_nm_info *nm_i = NM_I(sbi);
4158 +- int i = 0;
4159 ++ int i = 0, ret;
4160 + nid_t nid = nm_i->next_scan_nid;
4161 +
4162 + if (unlikely(nid >= nm_i->max_nid))
4163 +@@ -2062,17 +2069,17 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4164 +
4165 + /* Enough entries */
4166 + if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
4167 +- return;
4168 ++ return 0;
4169 +
4170 + if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
4171 +- return;
4172 ++ return 0;
4173 +
4174 + if (!mount) {
4175 + /* try to find free nids in free_nid_bitmap */
4176 + scan_free_nid_bits(sbi);
4177 +
4178 + if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
4179 +- return;
4180 ++ return 0;
4181 + }
4182 +
4183 + /* readahead nat pages to be scanned */
4184 +@@ -2086,8 +2093,16 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4185 + nm_i->nat_block_bitmap)) {
4186 + struct page *page = get_current_nat_page(sbi, nid);
4187 +
4188 +- scan_nat_page(sbi, page, nid);
4189 ++ ret = scan_nat_page(sbi, page, nid);
4190 + f2fs_put_page(page, 1);
4191 ++
4192 ++ if (ret) {
4193 ++ up_read(&nm_i->nat_tree_lock);
4194 ++ f2fs_bug_on(sbi, !mount);
4195 ++ f2fs_msg(sbi->sb, KERN_ERR,
4196 ++ "NAT is corrupt, run fsck to fix it");
4197 ++ return -EINVAL;
4198 ++ }
4199 + }
4200 +
4201 + nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
4202 +@@ -2108,13 +2123,19 @@ static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
4203 +
4204 + f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
4205 + nm_i->ra_nid_pages, META_NAT, false);
4206 ++
4207 ++ return 0;
4208 + }
4209 +
4210 +-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
4211 ++int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
4212 + {
4213 ++ int ret;
4214 ++
4215 + mutex_lock(&NM_I(sbi)->build_lock);
4216 +- __f2fs_build_free_nids(sbi, sync, mount);
4217 ++ ret = __f2fs_build_free_nids(sbi, sync, mount);
4218 + mutex_unlock(&NM_I(sbi)->build_lock);
4219 ++
4220 ++ return ret;
4221 + }
4222 +
4223 + /*
4224 +@@ -2801,8 +2822,7 @@ int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
4225 + /* load free nid status from nat_bits table */
4226 + load_free_nid_bitmap(sbi);
4227 +
4228 +- f2fs_build_free_nids(sbi, true, true);
4229 +- return 0;
4230 ++ return f2fs_build_free_nids(sbi, true, true);
4231 + }
4232 +
4233 + void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
4234 +diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
4235 +index 38f25f0b193a..ad70e62c5da4 100644
4236 +--- a/fs/f2fs/recovery.c
4237 ++++ b/fs/f2fs/recovery.c
4238 +@@ -241,8 +241,8 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
4239 + struct page *page = NULL;
4240 + block_t blkaddr;
4241 + unsigned int loop_cnt = 0;
4242 +- unsigned int free_blocks = sbi->user_block_count -
4243 +- valid_user_blocks(sbi);
4244 ++ unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
4245 ++ valid_user_blocks(sbi);
4246 + int err = 0;
4247 +
4248 + /* get node pages in the current segment */
4249 +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
4250 +index 9efce174c51a..43fecd5eb252 100644
4251 +--- a/fs/f2fs/segment.c
4252 ++++ b/fs/f2fs/segment.c
4253 +@@ -1643,21 +1643,30 @@ void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
4254 + unsigned int start = 0, end = -1;
4255 + unsigned int secno, start_segno;
4256 + bool force = (cpc->reason & CP_DISCARD);
4257 ++ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
4258 +
4259 + mutex_lock(&dirty_i->seglist_lock);
4260 +
4261 + while (1) {
4262 + int i;
4263 ++
4264 ++ if (need_align && end != -1)
4265 ++ end--;
4266 + start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
4267 + if (start >= MAIN_SEGS(sbi))
4268 + break;
4269 + end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
4270 + start + 1);
4271 +
4272 +- for (i = start; i < end; i++)
4273 +- clear_bit(i, prefree_map);
4274 ++ if (need_align) {
4275 ++ start = rounddown(start, sbi->segs_per_sec);
4276 ++ end = roundup(end, sbi->segs_per_sec);
4277 ++ }
4278 +
4279 +- dirty_i->nr_dirty[PRE] -= end - start;
4280 ++ for (i = start; i < end; i++) {
4281 ++ if (test_and_clear_bit(i, prefree_map))
4282 ++ dirty_i->nr_dirty[PRE]--;
4283 ++ }
4284 +
4285 + if (!test_opt(sbi, DISCARD))
4286 + continue;
4287 +@@ -2437,6 +2446,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
4288 + struct discard_policy dpolicy;
4289 + unsigned long long trimmed = 0;
4290 + int err = 0;
4291 ++ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
4292 +
4293 + if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
4294 + return -EINVAL;
4295 +@@ -2454,6 +2464,10 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
4296 + start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
4297 + end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
4298 + GET_SEGNO(sbi, end);
4299 ++ if (need_align) {
4300 ++ start_segno = rounddown(start_segno, sbi->segs_per_sec);
4301 ++ end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
4302 ++ }
4303 +
4304 + cpc.reason = CP_DISCARD;
4305 + cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
4306 +diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
4307 +index f18fc82fbe99..38c549d77a80 100644
4308 +--- a/fs/f2fs/segment.h
4309 ++++ b/fs/f2fs/segment.h
4310 +@@ -448,6 +448,8 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
4311 + if (test_and_clear_bit(segno, free_i->free_segmap)) {
4312 + free_i->free_segments++;
4313 +
4314 ++ if (IS_CURSEC(sbi, secno))
4315 ++ goto skip_free;
4316 + next = find_next_bit(free_i->free_segmap,
4317 + start_segno + sbi->segs_per_sec, start_segno);
4318 + if (next >= start_segno + sbi->segs_per_sec) {
4319 +@@ -455,6 +457,7 @@ static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
4320 + free_i->free_sections++;
4321 + }
4322 + }
4323 ++skip_free:
4324 + spin_unlock(&free_i->segmap_lock);
4325 + }
4326 +
4327 +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
4328 +index 3995e926ba3a..128d489acebb 100644
4329 +--- a/fs/f2fs/super.c
4330 ++++ b/fs/f2fs/super.c
4331 +@@ -2229,9 +2229,9 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
4332 + return 1;
4333 + }
4334 +
4335 +- if (secs_per_zone > total_sections) {
4336 ++ if (secs_per_zone > total_sections || !secs_per_zone) {
4337 + f2fs_msg(sb, KERN_INFO,
4338 +- "Wrong secs_per_zone (%u > %u)",
4339 ++ "Wrong secs_per_zone / total_sections (%u, %u)",
4340 + secs_per_zone, total_sections);
4341 + return 1;
4342 + }
4343 +@@ -2282,12 +2282,17 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
4344 + struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
4345 + unsigned int ovp_segments, reserved_segments;
4346 + unsigned int main_segs, blocks_per_seg;
4347 ++ unsigned int sit_segs, nat_segs;
4348 ++ unsigned int sit_bitmap_size, nat_bitmap_size;
4349 ++ unsigned int log_blocks_per_seg;
4350 + int i;
4351 +
4352 + total = le32_to_cpu(raw_super->segment_count);
4353 + fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
4354 +- fsmeta += le32_to_cpu(raw_super->segment_count_sit);
4355 +- fsmeta += le32_to_cpu(raw_super->segment_count_nat);
4356 ++ sit_segs = le32_to_cpu(raw_super->segment_count_sit);
4357 ++ fsmeta += sit_segs;
4358 ++ nat_segs = le32_to_cpu(raw_super->segment_count_nat);
4359 ++ fsmeta += nat_segs;
4360 + fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
4361 + fsmeta += le32_to_cpu(raw_super->segment_count_ssa);
4362 +
4363 +@@ -2318,6 +2323,18 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
4364 + return 1;
4365 + }
4366 +
4367 ++ sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
4368 ++ nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
4369 ++ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
4370 ++
4371 ++ if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
4372 ++ nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
4373 ++ f2fs_msg(sbi->sb, KERN_ERR,
4374 ++ "Wrong bitmap size: sit: %u, nat:%u",
4375 ++ sit_bitmap_size, nat_bitmap_size);
4376 ++ return 1;
4377 ++ }
4378 ++
4379 + if (unlikely(f2fs_cp_error(sbi))) {
4380 + f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
4381 + return 1;
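
The new bitmap-size check above pins sit_ver_bitmap_bytesize and nat_ver_bitmap_bytesize to the geometry the superblock advertises. A worked example with hypothetical values: log_blocks_per_seg = 9 (512 blocks per segment) and segment_count_sit = 6; the SIT area stores two copies, so 6 / 2 = 3 segments are live, and one validity bit per block gives

	((6 / 2) << 9) / 8 = 1536 / 8 = 192 bytes

A checkpoint advertising any other size is rejected before the mismatched bitmap can drive out-of-bounds accesses.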
4382 +diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
4383 +index 2e7e611deaef..bca1236fd6fa 100644
4384 +--- a/fs/f2fs/sysfs.c
4385 ++++ b/fs/f2fs/sysfs.c
4386 +@@ -9,6 +9,7 @@
4387 + * it under the terms of the GNU General Public License version 2 as
4388 + * published by the Free Software Foundation.
4389 + */
4390 ++#include <linux/compiler.h>
4391 + #include <linux/proc_fs.h>
4392 + #include <linux/f2fs_fs.h>
4393 + #include <linux/seq_file.h>
4394 +@@ -286,8 +287,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
4395 + bool gc_entry = (!strcmp(a->attr.name, "gc_urgent") ||
4396 + a->struct_type == GC_THREAD);
4397 +
4398 +- if (gc_entry)
4399 +- down_read(&sbi->sb->s_umount);
4400 ++ if (gc_entry) {
4401 ++ if (!down_read_trylock(&sbi->sb->s_umount))
4402 ++ return -EAGAIN;
4403 ++ }
4404 + ret = __sbi_store(a, sbi, buf, count);
4405 + if (gc_entry)
4406 + up_read(&sbi->sb->s_umount);
4407 +@@ -516,7 +519,8 @@ static struct kobject f2fs_feat = {
4408 + .kset = &f2fs_kset,
4409 + };
4410 +
4411 +-static int segment_info_seq_show(struct seq_file *seq, void *offset)
4412 ++static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
4413 ++ void *offset)
4414 + {
4415 + struct super_block *sb = seq->private;
4416 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
4417 +@@ -543,7 +547,8 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset)
4418 + return 0;
4419 + }
4420 +
4421 +-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
4422 ++static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
4423 ++ void *offset)
4424 + {
4425 + struct super_block *sb = seq->private;
4426 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
4427 +@@ -567,7 +572,8 @@ static int segment_bits_seq_show(struct seq_file *seq, void *offset)
4428 + return 0;
4429 + }
4430 +
4431 +-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
4432 ++static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
4433 ++ void *offset)
4434 + {
4435 + struct super_block *sb = seq->private;
4436 + struct f2fs_sb_info *sbi = F2FS_SB(sb);
4437 +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
4438 +index 5d57e818d0c3..6d049dfddb14 100644
4439 +--- a/fs/nfs/callback_proc.c
4440 ++++ b/fs/nfs/callback_proc.c
4441 +@@ -215,9 +215,9 @@ static u32 pnfs_check_callback_stateid(struct pnfs_layout_hdr *lo,
4442 + {
4443 + u32 oldseq, newseq;
4444 +
4445 +- /* Is the stateid still not initialised? */
4446 ++ /* Is the stateid not initialised? */
4447 + if (!pnfs_layout_is_valid(lo))
4448 +- return NFS4ERR_DELAY;
4449 ++ return NFS4ERR_NOMATCHING_LAYOUT;
4450 +
4451 + /* Mismatched stateid? */
4452 + if (!nfs4_stateid_match_other(&lo->plh_stateid, new))
4453 +diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
4454 +index a813979b5be0..cb905c0e606c 100644
4455 +--- a/fs/nfs/callback_xdr.c
4456 ++++ b/fs/nfs/callback_xdr.c
4457 +@@ -883,16 +883,21 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp)
4458 +
4459 + if (hdr_arg.minorversion == 0) {
4460 + cps.clp = nfs4_find_client_ident(SVC_NET(rqstp), hdr_arg.cb_ident);
4461 +- if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp))
4462 ++ if (!cps.clp || !check_gss_callback_principal(cps.clp, rqstp)) {
4463 ++ if (cps.clp)
4464 ++ nfs_put_client(cps.clp);
4465 + goto out_invalidcred;
4466 ++ }
4467 + }
4468 +
4469 + cps.minorversion = hdr_arg.minorversion;
4470 + hdr_res.taglen = hdr_arg.taglen;
4471 + hdr_res.tag = hdr_arg.tag;
4472 +- if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
4473 ++ if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0) {
4474 ++ if (cps.clp)
4475 ++ nfs_put_client(cps.clp);
4476 + return rpc_system_err;
4477 +-
4478 ++ }
4479 + while (status == 0 && nops != hdr_arg.nops) {
4480 + status = process_op(nops, rqstp, &xdr_in,
4481 + rqstp->rq_argp, &xdr_out, rqstp->rq_resp,
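
Both early exits above previously leaked the reference that nfs4_find_client_ident() takes on success, so repeated bad callbacks could pin the client structure forever. The invariant in miniature, using a hypothetical toy refcount rather than the NFS client API:

    #include <stdio.h>

    struct client { int refs; };

    static struct client *client_lookup(struct client *c) { c->refs++; return c; }
    static void client_put(struct client *c) { c->refs--; }

    /* every failure path taken after a successful lookup must drop the
     * reference the lookup took -- the rule the two hunks restore */
    static int handle(struct client *pool, int creds_ok)
    {
            struct client *clp = client_lookup(pool);

            if (!creds_ok) {
                    client_put(clp); /* the put the patch adds */
                    return -1;
            }
            /* ... use clp ... */
            client_put(clp);
            return 0;
    }

    int main(void)
    {
            struct client c = { 0 };
            handle(&c, 0);
            printf("refs=%d\n", c.refs); /* 0: no leak */
            return 0;
    }
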
4482 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
4483 +index 979631411a0e..d7124fb12041 100644
4484 +--- a/fs/nfs/nfs4client.c
4485 ++++ b/fs/nfs/nfs4client.c
4486 +@@ -1127,7 +1127,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
4487 + nfs_server_copy_userdata(server, parent_server);
4488 +
4489 + /* Get a client representation */
4490 +-#ifdef CONFIG_SUNRPC_XPRT_RDMA
4491 ++#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
4492 + rpc_set_port(data->addr, NFS_RDMA_PORT);
4493 + error = nfs4_set_client(server, data->hostname,
4494 + data->addr,
4495 +@@ -1139,7 +1139,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
4496 + parent_client->cl_net);
4497 + if (!error)
4498 + goto init_server;
4499 +-#endif /* CONFIG_SUNRPC_XPRT_RDMA */
4500 ++#endif /* IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA) */
4501 +
4502 + rpc_set_port(data->addr, NFS_PORT);
4503 + error = nfs4_set_client(server, data->hostname,
4504 +@@ -1153,7 +1153,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
4505 + if (error < 0)
4506 + goto error;
4507 +
4508 +-#ifdef CONFIG_SUNRPC_XPRT_RDMA
4509 ++#if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
4510 + init_server:
4511 + #endif
4512 + error = nfs_init_server_rpcclient(server, parent_server->client->cl_timeout, data->authflavor);
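
The #ifdef-to-IS_ENABLED() switch above matters for modular builds: Kconfig defines CONFIG_FOO only for =y and CONFIG_FOO_MODULE for =m, so the plain #ifdef silently compiled the RDMA referral path out whenever SUNRPC RDMA was built as a module. Sketch of the macro behaviour (values as Kconfig generates them into autoconf.h):

    /*   CONFIG_FOO=y  ->  #define CONFIG_FOO 1
     *   CONFIG_FOO=m  ->  #define CONFIG_FOO_MODULE 1  (CONFIG_FOO undefined)
     *
     *   #ifdef CONFIG_FOO           -> taken for =y only
     *   #if IS_ENABLED(CONFIG_FOO)  -> taken for =y and =m
     */
    #include <linux/kconfig.h>

    #if IS_ENABLED(CONFIG_SUNRPC_XPRT_RDMA)
    /* now reached for both built-in and modular RDMA transports */
    #endif
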
4513 +diff --git a/include/linux/hid.h b/include/linux/hid.h
4514 +index 773bcb1d4044..5482dd6ae9ef 100644
4515 +--- a/include/linux/hid.h
4516 ++++ b/include/linux/hid.h
4517 +@@ -520,6 +520,7 @@ struct hid_input {
4518 + const char *name;
4519 + bool registered;
4520 + struct list_head reports; /* the list of reports */
4521 ++ unsigned int application; /* application usage for this input */
4522 + };
4523 +
4524 + enum hid_type {
4525 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
4526 +index 22651e124071..a590419e46c5 100644
4527 +--- a/include/linux/mm_types.h
4528 ++++ b/include/linux/mm_types.h
4529 +@@ -340,7 +340,7 @@ struct kioctx_table;
4530 + struct mm_struct {
4531 + struct vm_area_struct *mmap; /* list of VMAs */
4532 + struct rb_root mm_rb;
4533 +- u32 vmacache_seqnum; /* per-thread vmacache */
4534 ++ u64 vmacache_seqnum; /* per-thread vmacache */
4535 + #ifdef CONFIG_MMU
4536 + unsigned long (*get_unmapped_area) (struct file *filp,
4537 + unsigned long addr, unsigned long len,
4538 +diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
4539 +index 5fe87687664c..d7016dcb245e 100644
4540 +--- a/include/linux/mm_types_task.h
4541 ++++ b/include/linux/mm_types_task.h
4542 +@@ -32,7 +32,7 @@
4543 + #define VMACACHE_MASK (VMACACHE_SIZE - 1)
4544 +
4545 + struct vmacache {
4546 +- u32 seqnum;
4547 ++ u64 seqnum;
4548 + struct vm_area_struct *vmas[VMACACHE_SIZE];
4549 + };
4550 +
4551 +diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
4552 +index 3e8ec3b8a39c..87c635d6c773 100644
4553 +--- a/include/linux/mtd/rawnand.h
4554 ++++ b/include/linux/mtd/rawnand.h
4555 +@@ -986,14 +986,14 @@ struct nand_subop {
4556 + unsigned int last_instr_end_off;
4557 + };
4558 +
4559 +-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
4560 +- unsigned int op_id);
4561 +-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
4562 +- unsigned int op_id);
4563 +-int nand_subop_get_data_start_off(const struct nand_subop *subop,
4564 +- unsigned int op_id);
4565 +-int nand_subop_get_data_len(const struct nand_subop *subop,
4566 +- unsigned int op_id);
4567 ++unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
4568 ++ unsigned int op_id);
4569 ++unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
4570 ++ unsigned int op_id);
4571 ++unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
4572 ++ unsigned int op_id);
4573 ++unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
4574 ++ unsigned int op_id);
4575 +
4576 + /**
4577 + * struct nand_op_parser_addr_constraints - Constraints for address instructions
4578 +diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
4579 +index 5c7f010676a7..47a3441cf4c4 100644
4580 +--- a/include/linux/vm_event_item.h
4581 ++++ b/include/linux/vm_event_item.h
4582 +@@ -105,7 +105,6 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
4583 + #ifdef CONFIG_DEBUG_VM_VMACACHE
4584 + VMACACHE_FIND_CALLS,
4585 + VMACACHE_FIND_HITS,
4586 +- VMACACHE_FULL_FLUSHES,
4587 + #endif
4588 + #ifdef CONFIG_SWAP
4589 + SWAP_RA,
4590 +diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
4591 +index a5b3aa8d281f..a09b28f76460 100644
4592 +--- a/include/linux/vmacache.h
4593 ++++ b/include/linux/vmacache.h
4594 +@@ -16,7 +16,6 @@ static inline void vmacache_flush(struct task_struct *tsk)
4595 + memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
4596 + }
4597 +
4598 +-extern void vmacache_flush_all(struct mm_struct *mm);
4599 + extern void vmacache_update(unsigned long addr, struct vm_area_struct *newvma);
4600 + extern struct vm_area_struct *vmacache_find(struct mm_struct *mm,
4601 + unsigned long addr);
4602 +@@ -30,10 +29,6 @@ extern struct vm_area_struct *vmacache_find_exact(struct mm_struct *mm,
4603 + static inline void vmacache_invalidate(struct mm_struct *mm)
4604 + {
4605 + mm->vmacache_seqnum++;
4606 +-
4607 +- /* deal with overflows */
4608 +- if (unlikely(mm->vmacache_seqnum == 0))
4609 +- vmacache_flush_all(mm);
4610 + }
4611 +
4612 + #endif /* __LINUX_VMACACHE_H */
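
The changes from mm_types.h through vmacache.h (and the mm/vmacache.c removal further below) are one fix: widening the per-mm vmacache sequence number from u32 to u64 makes wraparound unreachable in practice, so vmacache_flush_all() -- the racy walk over every task that handled seqnum overflow -- and its VMACACHE_FULL_FLUSHES counter can simply be deleted. A back-of-the-envelope check of "unreachable", as a hypothetical harness:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* even invalidating once per nanosecond, a 64-bit counter
             * takes centuries to wrap, so no overflow handler is needed */
            double years = (double)UINT64_MAX / 1e9 / 3600.0 / 24.0 / 365.25;
            printf("~%.0f years to wrap\n", years); /* ~585 */
            return 0;
    }
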
4613 +diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
4614 +index 7363f18e65a5..813282cc8af6 100644
4615 +--- a/include/uapi/linux/ethtool.h
4616 ++++ b/include/uapi/linux/ethtool.h
4617 +@@ -902,13 +902,13 @@ struct ethtool_rx_flow_spec {
4618 + static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
4619 + {
4620 + return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
4621 +-};
4622 ++}
4623 +
4624 + static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
4625 + {
4626 + return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
4627 + ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
4628 +-};
4629 ++}
4630 +
4631 + /**
4632 + * struct ethtool_rxnfc - command to get or set RX flow classification rules
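
The two dropped semicolons above are stray empty declarations after function definitions. Kernel builds tolerate them, but this is a UAPI header, and strict userspace compiles warn on them. Illustration:

    static inline int answer(void)
    {
            return 42;
    }   /* a ';' here would be an extra top-level semicolon: not valid
           ISO C, and a -Wpedantic warning for userspace consumers */
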
4633 +diff --git a/kernel/cpu.c b/kernel/cpu.c
4634 +index f80afc674f02..517907b082df 100644
4635 +--- a/kernel/cpu.c
4636 ++++ b/kernel/cpu.c
4637 +@@ -608,15 +608,15 @@ static void cpuhp_thread_fun(unsigned int cpu)
4638 + bool bringup = st->bringup;
4639 + enum cpuhp_state state;
4640 +
4641 ++ if (WARN_ON_ONCE(!st->should_run))
4642 ++ return;
4643 ++
4644 + /*
4645 + * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
4646 + * that if we see ->should_run we also see the rest of the state.
4647 + */
4648 + smp_mb();
4649 +
4650 +- if (WARN_ON_ONCE(!st->should_run))
4651 +- return;
4652 +-
4653 + cpuhp_lock_acquire(bringup);
4654 +
4655 + if (st->single) {
4656 +@@ -928,7 +928,8 @@ static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
4657 + ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
4658 + if (ret) {
4659 + st->target = prev_state;
4660 +- undo_cpu_down(cpu, st);
4661 ++ if (st->state < prev_state)
4662 ++ undo_cpu_down(cpu, st);
4663 + break;
4664 + }
4665 + }
4666 +@@ -981,7 +982,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
4667 + * to do the further cleanups.
4668 + */
4669 + ret = cpuhp_down_callbacks(cpu, st, target);
4670 +- if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
4671 ++ if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
4672 + cpuhp_reset_state(st, prev_state);
4673 + __cpuhp_kick_ap(st);
4674 + }
4675 +diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
4676 +index f89a78e2792b..443941aa784e 100644
4677 +--- a/kernel/time/clocksource.c
4678 ++++ b/kernel/time/clocksource.c
4679 +@@ -129,19 +129,40 @@ static void inline clocksource_watchdog_unlock(unsigned long *flags)
4680 + spin_unlock_irqrestore(&watchdog_lock, *flags);
4681 + }
4682 +
4683 ++static int clocksource_watchdog_kthread(void *data);
4684 ++static void __clocksource_change_rating(struct clocksource *cs, int rating);
4685 ++
4686 + /*
4687 + * Interval: 0.5sec Threshold: 0.0625s
4688 + */
4689 + #define WATCHDOG_INTERVAL (HZ >> 1)
4690 + #define WATCHDOG_THRESHOLD (NSEC_PER_SEC >> 4)
4691 +
4692 ++static void clocksource_watchdog_work(struct work_struct *work)
4693 ++{
4694 ++ /*
4695 ++ * We cannot directly run clocksource_watchdog_kthread() here, because
4696 ++ * clocksource_select() calls timekeeping_notify() which uses
4697 ++ * stop_machine(). One cannot use stop_machine() from a workqueue() due
4698 ++ * to lock inversions wrt CPU hotplug.
4699 ++ *
4700 ++ * Also, we only ever run this work once or twice during the lifetime
4701 ++ * of the kernel, so there is no point in creating a more permanent
4702 ++ * kthread for this.
4703 ++ *
4704 ++ * If kthread_run fails the next watchdog scan over the
4705 ++ * watchdog_list will find the unstable clock again.
4706 ++ */
4707 ++ kthread_run(clocksource_watchdog_kthread, NULL, "kwatchdog");
4708 ++}
4709 ++
4710 + static void __clocksource_unstable(struct clocksource *cs)
4711 + {
4712 + cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
4713 + cs->flags |= CLOCK_SOURCE_UNSTABLE;
4714 +
4715 + /*
4716 +- * If the clocksource is registered clocksource_watchdog_work() will
4717 ++ * If the clocksource is registered clocksource_watchdog_kthread() will
4718 + * re-rate and re-select.
4719 + */
4720 + if (list_empty(&cs->list)) {
4721 +@@ -152,7 +173,7 @@ static void __clocksource_unstable(struct clocksource *cs)
4722 + if (cs->mark_unstable)
4723 + cs->mark_unstable(cs);
4724 +
4725 +- /* kick clocksource_watchdog_work() */
4726 ++ /* kick clocksource_watchdog_kthread() */
4727 + if (finished_booting)
4728 + schedule_work(&watchdog_work);
4729 + }
4730 +@@ -162,7 +183,7 @@ static void __clocksource_unstable(struct clocksource *cs)
4731 + * @cs: clocksource to be marked unstable
4732 + *
4733 + * This function is called by the x86 TSC code to mark clocksources as unstable;
4734 +- * it defers demotion and re-selection to a work.
4735 ++ * it defers demotion and re-selection to a kthread.
4736 + */
4737 + void clocksource_mark_unstable(struct clocksource *cs)
4738 + {
4739 +@@ -387,9 +408,7 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
4740 + }
4741 + }
4742 +
4743 +-static void __clocksource_change_rating(struct clocksource *cs, int rating);
4744 +-
4745 +-static int __clocksource_watchdog_work(void)
4746 ++static int __clocksource_watchdog_kthread(void)
4747 + {
4748 + struct clocksource *cs, *tmp;
4749 + unsigned long flags;
4750 +@@ -414,12 +433,13 @@ static int __clocksource_watchdog_work(void)
4751 + return select;
4752 + }
4753 +
4754 +-static void clocksource_watchdog_work(struct work_struct *work)
4755 ++static int clocksource_watchdog_kthread(void *data)
4756 + {
4757 + mutex_lock(&clocksource_mutex);
4758 +- if (__clocksource_watchdog_work())
4759 ++ if (__clocksource_watchdog_kthread())
4760 + clocksource_select();
4761 + mutex_unlock(&clocksource_mutex);
4762 ++ return 0;
4763 + }
4764 +
4765 + static bool clocksource_is_watchdog(struct clocksource *cs)
4766 +@@ -438,7 +458,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
4767 + static void clocksource_select_watchdog(bool fallback) { }
4768 + static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
4769 + static inline void clocksource_resume_watchdog(void) { }
4770 +-static inline int __clocksource_watchdog_work(void) { return 0; }
4771 ++static inline int __clocksource_watchdog_kthread(void) { return 0; }
4772 + static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
4773 + void clocksource_mark_unstable(struct clocksource *cs) { }
4774 +
4775 +@@ -672,7 +692,7 @@ static int __init clocksource_done_booting(void)
4776 + /*
4777 + * Run the watchdog first to eliminate unstable clock sources
4778 + */
4779 +- __clocksource_watchdog_work();
4780 ++ __clocksource_watchdog_kthread();
4781 + clocksource_select();
4782 + mutex_unlock(&clocksource_mutex);
4783 + return 0;
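
The structure of this fix: clocksource_select() ends up in timekeeping_notify(), which uses stop_machine(), and stop_machine() from a workqueue worker can deadlock against CPU hotplug, so the work item now only spawns a short-lived kthread that does the real work. The bounce pattern in miniature; a sketch with placeholder names, not the full watchdog logic:

    #include <linux/kthread.h>
    #include <linux/workqueue.h>

    static int heavy_thread_fn(void *data)
    {
            /* kthread context: safe for stop_machine()-based work */
            return 0;
    }

    static void bounce_work_fn(struct work_struct *work)
    {
            /* failure is tolerated: the next trigger simply retries */
            kthread_run(heavy_thread_fn, NULL, "kwatchdog");
    }
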
4784 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
4785 +index cc2d23e6ff61..786f8c014e7e 100644
4786 +--- a/kernel/time/timer.c
4787 ++++ b/kernel/time/timer.c
4788 +@@ -1657,6 +1657,22 @@ static inline void __run_timers(struct timer_base *base)
4789 +
4790 + raw_spin_lock_irq(&base->lock);
4791 +
4792 ++ /*
4793 ++ * timer_base::must_forward_clk must be cleared before running
4794 ++ * timers so that any timer functions that call mod_timer() will
4795 ++ * not try to forward the base. Idle tracking / clock forwarding
4796 ++ * logic is only used with BASE_STD timers.
4797 ++ *
4798 ++ * The must_forward_clk flag is cleared unconditionally also for
4799 ++ * the deferrable base. The deferrable base is not affected by idle
4800 ++ * tracking and never forwarded, so clearing the flag is a NOOP.
4801 ++ *
4802 ++ * The fact that the deferrable base is never forwarded can cause
4803 ++ * large variations in granularity for deferrable timers, but they
4804 ++ * can be deferred for long periods due to idle anyway.
4805 ++ */
4806 ++ base->must_forward_clk = false;
4807 ++
4808 + while (time_after_eq(jiffies, base->clk)) {
4809 +
4810 + levels = collect_expired_timers(base, heads);
4811 +@@ -1676,19 +1692,6 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
4812 + {
4813 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
4814 +
4815 +- /*
4816 +- * must_forward_clk must be cleared before running timers so that any
4817 +- * timer functions that call mod_timer will not try to forward the
4818 +- * base. idle trcking / clock forwarding logic is only used with
4819 +- * BASE_STD timers.
4820 +- *
4821 +- * The deferrable base does not do idle tracking at all, so we do
4822 +- * not forward it. This can result in very large variations in
4823 +- * granularity for deferrable timers, but they can be deferred for
4824 +- * long periods due to idle.
4825 +- */
4826 +- base->must_forward_clk = false;
4827 +-
4828 + __run_timers(base);
4829 + if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
4830 + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
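
Net effect of the two timer.c hunks: the must_forward_clk clear moves from run_timer_softirq() -- where it ran once, outside the lock, and only for the BASE_STD pass -- into __run_timers(), where it happens under base->lock for every base the softirq services. Clearing it for the deferrable base is a no-op (that base is never forwarded), but doing the store under the lock closes the window where a concurrent mod_timer() could observe the flag mid-update. Shape of the result, as a sketch:

    static inline void __run_timers(struct timer_base *base)
    {
            /* ... */
            raw_spin_lock_irq(&base->lock);
            base->must_forward_clk = false; /* per base, under that base's lock */
            /* ... collect and expire timers ... */
            raw_spin_unlock_irq(&base->lock);
    }
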
4831 +diff --git a/mm/debug.c b/mm/debug.c
4832 +index 38c926520c97..bd10aad8539a 100644
4833 +--- a/mm/debug.c
4834 ++++ b/mm/debug.c
4835 +@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);
4836 +
4837 + void dump_mm(const struct mm_struct *mm)
4838 + {
4839 +- pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
4840 ++ pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
4841 + #ifdef CONFIG_MMU
4842 + "get_unmapped_area %px\n"
4843 + #endif
4844 +@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
4845 + "tlb_flush_pending %d\n"
4846 + "def_flags: %#lx(%pGv)\n",
4847 +
4848 +- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
4849 ++ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
4850 + #ifdef CONFIG_MMU
4851 + mm->get_unmapped_area,
4852 + #endif
4853 +diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4854 +index 7deb49f69e27..785252397e35 100644
4855 +--- a/mm/memory_hotplug.c
4856 ++++ b/mm/memory_hotplug.c
4857 +@@ -1341,7 +1341,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
4858 + if (__PageMovable(page))
4859 + return pfn;
4860 + if (PageHuge(page)) {
4861 +- if (page_huge_active(page))
4862 ++ if (hugepage_migration_supported(page_hstate(page)) &&
4863 ++ page_huge_active(page))
4864 + return pfn;
4865 + else
4866 + pfn = round_up(pfn + 1,
4867 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
4868 +index 3222193c46c6..65f2e6481c99 100644
4869 +--- a/mm/page_alloc.c
4870 ++++ b/mm/page_alloc.c
4871 +@@ -7649,6 +7649,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
4872 + * handle each tail page individually in migration.
4873 + */
4874 + if (PageHuge(page)) {
4875 ++
4876 ++ if (!hugepage_migration_supported(page_hstate(page)))
4877 ++ goto unmovable;
4878 ++
4879 + iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
4880 + continue;
4881 + }
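
Both mm hunks above add the same guard: a hugetlb page only counts as movable if its huge page size (hstate) supports migration at all; otherwise memory offlining and range isolation would proceed on a false premise and fail later. The guard in isolation -- field names from the hunks, a sketch rather than the full scan loops:

    if (PageHuge(page)) {
            if (!hugepage_migration_supported(page_hstate(page)))
                    goto unmovable; /* this hstate can never be migrated */
            /* otherwise treat the whole compound page as one movable unit */
    }
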
4882 +diff --git a/mm/vmacache.c b/mm/vmacache.c
4883 +index db7596eb6132..f1729617dc85 100644
4884 +--- a/mm/vmacache.c
4885 ++++ b/mm/vmacache.c
4886 +@@ -7,44 +7,6 @@
4887 + #include <linux/mm.h>
4888 + #include <linux/vmacache.h>
4889 +
4890 +-/*
4891 +- * Flush vma caches for threads that share a given mm.
4892 +- *
4893 +- * The operation is safe because the caller holds the mmap_sem
4894 +- * exclusively and other threads accessing the vma cache will
4895 +- * have mmap_sem held at least for read, so no extra locking
4896 +- * is required to maintain the vma cache.
4897 +- */
4898 +-void vmacache_flush_all(struct mm_struct *mm)
4899 +-{
4900 +- struct task_struct *g, *p;
4901 +-
4902 +- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
4903 +-
4904 +- /*
4905 +- * Single threaded tasks need not iterate the entire
4906 +- * list of process. We can avoid the flushing as well
4907 +- * since the mm's seqnum was increased and don't have
4908 +- * to worry about other threads' seqnum. Current's
4909 +- * flush will occur upon the next lookup.
4910 +- */
4911 +- if (atomic_read(&mm->mm_users) == 1)
4912 +- return;
4913 +-
4914 +- rcu_read_lock();
4915 +- for_each_process_thread(g, p) {
4916 +- /*
4917 +- * Only flush the vmacache pointers as the
4918 +- * mm seqnum is already set and curr's will
4919 +- * be set upon invalidation when the next
4920 +- * lookup is done.
4921 +- */
4922 +- if (mm == p->mm)
4923 +- vmacache_flush(p);
4924 +- }
4925 +- rcu_read_unlock();
4926 +-}
4927 +-
4928 + /*
4929 + * This task may be accessing a foreign mm via (for example)
4930 + * get_user_pages()->find_vma(). The vmacache is task-local and this
4931 +diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
4932 +index 3bba8f4b08a9..253975cce943 100644
4933 +--- a/net/bluetooth/hidp/core.c
4934 ++++ b/net/bluetooth/hidp/core.c
4935 +@@ -775,7 +775,7 @@ static int hidp_setup_hid(struct hidp_session *session,
4936 + hid->version = req->version;
4937 + hid->country = req->country;
4938 +
4939 +- strncpy(hid->name, req->name, sizeof(req->name) - 1);
4940 ++ strncpy(hid->name, req->name, sizeof(hid->name));
4941 +
4942 + snprintf(hid->phys, sizeof(hid->phys), "%pMR",
4943 + &l2cap_pi(session->ctrl_sock->sk)->chan->src);
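
The hidp change fixes a classic strncpy bound: limiting the copy by the *source* size protects nothing when the destination is the smaller buffer. A self-contained demonstration of why the bound must come from the destination (toy sizes; the demo also adds the explicit NUL termination that strncpy does not guarantee):

    #include <stdio.h>
    #include <string.h>

    struct dst { char name[8]; };
    struct src { char name[64]; };

    int main(void)
    {
            struct dst d;
            struct src s;

            memset(s.name, 'A', sizeof(s.name) - 1);
            s.name[sizeof(s.name) - 1] = '\0';

            /* bounding by sizeof(s.name) would let the copy overrun d.name;
             * the bound must come from the destination, as in the fix */
            strncpy(d.name, s.name, sizeof(d.name) - 1);
            d.name[sizeof(d.name) - 1] = '\0'; /* strncpy may not terminate */
            printf("%s\n", d.name);            /* AAAAAAA */
            return 0;
    }
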
4944 +diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
4945 +index 2589a6b78aa1..013fdb6fa07a 100644
4946 +--- a/net/dcb/dcbnl.c
4947 ++++ b/net/dcb/dcbnl.c
4948 +@@ -1786,7 +1786,7 @@ static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
4949 + if (itr->app.selector == app->selector &&
4950 + itr->app.protocol == app->protocol &&
4951 + itr->ifindex == ifindex &&
4952 +- (!prio || itr->app.priority == prio))
4953 ++ ((prio == -1) || itr->app.priority == prio))
4954 + return itr;
4955 + }
4956 +
4957 +@@ -1821,7 +1821,8 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
4958 + u8 prio = 0;
4959 +
4960 + spin_lock_bh(&dcb_lock);
4961 +- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
4962 ++ itr = dcb_app_lookup(app, dev->ifindex, -1);
4963 ++ if (itr)
4964 + prio = itr->app.priority;
4965 + spin_unlock_bh(&dcb_lock);
4966 +
4967 +@@ -1849,7 +1850,8 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
4968 +
4969 + spin_lock_bh(&dcb_lock);
4970 + /* Search for existing match and replace */
4971 +- if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
4972 ++ itr = dcb_app_lookup(new, dev->ifindex, -1);
4973 ++ if (itr) {
4974 + if (new->priority)
4975 + itr->app.priority = new->priority;
4976 + else {
4977 +@@ -1882,7 +1884,8 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
4978 + u8 prio = 0;
4979 +
4980 + spin_lock_bh(&dcb_lock);
4981 +- if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
4982 ++ itr = dcb_app_lookup(app, dev->ifindex, -1);
4983 ++ if (itr)
4984 + prio |= 1 << itr->app.priority;
4985 + spin_unlock_bh(&dcb_lock);
4986 +
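
The dcbnl hunks all fix one wildcard: dcb_app_lookup() used prio == 0 to mean "match any priority", but 0 is itself a legal IEEE 802.1p priority (the valid range is 0-7), so the sentinel must live outside that range; callers wanting "any" now pass -1. The logic in isolation, as a hypothetical harness:

    #include <stdio.h>

    /* -1 is the explicit "any priority" sentinel; 0 now means exactly 0 */
    static int prio_matches(int entry_prio, int wanted)
    {
            return wanted == -1 || entry_prio == wanted;
    }

    int main(void)
    {
            printf("%d\n", prio_matches(0, -1)); /* 1: wildcard still matches */
            printf("%d\n", prio_matches(3, 0));  /* 0: no false wildcard */
            return 0;
    }
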
4987 +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
4988 +index 932985ca4e66..3f80a5ca4050 100644
4989 +--- a/net/mac80211/rx.c
4990 ++++ b/net/mac80211/rx.c
4991 +@@ -1612,6 +1612,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
4992 + */
4993 + if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
4994 + !ieee80211_has_morefrags(hdr->frame_control) &&
4995 ++ !is_multicast_ether_addr(hdr->addr1) &&
4996 + (ieee80211_is_mgmt(hdr->frame_control) ||
4997 + ieee80211_is_data(hdr->frame_control)) &&
4998 + !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
4999 +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
5000 +index 20a171ac4bb2..16849969c138 100644
5001 +--- a/sound/pci/hda/hda_codec.c
5002 ++++ b/sound/pci/hda/hda_codec.c
5003 +@@ -3910,7 +3910,8 @@ void snd_hda_bus_reset_codecs(struct hda_bus *bus)
5004 +
5005 + list_for_each_codec(codec, bus) {
5006 + /* FIXME: maybe a better way needed for forced reset */
5007 +- cancel_delayed_work_sync(&codec->jackpoll_work);
5008 ++ if (current_work() != &codec->jackpoll_work.work)
5009 ++ cancel_delayed_work_sync(&codec->jackpoll_work);
5010 + #ifdef CONFIG_PM
5011 + if (hda_codec_is_power_on(codec)) {
5012 + hda_call_codec_suspend(codec);
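
The guard above prevents a work item from synchronously cancelling itself: snd_hda_bus_reset_codecs() can be reached from within jackpoll_work, and cancel_delayed_work_sync() would then wait forever for the very work that is calling it. current_work() identifies the work item the current kworker is executing, so the cancel is skipped exactly in that re-entrant case (restated from the hunk):

    /* never wait for yourself: a work function must not sync-cancel
     * the work it is currently running as */
    if (current_work() != &codec->jackpoll_work.work)
            cancel_delayed_work_sync(&codec->jackpoll_work);
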
5013 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5014 +index f6af3e1c2b93..d14b05f68d6d 100644
5015 +--- a/sound/pci/hda/patch_realtek.c
5016 ++++ b/sound/pci/hda/patch_realtek.c
5017 +@@ -6530,6 +6530,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5018 + SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
5019 + SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
5020 + SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
5021 ++ SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
5022 + SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
5023 + SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5024 + SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5025 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
5026 +index 5feae9666822..55d6c9488d8e 100644
5027 +--- a/sound/soc/soc-pcm.c
5028 ++++ b/sound/soc/soc-pcm.c
5029 +@@ -1165,6 +1165,9 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
5030 + snd_pcm_sframes_t codec_delay = 0;
5031 + int i;
5032 +
5033 ++ /* clearing the previous total delay */
5034 ++ runtime->delay = 0;
5035 ++
5036 + for_each_rtdcom(rtd, rtdcom) {
5037 + component = rtdcom->component;
5038 +
5039 +@@ -1176,6 +1179,8 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
5040 + offset = component->driver->ops->pointer(substream);
5041 + break;
5042 + }
5043 ++ /* base delay if assigned in pointer callback */
5044 ++ delay = runtime->delay;
5045 +
5046 + if (cpu_dai->driver->ops->delay)
5047 + delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
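
The two soc-pcm hunks fix delay accounting in the pointer path: runtime->delay is zeroed first so nothing stale from the previous call leaks in, and whatever base delay a component's own pointer() op reports is snapshotted before the CPU and codec DAI delays are added on top (previously the component's contribution was discarded). A toy model of the accumulation order, with hypothetical numbers:

    #include <stdio.h>

    static void pointer_cb(long *runtime_delay)
    {
            *runtime_delay = 5; /* component-reported base delay */
    }

    int main(void)
    {
            long runtime_delay = 123;  /* stale total from the last call */
            long delay;

            runtime_delay = 0;         /* the added reset */
            pointer_cb(&runtime_delay);
            delay  = runtime_delay;    /* base from the pointer callback */
            delay += 2;                /* cpu_dai->ops->delay() */
            delay += 3;                /* codec_dai->ops->delay() */
            runtime_delay = delay;
            printf("%ld\n", runtime_delay); /* 10, not 133 */
            return 0;
    }
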
5048 +diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
5049 +index f5a3b402589e..67b042738ed7 100644
5050 +--- a/tools/perf/Makefile.config
5051 ++++ b/tools/perf/Makefile.config
5052 +@@ -905,8 +905,8 @@ bindir = $(abspath $(prefix)/$(bindir_relative))
5053 + mandir = share/man
5054 + infodir = share/info
5055 + perfexecdir = libexec/perf-core
5056 +-perf_include_dir = lib/include/perf
5057 +-perf_examples_dir = lib/examples/perf
5058 ++perf_include_dir = lib/perf/include
5059 ++perf_examples_dir = lib/perf/examples
5060 + sharedir = $(prefix)/share
5061 + template_dir = share/perf-core/templates
5062 + STRACE_GROUPS_DIR = share/perf-core/strace/groups
5063 +diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
5064 +index 6a8738f7ead3..eab66e3b0a19 100644
5065 +--- a/tools/perf/builtin-c2c.c
5066 ++++ b/tools/perf/builtin-c2c.c
5067 +@@ -2349,6 +2349,9 @@ static int perf_c2c__browse_cacheline(struct hist_entry *he)
5068 + " s Toggle full length of symbol and source line columns \n"
5069 + " q Return back to cacheline list \n";
5070 +
5071 ++ if (!he)
5072 ++ return 0;
5073 ++
5074 + /* Display compact version first. */
5075 + c2c.symbol_full = false;
5076 +
5077 +diff --git a/tools/perf/perf.h b/tools/perf/perf.h
5078 +index d215714f48df..21bf7f5a3cf5 100644
5079 +--- a/tools/perf/perf.h
5080 ++++ b/tools/perf/perf.h
5081 +@@ -25,7 +25,9 @@ static inline unsigned long long rdclock(void)
5082 + return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
5083 + }
5084 +
5085 ++#ifndef MAX_NR_CPUS
5086 + #define MAX_NR_CPUS 1024
5087 ++#endif
5088 +
5089 + extern const char *input_name;
5090 + extern bool perf_host, perf_guest;
5091 +diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
5092 +index 94fce4f537e9..0d5504751cc5 100644
5093 +--- a/tools/perf/util/evsel.c
5094 ++++ b/tools/perf/util/evsel.c
5095 +@@ -848,6 +848,12 @@ static void apply_config_terms(struct perf_evsel *evsel,
5096 + }
5097 + }
5098 +
5099 ++static bool is_dummy_event(struct perf_evsel *evsel)
5100 ++{
5101 ++ return (evsel->attr.type == PERF_TYPE_SOFTWARE) &&
5102 ++ (evsel->attr.config == PERF_COUNT_SW_DUMMY);
5103 ++}
5104 ++
5105 + /*
5106 + * The enable_on_exec/disabled value strategy:
5107 + *
5108 +@@ -1086,6 +1092,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
5109 + else
5110 + perf_evsel__reset_sample_bit(evsel, PERIOD);
5111 + }
5112 ++
5113 ++ /*
5114 ++ * For initial_delay, a dummy event is added implicitly.
5115 ++ * The software event will trigger -EOPNOTSUPP error out,
5116 ++ * if BRANCH_STACK bit is set.
5117 ++ */
5118 ++ if (opts->initial_delay && is_dummy_event(evsel))
5119 ++ perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
5120 + }
5121 +
5122 + static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
5123 +diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
5124 +index b53596ad601b..2e7fd8227969 100644
5125 +--- a/tools/testing/nvdimm/pmem-dax.c
5126 ++++ b/tools/testing/nvdimm/pmem-dax.c
5127 +@@ -31,17 +31,21 @@ long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
5128 + if (get_nfit_res(pmem->phys_addr + offset)) {
5129 + struct page *page;
5130 +
5131 +- *kaddr = pmem->virt_addr + offset;
5132 ++ if (kaddr)
5133 ++ *kaddr = pmem->virt_addr + offset;
5134 + page = vmalloc_to_page(pmem->virt_addr + offset);
5135 +- *pfn = page_to_pfn_t(page);
5136 ++ if (pfn)
5137 ++ *pfn = page_to_pfn_t(page);
5138 + pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
5139 + __func__, pmem, pgoff, page_to_pfn(page));
5140 +
5141 + return 1;
5142 + }
5143 +
5144 +- *kaddr = pmem->virt_addr + offset;
5145 +- *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
5146 ++ if (kaddr)
5147 ++ *kaddr = pmem->virt_addr + offset;
5148 ++ if (pfn)
5149 ++ *pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
5150 +
5151 + /*
5152 + * If badblocks are present, limit known good range to the
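
Both branches of the test stub above now tolerate NULL output pointers: dax callers that only need the mapping size, or only one of address/pfn, pass NULL for the rest, and dereferencing unconditionally oopses. The general optional-out-parameter pattern, as a sketch:

    /* fill only the results the caller asked for */
    static long direct_access_sketch(void **kaddr, unsigned long *pfn,
                                     void *virt_base, unsigned long pfn_base)
    {
            if (kaddr)
                    *kaddr = virt_base;
            if (pfn)
                    *pfn = pfn_base;
            return 1; /* pages available at this offset */
    }
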
5153 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
5154 +index 41106d9d5cc7..f9c856c8e472 100644
5155 +--- a/tools/testing/selftests/bpf/test_verifier.c
5156 ++++ b/tools/testing/selftests/bpf/test_verifier.c
5157 +@@ -6997,7 +6997,7 @@ static struct bpf_test tests[] = {
5158 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5159 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5160 + BPF_FUNC_map_lookup_elem),
5161 +- BPF_MOV64_REG(BPF_REG_0, 0),
5162 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
5163 + BPF_EXIT_INSN(),
5164 + },
5165 + .fixup_map_in_map = { 3 },
5166 +@@ -7020,7 +7020,7 @@ static struct bpf_test tests[] = {
5167 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
5168 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5169 + BPF_FUNC_map_lookup_elem),
5170 +- BPF_MOV64_REG(BPF_REG_0, 0),
5171 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
5172 + BPF_EXIT_INSN(),
5173 + },
5174 + .fixup_map_in_map = { 3 },
5175 +@@ -7042,7 +7042,7 @@ static struct bpf_test tests[] = {
5176 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
5177 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
5178 + BPF_FUNC_map_lookup_elem),
5179 +- BPF_MOV64_REG(BPF_REG_0, 0),
5180 ++ BPF_MOV64_IMM(BPF_REG_0, 0),
5181 + BPF_EXIT_INSN(),
5182 + },
5183 + .fixup_map_in_map = { 3 },
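
The three selftest fixes above are the same operand-class bug: in the selftest macro set, the second operand of BPF_MOV64_REG() names a source *register*, so BPF_MOV64_REG(BPF_REG_0, 0) assembles to the self-move r0 = r0 (register number 0 is BPF_REG_0) rather than the constant load the tests meant; BPF_MOV64_IMM() takes an immediate. Paraphrased from the helper macros:

    /*   BPF_MOV64_REG(DST, SRC) -> DST = <register SRC>
     *   BPF_MOV64_IMM(DST, IMM) -> DST = IMM
     */
    BPF_MOV64_REG(BPF_REG_0, 0),  /* r0 = r0  (no-op self move) */
    BPF_MOV64_IMM(BPF_REG_0, 0),  /* r0 = 0   (what the tests intended) */
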
5184 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
5185 +index 70952bd98ff9..13147a1f5731 100644
5186 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
5187 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
5188 +@@ -17,7 +17,7 @@
5189 + "cmdUnderTest": "$TC actions add action connmark",
5190 + "expExitCode": "0",
5191 + "verifyCmd": "$TC actions list action connmark",
5192 +- "matchPattern": "action order [0-9]+: connmark zone 0 pipe",
5193 ++ "matchPattern": "action order [0-9]+: connmark zone 0 pipe",
5194 + "matchCount": "1",
5195 + "teardown": [
5196 + "$TC actions flush action connmark"
5197 +@@ -41,7 +41,7 @@
5198 + "cmdUnderTest": "$TC actions add action connmark pass index 1",
5199 + "expExitCode": "0",
5200 + "verifyCmd": "$TC actions get action connmark index 1",
5201 +- "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref",
5202 ++ "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 1 ref",
5203 + "matchCount": "1",
5204 + "teardown": [
5205 + "$TC actions flush action connmark"
5206 +@@ -65,7 +65,7 @@
5207 + "cmdUnderTest": "$TC actions add action connmark drop index 100",
5208 + "expExitCode": "0",
5209 + "verifyCmd": "$TC actions get action connmark index 100",
5210 +- "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref",
5211 ++ "matchPattern": "action order [0-9]+: connmark zone 0 drop.*index 100 ref",
5212 + "matchCount": "1",
5213 + "teardown": [
5214 + "$TC actions flush action connmark"
5215 +@@ -89,7 +89,7 @@
5216 + "cmdUnderTest": "$TC actions add action connmark pipe index 455",
5217 + "expExitCode": "0",
5218 + "verifyCmd": "$TC actions get action connmark index 455",
5219 +- "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref",
5220 ++ "matchPattern": "action order [0-9]+: connmark zone 0 pipe.*index 455 ref",
5221 + "matchCount": "1",
5222 + "teardown": [
5223 + "$TC actions flush action connmark"
5224 +@@ -113,7 +113,7 @@
5225 + "cmdUnderTest": "$TC actions add action connmark reclassify index 7",
5226 + "expExitCode": "0",
5227 + "verifyCmd": "$TC actions list action connmark",
5228 +- "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref",
5229 ++ "matchPattern": "action order [0-9]+: connmark zone 0 reclassify.*index 7 ref",
5230 + "matchCount": "1",
5231 + "teardown": [
5232 + "$TC actions flush action connmark"
5233 +@@ -137,7 +137,7 @@
5234 + "cmdUnderTest": "$TC actions add action connmark continue index 17",
5235 + "expExitCode": "0",
5236 + "verifyCmd": "$TC actions list action connmark",
5237 +- "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref",
5238 ++ "matchPattern": "action order [0-9]+: connmark zone 0 continue.*index 17 ref",
5239 + "matchCount": "1",
5240 + "teardown": [
5241 + "$TC actions flush action connmark"
5242 +@@ -161,7 +161,7 @@
5243 + "cmdUnderTest": "$TC actions add action connmark jump 10 index 17",
5244 + "expExitCode": "0",
5245 + "verifyCmd": "$TC actions list action connmark",
5246 +- "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref",
5247 ++ "matchPattern": "action order [0-9]+: connmark zone 0 jump 10.*index 17 ref",
5248 + "matchCount": "1",
5249 + "teardown": [
5250 + "$TC actions flush action connmark"
5251 +@@ -185,7 +185,7 @@
5252 + "cmdUnderTest": "$TC actions add action connmark zone 100 pipe index 1",
5253 + "expExitCode": "0",
5254 + "verifyCmd": "$TC actions get action connmark index 1",
5255 +- "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref",
5256 ++ "matchPattern": "action order [0-9]+: connmark zone 100 pipe.*index 1 ref",
5257 + "matchCount": "1",
5258 + "teardown": [
5259 + "$TC actions flush action connmark"
5260 +@@ -209,7 +209,7 @@
5261 + "cmdUnderTest": "$TC actions add action connmark zone 65536 reclassify index 21",
5262 + "expExitCode": "255",
5263 + "verifyCmd": "$TC actions get action connmark index 1",
5264 +- "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref",
5265 ++ "matchPattern": "action order [0-9]+: connmark zone 65536 reclassify.*index 21 ref",
5266 + "matchCount": "0",
5267 + "teardown": [
5268 + "$TC actions flush action connmark"
5269 +@@ -233,7 +233,7 @@
5270 + "cmdUnderTest": "$TC actions add action connmark zone 655 unsupp_arg pass index 2",
5271 + "expExitCode": "255",
5272 + "verifyCmd": "$TC actions get action connmark index 2",
5273 +- "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref",
5274 ++ "matchPattern": "action order [0-9]+: connmark zone 655 unsupp_arg pass.*index 2 ref",
5275 + "matchCount": "0",
5276 + "teardown": [
5277 + "$TC actions flush action connmark"
5278 +@@ -258,7 +258,7 @@
5279 + "cmdUnderTest": "$TC actions replace action connmark zone 555 reclassify index 555",
5280 + "expExitCode": "0",
5281 + "verifyCmd": "$TC actions get action connmark index 555",
5282 +- "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref",
5283 ++ "matchPattern": "action order [0-9]+: connmark zone 555 reclassify.*index 555 ref",
5284 + "matchCount": "1",
5285 + "teardown": [
5286 + "$TC actions flush action connmark"
5287 +@@ -282,7 +282,7 @@
5288 + "cmdUnderTest": "$TC actions add action connmark zone 555 pipe index 5 cookie aabbccddeeff112233445566778800a1",
5289 + "expExitCode": "0",
5290 + "verifyCmd": "$TC actions get action connmark index 5",
5291 +- "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1",
5292 ++ "matchPattern": "action order [0-9]+: connmark zone 555 pipe.*index 5 ref.*cookie aabbccddeeff112233445566778800a1",
5293 + "matchCount": "1",
5294 + "teardown": [
5295 + "$TC actions flush action connmark"
5296 +diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
5297 +index 6e4edfae1799..db49fd0f8445 100644
5298 +--- a/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
5299 ++++ b/tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
5300 +@@ -44,7 +44,8 @@
5301 + "matchPattern": "action order [0-9]*: mirred \\(Egress Redirect to device lo\\).*index 2 ref",
5302 + "matchCount": "1",
5303 + "teardown": [
5304 +- "$TC actions flush action mirred"
5305 ++ "$TC actions flush action mirred",
5306 ++ "$TC actions flush action gact"
5307 + ]
5308 + },
5309 + {
5310 +diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
5311 +index c2b95a22959b..fd8c88463928 100644
5312 +--- a/virt/kvm/arm/mmu.c
5313 ++++ b/virt/kvm/arm/mmu.c
5314 +@@ -1831,13 +1831,20 @@ static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data
5315 + void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
5316 + {
5317 + unsigned long end = hva + PAGE_SIZE;
5318 ++ kvm_pfn_t pfn = pte_pfn(pte);
5319 + pte_t stage2_pte;
5320 +
5321 + if (!kvm->arch.pgd)
5322 + return;
5323 +
5324 + trace_kvm_set_spte_hva(hva);
5325 +- stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
5326 ++
5327 ++ /*
5328 ++ * We've moved a page around, probably through CoW, so let's treat it
5329 ++ * just like a translation fault and clean the cache to the PoC.
5330 ++ */
5331 ++ clean_dcache_guest_page(pfn, PAGE_SIZE);
5332 ++ stage2_pte = pfn_pte(pfn, PAGE_S2);
5333 + handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
5334 + }
5335 +
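
Context for this final hunk: kvm_set_spte_hva() installs a stage-2 mapping for a page that just moved (typically copy-on-write), and the new page's contents may still sit only in the D-cache; unlike the regular translation-fault path, this path did no cache maintenance, so a guest running with caches disabled or with mismatched attributes could read stale data. The required ordering, restated from the hunk:

    kvm_pfn_t pfn = pte_pfn(pte);

    clean_dcache_guest_page(pfn, PAGE_SIZE); /* flush new contents to PoC */
    stage2_pte = pfn_pte(pfn, PAGE_S2);      /* only then install the PTE */
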