From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Tue, 29 Oct 2019 12:04:56
Message-Id: 1572350669.3af0f636399a165ea0a180550c66d5a5515a9c60.mpagano@gentoo
1 commit: 3af0f636399a165ea0a180550c66d5a5515a9c60
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Oct 29 12:04:29 2019 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Oct 29 12:04:29 2019 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=3af0f636399a165ea0a180550c66d5a5515a9c60
7
8 Linux patch 4.19.81
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1080_linux-4.19.81.patch | 2700 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2704 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 4a75724..3811a86 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -359,6 +359,10 @@ Patch: 1079_linux-4.19.80.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.80
23
24 +Patch: 1080_linux-4.19.81.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.81
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1080_linux-4.19.81.patch b/1080_linux-4.19.81.patch
33 new file mode 100644
34 index 0000000..55ea23d
35 --- /dev/null
36 +++ b/1080_linux-4.19.81.patch
37 @@ -0,0 +1,2700 @@
38 +diff --git a/Makefile b/Makefile
39 +index ced4a9fd9754..3c146e8d93dc 100644
40 +--- a/Makefile
41 ++++ b/Makefile
42 +@@ -1,7 +1,7 @@
43 + # SPDX-License-Identifier: GPL-2.0
44 + VERSION = 4
45 + PATCHLEVEL = 19
46 +-SUBLEVEL = 80
47 ++SUBLEVEL = 81
48 + EXTRAVERSION =
49 + NAME = "People's Front"
50 +
51 +diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
52 +index d4b7c59eec68..cf1e4f747242 100644
53 +--- a/arch/arm/boot/dts/am4372.dtsi
54 ++++ b/arch/arm/boot/dts/am4372.dtsi
55 +@@ -1142,6 +1142,8 @@
56 + ti,hwmods = "dss_dispc";
57 + clocks = <&disp_clk>;
58 + clock-names = "fck";
59 ++
60 ++ max-memory-bandwidth = <230000000>;
61 + };
62 +
63 + rfbi: rfbi@4832a800 {
64 +diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
65 +index 9ded7bf972e7..3b8fe014a3e9 100644
66 +--- a/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
67 ++++ b/arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
68 +@@ -946,7 +946,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
69 + .rev_offs = 0x0000,
70 + .sysc_offs = 0x0010,
71 + .syss_offs = 0x0014,
72 +- .sysc_flags = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
73 ++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
74 ++ SYSC_HAS_RESET_STATUS,
75 + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
76 + SIDLE_SMART_WKUP),
77 + .sysc_fields = &omap_hwmod_sysc_type2,
78 +diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
79 +index ca03af8fe43f..ddf96adf65ab 100644
80 +--- a/arch/arm/mach-omap2/pm.c
81 ++++ b/arch/arm/mach-omap2/pm.c
82 +@@ -77,83 +77,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
83 + return 0;
84 + }
85 +
86 +-/*
87 +- * This API is to be called during init to set the various voltage
88 +- * domains to the voltage as per the opp table. Typically we boot up
89 +- * at the nominal voltage. So this function finds out the rate of
90 +- * the clock associated with the voltage domain, finds out the correct
91 +- * opp entry and sets the voltage domain to the voltage specified
92 +- * in the opp entry
93 +- */
94 +-static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
95 +- const char *oh_name)
96 +-{
97 +- struct voltagedomain *voltdm;
98 +- struct clk *clk;
99 +- struct dev_pm_opp *opp;
100 +- unsigned long freq, bootup_volt;
101 +- struct device *dev;
102 +-
103 +- if (!vdd_name || !clk_name || !oh_name) {
104 +- pr_err("%s: invalid parameters\n", __func__);
105 +- goto exit;
106 +- }
107 +-
108 +- if (!strncmp(oh_name, "mpu", 3))
109 +- /*
110 +- * All current OMAPs share voltage rail and clock
111 +- * source, so CPU0 is used to represent the MPU-SS.
112 +- */
113 +- dev = get_cpu_device(0);
114 +- else
115 +- dev = omap_device_get_by_hwmod_name(oh_name);
116 +-
117 +- if (IS_ERR(dev)) {
118 +- pr_err("%s: Unable to get dev pointer for hwmod %s\n",
119 +- __func__, oh_name);
120 +- goto exit;
121 +- }
122 +-
123 +- voltdm = voltdm_lookup(vdd_name);
124 +- if (!voltdm) {
125 +- pr_err("%s: unable to get vdd pointer for vdd_%s\n",
126 +- __func__, vdd_name);
127 +- goto exit;
128 +- }
129 +-
130 +- clk = clk_get(NULL, clk_name);
131 +- if (IS_ERR(clk)) {
132 +- pr_err("%s: unable to get clk %s\n", __func__, clk_name);
133 +- goto exit;
134 +- }
135 +-
136 +- freq = clk_get_rate(clk);
137 +- clk_put(clk);
138 +-
139 +- opp = dev_pm_opp_find_freq_ceil(dev, &freq);
140 +- if (IS_ERR(opp)) {
141 +- pr_err("%s: unable to find boot up OPP for vdd_%s\n",
142 +- __func__, vdd_name);
143 +- goto exit;
144 +- }
145 +-
146 +- bootup_volt = dev_pm_opp_get_voltage(opp);
147 +- dev_pm_opp_put(opp);
148 +-
149 +- if (!bootup_volt) {
150 +- pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
151 +- __func__, vdd_name);
152 +- goto exit;
153 +- }
154 +-
155 +- voltdm_scale(voltdm, bootup_volt);
156 +- return 0;
157 +-
158 +-exit:
159 +- pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
160 +- return -EINVAL;
161 +-}
162 +-
163 + #ifdef CONFIG_SUSPEND
164 + static int omap_pm_enter(suspend_state_t suspend_state)
165 + {
166 +@@ -211,25 +134,6 @@ void omap_common_suspend_init(void *pm_suspend)
167 + }
168 + #endif /* CONFIG_SUSPEND */
169 +
170 +-static void __init omap3_init_voltages(void)
171 +-{
172 +- if (!soc_is_omap34xx())
173 +- return;
174 +-
175 +- omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
176 +- omap2_set_init_voltage("core", "l3_ick", "l3_main");
177 +-}
178 +-
179 +-static void __init omap4_init_voltages(void)
180 +-{
181 +- if (!soc_is_omap44xx())
182 +- return;
183 +-
184 +- omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
185 +- omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1");
186 +- omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
187 +-}
188 +-
189 + int __maybe_unused omap_pm_nop_init(void)
190 + {
191 + return 0;
192 +@@ -249,10 +153,6 @@ int __init omap2_common_pm_late_init(void)
193 + omap4_twl_init();
194 + omap_voltage_late_init();
195 +
196 +- /* Initialize the voltages */
197 +- omap3_init_voltages();
198 +- omap4_init_voltages();
199 +-
200 + /* Smartreflex device init */
201 + omap_devinit_smartreflex();
202 +
203 +diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
204 +index b4d78959cadf..bc9a37b3cecd 100644
205 +--- a/arch/arm/xen/efi.c
206 ++++ b/arch/arm/xen/efi.c
207 +@@ -31,7 +31,9 @@ void __init xen_efi_runtime_setup(void)
208 + efi.get_variable = xen_efi_get_variable;
209 + efi.get_next_variable = xen_efi_get_next_variable;
210 + efi.set_variable = xen_efi_set_variable;
211 ++ efi.set_variable_nonblocking = xen_efi_set_variable;
212 + efi.query_variable_info = xen_efi_query_variable_info;
213 ++ efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
214 + efi.update_capsule = xen_efi_update_capsule;
215 + efi.query_capsule_caps = xen_efi_query_capsule_caps;
216 + efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
217 +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
218 +index 9ccf16939d13..71888808ded7 100644
219 +--- a/arch/arm64/kernel/cpu_errata.c
220 ++++ b/arch/arm64/kernel/cpu_errata.c
221 +@@ -23,6 +23,7 @@
222 + #include <asm/cpu.h>
223 + #include <asm/cputype.h>
224 + #include <asm/cpufeature.h>
225 ++#include <asm/smp_plat.h>
226 +
227 + static bool __maybe_unused
228 + is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
229 +@@ -618,6 +619,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
230 + return (need_wa > 0);
231 + }
232 +
233 ++static const __maybe_unused struct midr_range tx2_family_cpus[] = {
234 ++ MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
235 ++ MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
236 ++ {},
237 ++};
238 ++
239 ++static bool __maybe_unused
240 ++needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
241 ++ int scope)
242 ++{
243 ++ int i;
244 ++
245 ++ if (!is_affected_midr_range_list(entry, scope) ||
246 ++ !is_hyp_mode_available())
247 ++ return false;
248 ++
249 ++ for_each_possible_cpu(i) {
250 ++ if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
251 ++ return true;
252 ++ }
253 ++
254 ++ return false;
255 ++}
256 ++
257 + #ifdef CONFIG_HARDEN_EL2_VECTORS
258 +
259 + static const struct midr_range arm64_harden_el2_vectors[] = {
260 +@@ -801,6 +826,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
261 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
262 + .matches = has_cortex_a76_erratum_1463225,
263 + },
264 ++#endif
265 ++#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
266 ++ {
267 ++ .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
268 ++ .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
269 ++ ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
270 ++ .matches = needs_tx2_tvm_workaround,
271 ++ },
272 + #endif
273 + {
274 + }
275 +diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
276 +index 2bae201aa365..1c7bf11f8450 100644
277 +--- a/arch/mips/boot/dts/qca/ar9331.dtsi
278 ++++ b/arch/mips/boot/dts/qca/ar9331.dtsi
279 +@@ -99,7 +99,7 @@
280 +
281 + miscintc: interrupt-controller@18060010 {
282 + compatible = "qca,ar7240-misc-intc";
283 +- reg = <0x18060010 0x4>;
284 ++ reg = <0x18060010 0x8>;
285 +
286 + interrupt-parent = <&cpuintc>;
287 + interrupts = <6>;
288 +diff --git a/arch/mips/loongson64/common/serial.c b/arch/mips/loongson64/common/serial.c
289 +index ffefc1cb2612..98c3a7feb10f 100644
290 +--- a/arch/mips/loongson64/common/serial.c
291 ++++ b/arch/mips/loongson64/common/serial.c
292 +@@ -110,7 +110,7 @@ static int __init serial_init(void)
293 + }
294 + module_init(serial_init);
295 +
296 +-static void __init serial_exit(void)
297 ++static void __exit serial_exit(void)
298 + {
299 + platform_device_unregister(&uart8250_device);
300 + }
301 +diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
302 +index 355f8eadb1cd..3944c49eee0c 100644
303 +--- a/arch/mips/mm/tlbex.c
304 ++++ b/arch/mips/mm/tlbex.c
305 +@@ -654,6 +654,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
306 + int restore_scratch)
307 + {
308 + if (restore_scratch) {
309 ++ /*
310 ++ * Ensure the MFC0 below observes the value written to the
311 ++ * KScratch register by the prior MTC0.
312 ++ */
313 ++ if (scratch_reg >= 0)
314 ++ uasm_i_ehb(p);
315 ++
316 + /* Reset default page size */
317 + if (PM_DEFAULT_MASK >> 16) {
318 + uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
319 +@@ -668,12 +675,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
320 + uasm_i_mtc0(p, 0, C0_PAGEMASK);
321 + uasm_il_b(p, r, lid);
322 + }
323 +- if (scratch_reg >= 0) {
324 +- uasm_i_ehb(p);
325 ++ if (scratch_reg >= 0)
326 + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
327 +- } else {
328 ++ else
329 + UASM_i_LW(p, 1, scratchpad_offset(0), 0);
330 +- }
331 + } else {
332 + /* Reset default page size */
333 + if (PM_DEFAULT_MASK >> 16) {
334 +@@ -922,6 +927,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
335 + }
336 + if (mode != not_refill && check_for_high_segbits) {
337 + uasm_l_large_segbits_fault(l, *p);
338 ++
339 ++ if (mode == refill_scratch && scratch_reg >= 0)
340 ++ uasm_i_ehb(p);
341 ++
342 + /*
343 + * We get here if we are an xsseg address, or if we are
344 + * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
345 +@@ -938,12 +947,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
346 + uasm_i_jr(p, ptr);
347 +
348 + if (mode == refill_scratch) {
349 +- if (scratch_reg >= 0) {
350 +- uasm_i_ehb(p);
351 ++ if (scratch_reg >= 0)
352 + UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
353 +- } else {
354 ++ else
355 + UASM_i_LW(p, 1, scratchpad_offset(0), 0);
356 +- }
357 + } else {
358 + uasm_i_nop(p);
359 + }
360 +diff --git a/arch/parisc/mm/ioremap.c b/arch/parisc/mm/ioremap.c
361 +index 92a9b5f12f98..f29f682352f0 100644
362 +--- a/arch/parisc/mm/ioremap.c
363 ++++ b/arch/parisc/mm/ioremap.c
364 +@@ -3,7 +3,7 @@
365 + * arch/parisc/mm/ioremap.c
366 + *
367 + * (C) Copyright 1995 1996 Linus Torvalds
368 +- * (C) Copyright 2001-2006 Helge Deller <deller@×××.de>
369 ++ * (C) Copyright 2001-2019 Helge Deller <deller@×××.de>
370 + * (C) Copyright 2005 Kyle McMartin <kyle@××××××××××××.org>
371 + */
372 +
373 +@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
374 + addr = (void __iomem *) area->addr;
375 + if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
376 + phys_addr, pgprot)) {
377 +- vfree(addr);
378 ++ vunmap(addr);
379 + return NULL;
380 + }
381 +
382 +@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
383 + }
384 + EXPORT_SYMBOL(__ioremap);
385 +
386 +-void iounmap(const volatile void __iomem *addr)
387 ++void iounmap(const volatile void __iomem *io_addr)
388 + {
389 +- if (addr > high_memory)
390 +- return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
391 ++ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
392 ++
393 ++ if (is_vmalloc_addr((void *)addr))
394 ++ vunmap((void *)addr);
395 + }
396 + EXPORT_SYMBOL(iounmap);
397 +diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
398 +index 7685444a106b..145517934171 100644
399 +--- a/arch/x86/kernel/apic/x2apic_cluster.c
400 ++++ b/arch/x86/kernel/apic/x2apic_cluster.c
401 +@@ -158,7 +158,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
402 + {
403 + struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
404 +
405 +- cpumask_clear_cpu(dead_cpu, &cmsk->mask);
406 ++ if (cmsk)
407 ++ cpumask_clear_cpu(dead_cpu, &cmsk->mask);
408 + free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
409 + return 0;
410 + }
411 +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
412 +index 250cfa85b633..88dc38b4a147 100644
413 +--- a/arch/x86/kernel/head64.c
414 ++++ b/arch/x86/kernel/head64.c
415 +@@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
416 + * we might write invalid pmds, when the kernel is relocated
417 + * cleanup_highmap() fixes this up along with the mappings
418 + * beyond _end.
419 ++ *
420 ++ * Only the region occupied by the kernel image has so far
421 ++ * been checked against the table of usable memory regions
422 ++ * provided by the firmware, so invalidate pages outside that
423 ++ * region. A page table entry that maps to a reserved area of
424 ++ * memory would allow processor speculation into that area,
425 ++ * and on some hardware (particularly the UV platform) even
426 ++ * speculative access to some reserved areas is caught as an
427 ++ * error, causing the BIOS to halt the system.
428 + */
429 +
430 + pmd = fixup_pointer(level2_kernel_pgt, physaddr);
431 +- for (i = 0; i < PTRS_PER_PMD; i++) {
432 ++
433 ++ /* invalidate pages before the kernel image */
434 ++ for (i = 0; i < pmd_index((unsigned long)_text); i++)
435 ++ pmd[i] &= ~_PAGE_PRESENT;
436 ++
437 ++ /* fixup pages that are part of the kernel image */
438 ++ for (; i <= pmd_index((unsigned long)_end); i++)
439 + if (pmd[i] & _PAGE_PRESENT)
440 + pmd[i] += load_delta;
441 +- }
442 ++
443 ++ /* invalidate pages after the kernel image */
444 ++ for (; i < PTRS_PER_PMD; i++)
445 ++ pmd[i] &= ~_PAGE_PRESENT;
446 +
447 + /*
448 + * Fixup phys_base - remove the memory encryption mask to obtain
449 +diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c
450 +index 1804b27f9632..66bcdeeee639 100644
451 +--- a/arch/x86/xen/efi.c
452 ++++ b/arch/x86/xen/efi.c
453 +@@ -77,7 +77,9 @@ static efi_system_table_t __init *xen_efi_probe(void)
454 + efi.get_variable = xen_efi_get_variable;
455 + efi.get_next_variable = xen_efi_get_next_variable;
456 + efi.set_variable = xen_efi_set_variable;
457 ++ efi.set_variable_nonblocking = xen_efi_set_variable;
458 + efi.query_variable_info = xen_efi_query_variable_info;
459 ++ efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
460 + efi.update_capsule = xen_efi_update_capsule;
461 + efi.query_capsule_caps = xen_efi_query_capsule_caps;
462 + efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
463 +diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
464 +index 04f19de46700..4092555828b1 100644
465 +--- a/arch/xtensa/kernel/xtensa_ksyms.c
466 ++++ b/arch/xtensa/kernel/xtensa_ksyms.c
467 +@@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
468 + // FIXME EXPORT_SYMBOL(screen_info);
469 + #endif
470 +
471 +-EXPORT_SYMBOL(outsb);
472 +-EXPORT_SYMBOL(outsw);
473 +-EXPORT_SYMBOL(outsl);
474 +-EXPORT_SYMBOL(insb);
475 +-EXPORT_SYMBOL(insw);
476 +-EXPORT_SYMBOL(insl);
477 +-
478 + extern long common_exception_return;
479 + EXPORT_SYMBOL(common_exception_return);
480 +
481 +diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
482 +index 60fac2d066cf..98caba3e962e 100644
483 +--- a/block/blk-rq-qos.h
484 ++++ b/block/blk-rq-qos.h
485 +@@ -80,16 +80,13 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
486 +
487 + static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
488 + {
489 +- struct rq_qos *cur, *prev = NULL;
490 +- for (cur = q->rq_qos; cur; cur = cur->next) {
491 +- if (cur == rqos) {
492 +- if (prev)
493 +- prev->next = rqos->next;
494 +- else
495 +- q->rq_qos = cur;
496 ++ struct rq_qos **cur;
497 ++
498 ++ for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
499 ++ if (*cur == rqos) {
500 ++ *cur = rqos->next;
501 + break;
502 + }
503 +- prev = cur;
504 + }
505 + }
506 +
507 +diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
508 +index a1aa59849b96..41228e545e82 100644
509 +--- a/drivers/acpi/cppc_acpi.c
510 ++++ b/drivers/acpi/cppc_acpi.c
511 +@@ -909,8 +909,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
512 + pcc_data[pcc_ss_id]->refcount--;
513 + if (!pcc_data[pcc_ss_id]->refcount) {
514 + pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
515 +- pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
516 + kfree(pcc_data[pcc_ss_id]);
517 ++ pcc_data[pcc_ss_id] = NULL;
518 + }
519 + }
520 + }
521 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
522 +index 5d110b1362e7..fa1c5a442957 100644
523 +--- a/drivers/ata/ahci.c
524 ++++ b/drivers/ata/ahci.c
525 +@@ -1633,7 +1633,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
526 + */
527 + if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
528 + return;
529 +- if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
530 ++
531 ++ /* Skip applying the quirk on Denverton and beyond */
532 ++ if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
533 + return;
534 +
535 + /*
536 +diff --git a/drivers/base/core.c b/drivers/base/core.c
537 +index fcda6313e7de..985ccced33a2 100644
538 +--- a/drivers/base/core.c
539 ++++ b/drivers/base/core.c
540 +@@ -8,6 +8,7 @@
541 + * Copyright (c) 2006 Novell, Inc.
542 + */
543 +
544 ++#include <linux/cpufreq.h>
545 + #include <linux/device.h>
546 + #include <linux/err.h>
547 + #include <linux/fwnode.h>
548 +@@ -2943,6 +2944,8 @@ void device_shutdown(void)
549 + wait_for_device_probe();
550 + device_block_probing();
551 +
552 ++ cpufreq_suspend();
553 ++
554 + spin_lock(&devices_kset->list_lock);
555 + /*
556 + * Walk the devices list backward, shutting down each in turn.
557 +diff --git a/drivers/base/memory.c b/drivers/base/memory.c
558 +index 817320c7c4c1..85ee64d0a44e 100644
559 +--- a/drivers/base/memory.c
560 ++++ b/drivers/base/memory.c
561 +@@ -554,6 +554,9 @@ store_soft_offline_page(struct device *dev,
562 + pfn >>= PAGE_SHIFT;
563 + if (!pfn_valid(pfn))
564 + return -ENXIO;
565 ++ /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
566 ++ if (!pfn_to_online_page(pfn))
567 ++ return -EIO;
568 + ret = soft_offline_page(pfn_to_page(pfn), 0);
569 + return ret == 0 ? count : ret;
570 + }
571 +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
572 +index d3213594d1a7..ace5ec65e36f 100644
573 +--- a/drivers/cpufreq/cpufreq.c
574 ++++ b/drivers/cpufreq/cpufreq.c
575 +@@ -2578,14 +2578,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
576 + }
577 + EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
578 +
579 +-/*
580 +- * Stop cpufreq at shutdown to make sure it isn't holding any locks
581 +- * or mutexes when secondary CPUs are halted.
582 +- */
583 +-static struct syscore_ops cpufreq_syscore_ops = {
584 +- .shutdown = cpufreq_suspend,
585 +-};
586 +-
587 + struct kobject *cpufreq_global_kobject;
588 + EXPORT_SYMBOL(cpufreq_global_kobject);
589 +
590 +@@ -2597,8 +2589,6 @@ static int __init cpufreq_core_init(void)
591 + cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
592 + BUG_ON(!cpufreq_global_kobject);
593 +
594 +- register_syscore_ops(&cpufreq_syscore_ops);
595 +-
596 + return 0;
597 + }
598 + module_param(off, int, 0444);
599 +diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
600 +index 473aeec4b1da..574bce603337 100644
601 +--- a/drivers/edac/ghes_edac.c
602 ++++ b/drivers/edac/ghes_edac.c
603 +@@ -532,7 +532,11 @@ void ghes_edac_unregister(struct ghes *ghes)
604 + if (!ghes_pvt)
605 + return;
606 +
607 ++ if (atomic_dec_return(&ghes_init))
608 ++ return;
609 ++
610 + mci = ghes_pvt->mci;
611 ++ ghes_pvt = NULL;
612 + edac_mc_del_mc(mci->pdev);
613 + edac_mc_free(mci);
614 + }
615 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
616 +index b40e9c76af0c..5e29f14f4b30 100644
617 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
618 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
619 +@@ -841,6 +841,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
620 + if (ret == -EPROBE_DEFER)
621 + return ret;
622 +
623 ++#ifdef CONFIG_DRM_AMDGPU_SI
624 ++ if (!amdgpu_si_support) {
625 ++ switch (flags & AMD_ASIC_MASK) {
626 ++ case CHIP_TAHITI:
627 ++ case CHIP_PITCAIRN:
628 ++ case CHIP_VERDE:
629 ++ case CHIP_OLAND:
630 ++ case CHIP_HAINAN:
631 ++ dev_info(&pdev->dev,
632 ++ "SI support provided by radeon.\n");
633 ++ dev_info(&pdev->dev,
634 ++ "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
635 ++ );
636 ++ return -ENODEV;
637 ++ }
638 ++ }
639 ++#endif
640 ++#ifdef CONFIG_DRM_AMDGPU_CIK
641 ++ if (!amdgpu_cik_support) {
642 ++ switch (flags & AMD_ASIC_MASK) {
643 ++ case CHIP_KAVERI:
644 ++ case CHIP_BONAIRE:
645 ++ case CHIP_HAWAII:
646 ++ case CHIP_KABINI:
647 ++ case CHIP_MULLINS:
648 ++ dev_info(&pdev->dev,
649 ++ "CIK support provided by radeon.\n");
650 ++ dev_info(&pdev->dev,
651 ++ "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
652 ++ );
653 ++ return -ENODEV;
654 ++ }
655 ++ }
656 ++#endif
657 ++
658 + /* Get rid of things like offb */
659 + ret = amdgpu_kick_out_firmware_fb(pdev);
660 + if (ret)
661 +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
662 +index fc93b103f777..ba10577569f8 100644
663 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
664 ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
665 +@@ -87,41 +87,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
666 + struct amdgpu_device *adev;
667 + int r, acpi_status;
668 +
669 +-#ifdef CONFIG_DRM_AMDGPU_SI
670 +- if (!amdgpu_si_support) {
671 +- switch (flags & AMD_ASIC_MASK) {
672 +- case CHIP_TAHITI:
673 +- case CHIP_PITCAIRN:
674 +- case CHIP_VERDE:
675 +- case CHIP_OLAND:
676 +- case CHIP_HAINAN:
677 +- dev_info(dev->dev,
678 +- "SI support provided by radeon.\n");
679 +- dev_info(dev->dev,
680 +- "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
681 +- );
682 +- return -ENODEV;
683 +- }
684 +- }
685 +-#endif
686 +-#ifdef CONFIG_DRM_AMDGPU_CIK
687 +- if (!amdgpu_cik_support) {
688 +- switch (flags & AMD_ASIC_MASK) {
689 +- case CHIP_KAVERI:
690 +- case CHIP_BONAIRE:
691 +- case CHIP_HAWAII:
692 +- case CHIP_KABINI:
693 +- case CHIP_MULLINS:
694 +- dev_info(dev->dev,
695 +- "CIK support provided by radeon.\n");
696 +- dev_info(dev->dev,
697 +- "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
698 +- );
699 +- return -ENODEV;
700 +- }
701 +- }
702 +-#endif
703 +-
704 + adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
705 + if (adev == NULL) {
706 + return -ENOMEM;
707 +diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
708 +index e5e7e65934da..f5926bf5dabd 100644
709 +--- a/drivers/gpu/drm/drm_edid.c
710 ++++ b/drivers/gpu/drm/drm_edid.c
711 +@@ -166,6 +166,9 @@ static const struct edid_quirk {
712 + /* Medion MD 30217 PG */
713 + { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
714 +
715 ++ /* Lenovo G50 */
716 ++ { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
717 ++
718 + /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
719 + { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
720 +
721 +diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
722 +index d83310751a8e..c26f09b47ecb 100644
723 +--- a/drivers/gpu/drm/radeon/radeon_drv.c
724 ++++ b/drivers/gpu/drm/radeon/radeon_drv.c
725 +@@ -395,19 +395,11 @@ radeon_pci_remove(struct pci_dev *pdev)
726 + static void
727 + radeon_pci_shutdown(struct pci_dev *pdev)
728 + {
729 +- struct drm_device *ddev = pci_get_drvdata(pdev);
730 +-
731 + /* if we are running in a VM, make sure the device
732 + * torn down properly on reboot/shutdown
733 + */
734 + if (radeon_device_is_virtual())
735 + radeon_pci_remove(pdev);
736 +-
737 +- /* Some adapters need to be suspended before a
738 +- * shutdown occurs in order to prevent an error
739 +- * during kexec.
740 +- */
741 +- radeon_suspend_kms(ddev, true, true, false);
742 + }
743 +
744 + static int radeon_pmops_suspend(struct device *dev)
745 +diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
746 +index 6fe91c1b692d..185655f22f89 100644
747 +--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
748 ++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
749 +@@ -273,15 +273,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
750 + else
751 + ret = vmf_insert_pfn(&cvma, address, pfn);
752 +
753 +- /*
754 +- * Somebody beat us to this PTE or prefaulting to
755 +- * an already populated PTE, or prefaulting error.
756 +- */
757 +-
758 +- if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
759 +- break;
760 +- else if (unlikely(ret & VM_FAULT_ERROR))
761 +- goto out_io_unlock;
762 ++ /* Never error on prefaulted PTEs */
763 ++ if (unlikely((ret & VM_FAULT_ERROR))) {
764 ++ if (i == 0)
765 ++ goto out_io_unlock;
766 ++ else
767 ++ break;
768 ++ }
769 +
770 + address += PAGE_SIZE;
771 + if (unlikely(++page_offset >= page_last))
772 +diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
773 +index 7b76e6f81aeb..f2fb7318abc1 100644
774 +--- a/drivers/infiniband/hw/cxgb4/mem.c
775 ++++ b/drivers/infiniband/hw/cxgb4/mem.c
776 +@@ -274,13 +274,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
777 + struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
778 + {
779 + int err;
780 +- struct fw_ri_tpte tpt;
781 ++ struct fw_ri_tpte *tpt;
782 + u32 stag_idx;
783 + static atomic_t key;
784 +
785 + if (c4iw_fatal_error(rdev))
786 + return -EIO;
787 +
788 ++ tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
789 ++ if (!tpt)
790 ++ return -ENOMEM;
791 ++
792 + stag_state = stag_state > 0;
793 + stag_idx = (*stag) >> 8;
794 +
795 +@@ -290,6 +294,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
796 + mutex_lock(&rdev->stats.lock);
797 + rdev->stats.stag.fail++;
798 + mutex_unlock(&rdev->stats.lock);
799 ++ kfree(tpt);
800 + return -ENOMEM;
801 + }
802 + mutex_lock(&rdev->stats.lock);
803 +@@ -304,28 +309,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
804 +
805 + /* write TPT entry */
806 + if (reset_tpt_entry)
807 +- memset(&tpt, 0, sizeof(tpt));
808 ++ memset(tpt, 0, sizeof(*tpt));
809 + else {
810 +- tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
811 ++ tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
812 + FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
813 + FW_RI_TPTE_STAGSTATE_V(stag_state) |
814 + FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
815 +- tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
816 ++ tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
817 + (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
818 + FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
819 + FW_RI_VA_BASED_TO))|
820 + FW_RI_TPTE_PS_V(page_size));
821 +- tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
822 ++ tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
823 + FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
824 +- tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
825 +- tpt.va_hi = cpu_to_be32((u32)(to >> 32));
826 +- tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
827 +- tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
828 +- tpt.len_hi = cpu_to_be32((u32)(len >> 32));
829 ++ tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
830 ++ tpt->va_hi = cpu_to_be32((u32)(to >> 32));
831 ++ tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
832 ++ tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
833 ++ tpt->len_hi = cpu_to_be32((u32)(len >> 32));
834 + }
835 + err = write_adapter_mem(rdev, stag_idx +
836 + (rdev->lldi.vr->stag.start >> 5),
837 +- sizeof(tpt), &tpt, skb, wr_waitp);
838 ++ sizeof(*tpt), tpt, skb, wr_waitp);
839 +
840 + if (reset_tpt_entry) {
841 + c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
842 +@@ -333,6 +338,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
843 + rdev->stats.stag.cur -= 32;
844 + mutex_unlock(&rdev->stats.lock);
845 + }
846 ++ kfree(tpt);
847 + return err;
848 + }
849 +
850 +diff --git a/drivers/input/misc/da9063_onkey.c b/drivers/input/misc/da9063_onkey.c
851 +index 3e9c353d82ef..a01b25facf46 100644
852 +--- a/drivers/input/misc/da9063_onkey.c
853 ++++ b/drivers/input/misc/da9063_onkey.c
854 +@@ -248,10 +248,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
855 + onkey->input->phys = onkey->phys;
856 + onkey->input->dev.parent = &pdev->dev;
857 +
858 +- if (onkey->key_power)
859 +- input_set_capability(onkey->input, EV_KEY, KEY_POWER);
860 +-
861 +- input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
862 ++ input_set_capability(onkey->input, EV_KEY, KEY_POWER);
863 +
864 + INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
865 +
866 +diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
867 +index 7fb358f96195..162526a0d463 100644
868 +--- a/drivers/input/rmi4/rmi_driver.c
869 ++++ b/drivers/input/rmi4/rmi_driver.c
870 +@@ -149,7 +149,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
871 + }
872 +
873 + mutex_lock(&data->irq_mutex);
874 +- bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
875 ++ bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
876 + data->irq_count);
877 + /*
878 + * At this point, irq_status has all bits that are set in the
879 +@@ -388,6 +388,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
880 + bitmap_copy(data->current_irq_mask, data->new_irq_mask,
881 + data->num_of_irq_regs);
882 +
883 ++ bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
884 ++
885 + error_unlock:
886 + mutex_unlock(&data->irq_mutex);
887 + return error;
888 +@@ -401,6 +403,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
889 + struct device *dev = &rmi_dev->dev;
890 +
891 + mutex_lock(&data->irq_mutex);
892 ++ bitmap_andnot(data->fn_irq_bits,
893 ++ data->fn_irq_bits, mask, data->irq_count);
894 + bitmap_andnot(data->new_irq_mask,
895 + data->current_irq_mask, mask, data->irq_count);
896 +
897 +diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
898 +index b29a8327eed1..84ff70027c25 100644
899 +--- a/drivers/md/dm-cache-target.c
900 ++++ b/drivers/md/dm-cache-target.c
901 +@@ -541,7 +541,7 @@ static void wake_migration_worker(struct cache *cache)
902 +
903 + static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
904 + {
905 +- return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
906 ++ return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
907 + }
908 +
909 + static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
910 +@@ -553,9 +553,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
911 + {
912 + struct dm_cache_migration *mg;
913 +
914 +- mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
915 +- if (!mg)
916 +- return NULL;
917 ++ mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
918 +
919 + memset(mg, 0, sizeof(*mg));
920 +
921 +@@ -663,10 +661,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
922 + struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
923 +
924 + cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
925 +- if (!cell_prealloc) {
926 +- defer_bio(cache, bio);
927 +- return false;
928 +- }
929 +
930 + build_key(oblock, end, &key);
931 + r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
932 +@@ -1492,11 +1486,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
933 + struct dm_bio_prison_cell_v2 *prealloc;
934 +
935 + prealloc = alloc_prison_cell(cache);
936 +- if (!prealloc) {
937 +- DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
938 +- mg_complete(mg, false);
939 +- return -ENOMEM;
940 +- }
941 +
942 + /*
943 + * Prevent writes to the block, but allow reads to continue.
944 +@@ -1534,11 +1523,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
945 + }
946 +
947 + mg = alloc_migration(cache);
948 +- if (!mg) {
949 +- policy_complete_background_work(cache->policy, op, false);
950 +- background_work_end(cache);
951 +- return -ENOMEM;
952 +- }
953 +
954 + mg->op = op;
955 + mg->overwrite_bio = bio;
956 +@@ -1627,10 +1611,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
957 + struct dm_bio_prison_cell_v2 *prealloc;
958 +
959 + prealloc = alloc_prison_cell(cache);
960 +- if (!prealloc) {
961 +- invalidate_complete(mg, false);
962 +- return -ENOMEM;
963 +- }
964 +
965 + build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
966 + r = dm_cell_lock_v2(cache->prison, &key,
967 +@@ -1668,10 +1648,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
968 + return -EPERM;
969 +
970 + mg = alloc_migration(cache);
971 +- if (!mg) {
972 +- background_work_end(cache);
973 +- return -ENOMEM;
974 +- }
975 +
976 + mg->overwrite_bio = bio;
977 + mg->invalidate_cblock = cblock;
978 +diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
979 +index 43fa7dbf844b..3cafbfd655f5 100644
980 +--- a/drivers/md/raid0.c
981 ++++ b/drivers/md/raid0.c
982 +@@ -158,7 +158,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
983 + } else {
984 + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
985 + mdname(mddev));
986 +- pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
987 ++ pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
988 + err = -ENOTSUPP;
989 + goto abort;
990 + }
991 +diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
992 +index bcdca9fbef51..29f5021d21ea 100644
993 +--- a/drivers/memstick/host/jmb38x_ms.c
994 ++++ b/drivers/memstick/host/jmb38x_ms.c
995 +@@ -949,7 +949,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
996 + if (!cnt) {
997 + rc = -ENODEV;
998 + pci_dev_busy = 1;
999 +- goto err_out;
1000 ++ goto err_out_int;
1001 + }
1002 +
1003 + jm = kzalloc(sizeof(struct jmb38x_ms)
1004 +diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
1005 +index a8af682a9182..28f5aaca505a 100644
1006 +--- a/drivers/mmc/host/cqhci.c
1007 ++++ b/drivers/mmc/host/cqhci.c
1008 +@@ -617,7 +617,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1009 + cq_host->slot[tag].flags = 0;
1010 +
1011 + cq_host->qcnt += 1;
1012 +-
1013 ++ /* Make sure descriptors are ready before ringing the doorbell */
1014 ++ wmb();
1015 + cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
1016 + if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
1017 + pr_debug("%s: cqhci: doorbell not set for tag %d\n",
1018 +diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
1019 +index bdd8f2df6630..33232cc9fb04 100644
1020 +--- a/drivers/net/dsa/qca8k.c
1021 ++++ b/drivers/net/dsa/qca8k.c
1022 +@@ -543,7 +543,7 @@ qca8k_setup(struct dsa_switch *ds)
1023 + BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
1024 +
1025 + /* Setup connection between CPU port & user ports */
1026 +- for (i = 0; i < DSA_MAX_PORTS; i++) {
1027 ++ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
1028 + /* CPU port gets connected to all user ports of the switch */
1029 + if (dsa_is_cpu_port(ds, i)) {
1030 + qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
1031 +@@ -897,7 +897,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
1032 + if (id != QCA8K_ID_QCA8337)
1033 + return -ENODEV;
1034 +
1035 +- priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
1036 ++ priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
1037 + if (!priv->ds)
1038 + return -ENOMEM;
1039 +
1040 +diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
1041 +index a4d5049df692..f4b14b6acd22 100644
1042 +--- a/drivers/net/dsa/rtl8366rb.c
1043 ++++ b/drivers/net/dsa/rtl8366rb.c
1044 +@@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
1045 + irq = of_irq_get(intc, 0);
1046 + if (irq <= 0) {
1047 + dev_err(smi->dev, "failed to get parent IRQ\n");
1048 +- return irq ? irq : -EINVAL;
1049 ++ ret = irq ? irq : -EINVAL;
1050 ++ goto out_put_node;
1051 + }
1052 +
1053 + /* This clears the IRQ status register */
1054 +@@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
1055 + &val);
1056 + if (ret) {
1057 + dev_err(smi->dev, "can't read interrupt status\n");
1058 +- return ret;
1059 ++ goto out_put_node;
1060 + }
1061 +
1062 + /* Fetch IRQ edge information from the descriptor */
1063 +@@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
1064 + val);
1065 + if (ret) {
1066 + dev_err(smi->dev, "could not configure IRQ polarity\n");
1067 +- return ret;
1068 ++ goto out_put_node;
1069 + }
1070 +
1071 + ret = devm_request_threaded_irq(smi->dev, irq, NULL,
1072 +@@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
1073 + "RTL8366RB", smi);
1074 + if (ret) {
1075 + dev_err(smi->dev, "unable to request irq: %d\n", ret);
1076 +- return ret;
1077 ++ goto out_put_node;
1078 + }
1079 + smi->irqdomain = irq_domain_add_linear(intc,
1080 + RTL8366RB_NUM_INTERRUPT,
1081 +@@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
1082 + smi);
1083 + if (!smi->irqdomain) {
1084 + dev_err(smi->dev, "failed to create IRQ domain\n");
1085 +- return -EINVAL;
1086 ++ ret = -EINVAL;
1087 ++ goto out_put_node;
1088 + }
1089 + for (i = 0; i < smi->num_ports; i++)
1090 + irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
1091 +
1092 +- return 0;
1093 ++out_put_node:
1094 ++ of_node_put(intc);
1095 ++ return ret;
1096 + }
1097 +
1098 + static int rtl8366rb_set_addr(struct realtek_smi *smi)
1099 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
1100 +index 14b49612aa86..4dabf37319c8 100644
1101 +--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
1102 ++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
1103 +@@ -369,6 +369,7 @@ struct bcmgenet_mib_counters {
1104 + #define EXT_PWR_DOWN_PHY_EN (1 << 20)
1105 +
1106 + #define EXT_RGMII_OOB_CTRL 0x0C
1107 ++#define RGMII_MODE_EN_V123 (1 << 0)
1108 + #define RGMII_LINK (1 << 4)
1109 + #define OOB_DISABLE (1 << 5)
1110 + #define RGMII_MODE_EN (1 << 6)
1111 +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1112 +index de0e24d912fe..0d527fa5de61 100644
1113 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
1114 ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1115 +@@ -261,7 +261,11 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
1116 + */
1117 + if (priv->ext_phy) {
1118 + reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
1119 +- reg |= RGMII_MODE_EN | id_mode_dis;
1120 ++ reg |= id_mode_dis;
1121 ++ if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
1122 ++ reg |= RGMII_MODE_EN_V123;
1123 ++ else
1124 ++ reg |= RGMII_MODE_EN;
1125 + bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
1126 + }
1127 +
1128 +@@ -276,11 +280,12 @@ int bcmgenet_mii_probe(struct net_device *dev)
1129 + struct bcmgenet_priv *priv = netdev_priv(dev);
1130 + struct device_node *dn = priv->pdev->dev.of_node;
1131 + struct phy_device *phydev;
1132 +- u32 phy_flags;
1133 ++ u32 phy_flags = 0;
1134 + int ret;
1135 +
1136 + /* Communicate the integrated PHY revision */
1137 +- phy_flags = priv->gphy_rev;
1138 ++ if (priv->internal_phy)
1139 ++ phy_flags = priv->gphy_rev;
1140 +
1141 + /* Initialize link state variables that bcmgenet_mii_setup() uses */
1142 + priv->old_link = -1;
1143 +diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
1144 +index baf5cc251f32..9a3bc0994a1d 100644
1145 +--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
1146 ++++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
1147 +@@ -156,11 +156,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
1148 + {
1149 + u32 time_cnt;
1150 + u32 reg_value;
1151 ++ int ret;
1152 +
1153 + regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
1154 +
1155 + for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
1156 +- regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
1157 ++ ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
1158 ++ if (ret)
1159 ++ return ret;
1160 ++
1161 + reg_value &= st_msk;
1162 + if ((!!check_st) == (!!reg_value))
1163 + break;
1164 +diff --git a/drivers/net/ethernet/i825xx/lasi_82596.c b/drivers/net/ethernet/i825xx/lasi_82596.c
1165 +index b69c622ba8b2..6f0e4019adef 100644
1166 +--- a/drivers/net/ethernet/i825xx/lasi_82596.c
1167 ++++ b/drivers/net/ethernet/i825xx/lasi_82596.c
1168 +@@ -96,6 +96,8 @@
1169 +
1170 + #define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
1171 +
1172 ++#define LIB82596_DMA_ATTR DMA_ATTR_NON_CONSISTENT
1173 ++
1174 + #define DMA_WBACK(ndev, addr, len) \
1175 + do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
1176 +
1177 +@@ -199,7 +201,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
1178 +
1179 + unregister_netdev (dev);
1180 + dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
1181 +- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
1182 ++ lp->dma_addr, LIB82596_DMA_ATTR);
1183 + free_netdev (dev);
1184 + return 0;
1185 + }
1186 +diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
1187 +index 2f7ae118217f..d0e8193ca470 100644
1188 +--- a/drivers/net/ethernet/i825xx/lib82596.c
1189 ++++ b/drivers/net/ethernet/i825xx/lib82596.c
1190 +@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
1191 +
1192 + dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
1193 + &lp->dma_addr, GFP_KERNEL,
1194 +- DMA_ATTR_NON_CONSISTENT);
1195 ++ LIB82596_DMA_ATTR);
1196 + if (!dma) {
1197 + printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
1198 + return -ENOMEM;
1199 +@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
1200 + i = register_netdev(dev);
1201 + if (i) {
1202 + dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
1203 +- dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
1204 ++ dma, lp->dma_addr, LIB82596_DMA_ATTR);
1205 + return i;
1206 + }
1207 +
1208 +diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
1209 +index b2c04a789744..43c1fd18670b 100644
1210 +--- a/drivers/net/ethernet/i825xx/sni_82596.c
1211 ++++ b/drivers/net/ethernet/i825xx/sni_82596.c
1212 +@@ -23,6 +23,8 @@
1213 +
1214 + static const char sni_82596_string[] = "snirm_82596";
1215 +
1216 ++#define LIB82596_DMA_ATTR 0
1217 ++
1218 + #define DMA_WBACK(priv, addr, len) do { } while (0)
1219 + #define DMA_INV(priv, addr, len) do { } while (0)
1220 + #define DMA_WBACK_INV(priv, addr, len) do { } while (0)
1221 +@@ -151,7 +153,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
1222 +
1223 + unregister_netdev(dev);
1224 + dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
1225 +- lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
1226 ++ lp->dma_addr, LIB82596_DMA_ATTR);
1227 + iounmap(lp->ca);
1228 + iounmap(lp->mpu_port);
1229 + free_netdev (dev);
1230 +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
1231 +index aa067a7a72d4..8fa14736449b 100644
1232 +--- a/drivers/net/ethernet/ibm/ibmvnic.c
1233 ++++ b/drivers/net/ethernet/ibm/ibmvnic.c
1234 +@@ -2731,12 +2731,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
1235 +
1236 + if (adapter->resetting &&
1237 + adapter->reset_reason == VNIC_RESET_MOBILITY) {
1238 +- u64 val = (0xff000000) | scrq->hw_irq;
1239 ++ struct irq_desc *desc = irq_to_desc(scrq->irq);
1240 ++ struct irq_chip *chip = irq_desc_get_chip(desc);
1241 +
1242 +- rc = plpar_hcall_norets(H_EOI, val);
1243 +- if (rc)
1244 +- dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
1245 +- val, rc);
1246 ++ chip->irq_eoi(&desc->irq_data);
1247 + }
1248 +
1249 + rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
1250 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1251 +index 0101ebaecf02..014fe93ed2d8 100644
1252 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1253 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
1254 +@@ -4522,8 +4522,10 @@ int stmmac_suspend(struct device *dev)
1255 + stmmac_mac_set(priv, priv->ioaddr, false);
1256 + pinctrl_pm_select_sleep_state(priv->device);
1257 + /* Disable clock in case of PWM is off */
1258 +- clk_disable(priv->plat->pclk);
1259 +- clk_disable(priv->plat->stmmac_clk);
1260 ++ if (priv->plat->clk_ptp_ref)
1261 ++ clk_disable_unprepare(priv->plat->clk_ptp_ref);
1262 ++ clk_disable_unprepare(priv->plat->pclk);
1263 ++ clk_disable_unprepare(priv->plat->stmmac_clk);
1264 + }
1265 + mutex_unlock(&priv->lock);
1266 +
1267 +@@ -4588,8 +4590,10 @@ int stmmac_resume(struct device *dev)
1268 + } else {
1269 + pinctrl_pm_select_default_state(priv->device);
1270 + /* enable the clk previously disabled */
1271 +- clk_enable(priv->plat->stmmac_clk);
1272 +- clk_enable(priv->plat->pclk);
1273 ++ clk_prepare_enable(priv->plat->stmmac_clk);
1274 ++ clk_prepare_enable(priv->plat->pclk);
1275 ++ if (priv->plat->clk_ptp_ref)
1276 ++ clk_prepare_enable(priv->plat->clk_ptp_ref);
1277 + /* reset the phy so that it's ready */
1278 + if (priv->mii)
1279 + stmmac_mdio_reset(priv->mii);
1280 +diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
1281 +index b2ff903a9cb6..38a41651e451 100644
1282 +--- a/drivers/net/ieee802154/ca8210.c
1283 ++++ b/drivers/net/ieee802154/ca8210.c
1284 +@@ -3151,12 +3151,12 @@ static int ca8210_probe(struct spi_device *spi_device)
1285 + goto error;
1286 + }
1287 +
1288 ++ priv->spi->dev.platform_data = pdata;
1289 + ret = ca8210_get_platform_data(priv->spi, pdata);
1290 + if (ret) {
1291 + dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
1292 + goto error;
1293 + }
1294 +- priv->spi->dev.platform_data = pdata;
1295 +
1296 + ret = ca8210_dev_com_init(priv);
1297 + if (ret) {
1298 +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1299 +index a065a6184f7e..a291e5f2daef 100644
1300 +--- a/drivers/net/usb/r8152.c
1301 ++++ b/drivers/net/usb/r8152.c
1302 +@@ -4474,10 +4474,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
1303 + struct r8152 *tp = usb_get_intfdata(intf);
1304 +
1305 + clear_bit(SELECTIVE_SUSPEND, &tp->flags);
1306 +- mutex_lock(&tp->control);
1307 + tp->rtl_ops.init(tp);
1308 + queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
1309 +- mutex_unlock(&tp->control);
1310 ++ set_ethernet_addr(tp);
1311 + return rtl8152_resume(intf);
1312 + }
1313 +
1314 +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
1315 +index 82add0ac4a5f..27b6b141cb71 100644
1316 +--- a/drivers/net/xen-netback/interface.c
1317 ++++ b/drivers/net/xen-netback/interface.c
1318 +@@ -718,7 +718,6 @@ err_unmap:
1319 + xenvif_unmap_frontend_data_rings(queue);
1320 + netif_napi_del(&queue->napi);
1321 + err:
1322 +- module_put(THIS_MODULE);
1323 + return err;
1324 + }
1325 +
1326 +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
1327 +index ae0b01059fc6..5d0f99bcc987 100644
1328 +--- a/drivers/nvme/host/core.c
1329 ++++ b/drivers/nvme/host/core.c
1330 +@@ -111,10 +111,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
1331 + */
1332 + if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
1333 + return;
1334 +- revalidate_disk(ns->disk);
1335 + blk_set_queue_dying(ns->queue);
1336 + /* Forcibly unquiesce queues to avoid blocking dispatch */
1337 + blk_mq_unquiesce_queue(ns->queue);
1338 ++ /*
1339 ++ * Revalidate after unblocking dispatchers that may be holding bd_butex
1340 ++ */
1341 ++ revalidate_disk(ns->disk);
1342 + }
1343 +
1344 + static void nvme_queue_scan(struct nvme_ctrl *ctrl)
1345 +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
1346 +index 6384930a6749..2baf1f82f893 100644
1347 +--- a/drivers/pci/pci.c
1348 ++++ b/drivers/pci/pci.c
1349 +@@ -925,19 +925,6 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1350 + }
1351 + }
1352 +
1353 +-/**
1354 +- * pci_power_up - Put the given device into D0 forcibly
1355 +- * @dev: PCI device to power up
1356 +- */
1357 +-void pci_power_up(struct pci_dev *dev)
1358 +-{
1359 +- if (platform_pci_power_manageable(dev))
1360 +- platform_pci_set_power_state(dev, PCI_D0);
1361 +-
1362 +- pci_raw_set_power_state(dev, PCI_D0);
1363 +- pci_update_current_state(dev, PCI_D0);
1364 +-}
1365 +-
1366 + /**
1367 + * pci_platform_power_transition - Use platform to change device power state
1368 + * @dev: PCI device to handle.
1369 +@@ -1116,6 +1103,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1370 + }
1371 + EXPORT_SYMBOL(pci_set_power_state);
1372 +
1373 ++/**
1374 ++ * pci_power_up - Put the given device into D0 forcibly
1375 ++ * @dev: PCI device to power up
1376 ++ */
1377 ++void pci_power_up(struct pci_dev *dev)
1378 ++{
1379 ++ __pci_start_power_transition(dev, PCI_D0);
1380 ++ pci_raw_set_power_state(dev, PCI_D0);
1381 ++ pci_update_current_state(dev, PCI_D0);
1382 ++}
1383 ++
1384 + /**
1385 + * pci_choose_state - Choose the power state of a PCI device
1386 + * @dev: PCI device to be suspended
1387 +diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
1388 +index b7e272d6ae81..227646eb817c 100644
1389 +--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
1390 ++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
1391 +@@ -1524,7 +1524,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1392 + .matches = {
1393 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1394 + DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
1395 +- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1396 + },
1397 + },
1398 + {
1399 +@@ -1532,7 +1531,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1400 + .matches = {
1401 + DMI_MATCH(DMI_SYS_VENDOR, "HP"),
1402 + DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
1403 +- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1404 + },
1405 + },
1406 + {
1407 +@@ -1540,7 +1538,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1408 + .matches = {
1409 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1410 + DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
1411 +- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1412 + },
1413 + },
1414 + {
1415 +@@ -1548,7 +1545,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
1416 + .matches = {
1417 + DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
1418 + DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
1419 +- DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
1420 + },
1421 + },
1422 + {}
1423 +diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
1424 +index aa48b3f23c7f..3aac640596ad 100644
1425 +--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
1426 ++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
1427 +@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
1428 + PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
1429 + BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
1430 + 18, 2, "gpio", "uart"),
1431 +- PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
1432 +- PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
1433 +- PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
1434 +- PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
1435 ++ PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
1436 ++ PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
1437 ++ PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
1438 ++ PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
1439 +
1440 + };
1441 +
1442 +@@ -218,11 +218,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
1443 + };
1444 +
1445 + static inline void armada_37xx_update_reg(unsigned int *reg,
1446 +- unsigned int offset)
1447 ++ unsigned int *offset)
1448 + {
1449 + /* We never have more than 2 registers */
1450 +- if (offset >= GPIO_PER_REG) {
1451 +- offset -= GPIO_PER_REG;
1452 ++ if (*offset >= GPIO_PER_REG) {
1453 ++ *offset -= GPIO_PER_REG;
1454 + *reg += sizeof(u32);
1455 + }
1456 + }
1457 +@@ -373,7 +373,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
1458 + {
1459 + int offset = irqd_to_hwirq(d);
1460 +
1461 +- armada_37xx_update_reg(reg, offset);
1462 ++ armada_37xx_update_reg(reg, &offset);
1463 + }
1464 +
1465 + static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
1466 +@@ -383,7 +383,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
1467 + unsigned int reg = OUTPUT_EN;
1468 + unsigned int mask;
1469 +
1470 +- armada_37xx_update_reg(&reg, offset);
1471 ++ armada_37xx_update_reg(&reg, &offset);
1472 + mask = BIT(offset);
1473 +
1474 + return regmap_update_bits(info->regmap, reg, mask, 0);
1475 +@@ -396,7 +396,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
1476 + unsigned int reg = OUTPUT_EN;
1477 + unsigned int val, mask;
1478 +
1479 +- armada_37xx_update_reg(&reg, offset);
1480 ++ armada_37xx_update_reg(&reg, &offset);
1481 + mask = BIT(offset);
1482 + regmap_read(info->regmap, reg, &val);
1483 +
1484 +@@ -410,7 +410,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
1485 + unsigned int reg = OUTPUT_EN;
1486 + unsigned int mask, val, ret;
1487 +
1488 +- armada_37xx_update_reg(&reg, offset);
1489 ++ armada_37xx_update_reg(&reg, &offset);
1490 + mask = BIT(offset);
1491 +
1492 + ret = regmap_update_bits(info->regmap, reg, mask, mask);
1493 +@@ -431,7 +431,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
1494 + unsigned int reg = INPUT_VAL;
1495 + unsigned int val, mask;
1496 +
1497 +- armada_37xx_update_reg(&reg, offset);
1498 ++ armada_37xx_update_reg(&reg, &offset);
1499 + mask = BIT(offset);
1500 +
1501 + regmap_read(info->regmap, reg, &val);
1502 +@@ -446,7 +446,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
1503 + unsigned int reg = OUTPUT_VAL;
1504 + unsigned int mask, val;
1505 +
1506 +- armada_37xx_update_reg(&reg, offset);
1507 ++ armada_37xx_update_reg(&reg, &offset);
1508 + mask = BIT(offset);
1509 + val = value ? mask : 0;
1510 +
1511 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
1512 +index aff073a5b52b..df888506e363 100644
1513 +--- a/drivers/s390/scsi/zfcp_fsf.c
1514 ++++ b/drivers/s390/scsi/zfcp_fsf.c
1515 +@@ -21,6 +21,11 @@
1516 +
1517 + struct kmem_cache *zfcp_fsf_qtcb_cache;
1518 +
1519 ++static bool ber_stop = true;
1520 ++module_param(ber_stop, bool, 0600);
1521 ++MODULE_PARM_DESC(ber_stop,
1522 ++ "Shuts down FCP devices for FCP channels that report a bit-error count in excess of its threshold (default on)");
1523 ++
1524 + static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
1525 + {
1526 + struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
1527 +@@ -230,10 +235,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
1528 + case FSF_STATUS_READ_SENSE_DATA_AVAIL:
1529 + break;
1530 + case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
1531 +- dev_warn(&adapter->ccw_device->dev,
1532 +- "The error threshold for checksum statistics "
1533 +- "has been exceeded\n");
1534 + zfcp_dbf_hba_bit_err("fssrh_3", req);
1535 ++ if (ber_stop) {
1536 ++ dev_warn(&adapter->ccw_device->dev,
1537 ++ "All paths over this FCP device are disused because of excessive bit errors\n");
1538 ++ zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
1539 ++ } else {
1540 ++ dev_warn(&adapter->ccw_device->dev,
1541 ++ "The error threshold for checksum statistics has been exceeded\n");
1542 ++ }
1543 + break;
1544 + case FSF_STATUS_READ_LINK_DOWN:
1545 + zfcp_fsf_status_read_link_down(req);
1546 +diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
1547 +index 1c5051b1c125..9e287927b7f9 100644
1548 +--- a/drivers/scsi/ch.c
1549 ++++ b/drivers/scsi/ch.c
1550 +@@ -578,7 +578,6 @@ ch_release(struct inode *inode, struct file *file)
1551 + scsi_changer *ch = file->private_data;
1552 +
1553 + scsi_device_put(ch->device);
1554 +- ch->device = NULL;
1555 + file->private_data = NULL;
1556 + kref_put(&ch->ref, ch_destroy);
1557 + return 0;
1558 +diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
1559 +index 8c7154143a4e..a84878fbf45d 100644
1560 +--- a/drivers/scsi/megaraid.c
1561 ++++ b/drivers/scsi/megaraid.c
1562 +@@ -4189,11 +4189,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1563 + */
1564 + if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
1565 + pdev->subsystem_device == 0xC000)
1566 +- return -ENODEV;
1567 ++ goto out_disable_device;
1568 + /* Now check the magic signature byte */
1569 + pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
1570 + if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
1571 +- return -ENODEV;
1572 ++ goto out_disable_device;
1573 + /* Ok it is probably a megaraid */
1574 + }
1575 +
1576 +diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
1577 +index 7a1cc0b25e59..d6dc320f81a7 100644
1578 +--- a/drivers/scsi/qla2xxx/qla_target.c
1579 ++++ b/drivers/scsi/qla2xxx/qla_target.c
1580 +@@ -1023,6 +1023,7 @@ void qlt_free_session_done(struct work_struct *work)
1581 +
1582 + if (logout_started) {
1583 + bool traced = false;
1584 ++ u16 cnt = 0;
1585 +
1586 + while (!READ_ONCE(sess->logout_completed)) {
1587 + if (!traced) {
1588 +@@ -1032,6 +1033,9 @@ void qlt_free_session_done(struct work_struct *work)
1589 + traced = true;
1590 + }
1591 + msleep(100);
1592 ++ cnt++;
1593 ++ if (cnt > 200)
1594 ++ break;
1595 + }
1596 +
1597 + ql_dbg(ql_dbg_disc, vha, 0xf087,
1598 +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
1599 +index b7a8fdfeb2f4..e731af504f07 100644
1600 +--- a/drivers/scsi/scsi_error.c
1601 ++++ b/drivers/scsi/scsi_error.c
1602 +@@ -970,6 +970,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
1603 + ses->sdb = scmd->sdb;
1604 + ses->next_rq = scmd->request->next_rq;
1605 + ses->result = scmd->result;
1606 ++ ses->resid_len = scmd->req.resid_len;
1607 + ses->underflow = scmd->underflow;
1608 + ses->prot_op = scmd->prot_op;
1609 + ses->eh_eflags = scmd->eh_eflags;
1610 +@@ -981,6 +982,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
1611 + memset(&scmd->sdb, 0, sizeof(scmd->sdb));
1612 + scmd->request->next_rq = NULL;
1613 + scmd->result = 0;
1614 ++ scmd->req.resid_len = 0;
1615 +
1616 + if (sense_bytes) {
1617 + scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
1618 +@@ -1034,6 +1036,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
1619 + scmd->sdb = ses->sdb;
1620 + scmd->request->next_rq = ses->next_rq;
1621 + scmd->result = ses->result;
1622 ++ scmd->req.resid_len = ses->resid_len;
1623 + scmd->underflow = ses->underflow;
1624 + scmd->prot_op = ses->prot_op;
1625 + scmd->eh_eflags = ses->eh_eflags;
1626 +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
1627 +index 3aee9464a7bf..186f779fa60c 100644
1628 +--- a/drivers/scsi/scsi_sysfs.c
1629 ++++ b/drivers/scsi/scsi_sysfs.c
1630 +@@ -723,6 +723,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
1631 + const char *buf, size_t count)
1632 + {
1633 + struct kernfs_node *kn;
1634 ++ struct scsi_device *sdev = to_scsi_device(dev);
1635 ++
1636 ++ /*
1637 ++ * We need to try to get module, avoiding the module been removed
1638 ++ * during delete.
1639 ++ */
1640 ++ if (scsi_device_get(sdev))
1641 ++ return -ENODEV;
1642 +
1643 + kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
1644 + WARN_ON_ONCE(!kn);
1645 +@@ -737,9 +745,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
1646 + * state into SDEV_DEL.
1647 + */
1648 + device_remove_file(dev, attr);
1649 +- scsi_remove_device(to_scsi_device(dev));
1650 ++ scsi_remove_device(sdev);
1651 + if (kn)
1652 + sysfs_unbreak_active_protection(kn);
1653 ++ scsi_device_put(sdev);
1654 + return count;
1655 + };
1656 + static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
1657 +diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1658 +index 77cb45ef55fc..f8a09e6678d4 100644
1659 +--- a/drivers/scsi/sd.c
1660 ++++ b/drivers/scsi/sd.c
1661 +@@ -1646,7 +1646,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
1662 + /* we need to evaluate the error return */
1663 + if (scsi_sense_valid(sshdr) &&
1664 + (sshdr->asc == 0x3a || /* medium not present */
1665 +- sshdr->asc == 0x20)) /* invalid command */
1666 ++ sshdr->asc == 0x20 || /* invalid command */
1667 ++ (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */
1668 + /* this is no error here */
1669 + return 0;
1670 +
1671 +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
1672 +index b8b59cfeacd1..4aaba3e03055 100644
1673 +--- a/drivers/scsi/ufs/ufshcd.c
1674 ++++ b/drivers/scsi/ufs/ufshcd.c
1675 +@@ -7874,6 +7874,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
1676 + {
1677 + int ret = 0;
1678 +
1679 ++ if (!hba->is_powered)
1680 ++ goto out;
1681 ++
1682 + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
1683 + goto out;
1684 +
1685 +diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
1686 +index d4cf09b11e33..095df245ced5 100644
1687 +--- a/drivers/staging/wlan-ng/cfg80211.c
1688 ++++ b/drivers/staging/wlan-ng/cfg80211.c
1689 +@@ -476,10 +476,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
1690 + /* Set the encryption - we only support wep */
1691 + if (is_wep) {
1692 + if (sme->key) {
1693 +- if (sme->key_idx >= NUM_WEPKEYS) {
1694 +- err = -EINVAL;
1695 +- goto exit;
1696 +- }
1697 ++ if (sme->key_idx >= NUM_WEPKEYS)
1698 ++ return -EINVAL;
1699 +
1700 + result = prism2_domibset_uint32(wlandev,
1701 + DIDmib_dot11smt_dot11PrivacyTable_dot11WEPDefaultKeyID,
1702 +diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
1703 +index 502e9bf1746f..4a80103675d5 100644
1704 +--- a/drivers/usb/class/usblp.c
1705 ++++ b/drivers/usb/class/usblp.c
1706 +@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
1707 + kfree(usblp->readbuf);
1708 + kfree(usblp->device_id_string);
1709 + kfree(usblp->statusbuf);
1710 ++ usb_put_intf(usblp->intf);
1711 + kfree(usblp);
1712 + }
1713 +
1714 +@@ -1107,7 +1108,7 @@ static int usblp_probe(struct usb_interface *intf,
1715 + init_waitqueue_head(&usblp->wwait);
1716 + init_usb_anchor(&usblp->urbs);
1717 + usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
1718 +- usblp->intf = intf;
1719 ++ usblp->intf = usb_get_intf(intf);
1720 +
1721 + /* Malloc device ID string buffer to the largest expected length,
1722 + * since we can re-query it on an ioctl and a dynamic string
1723 +@@ -1196,6 +1197,7 @@ abort:
1724 + kfree(usblp->readbuf);
1725 + kfree(usblp->statusbuf);
1726 + kfree(usblp->device_id_string);
1727 ++ usb_put_intf(usblp->intf);
1728 + kfree(usblp);
1729 + abort_ret:
1730 + return retval;
1731 +diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c
1732 +index eafc2a00c96a..21921db068f6 100644
1733 +--- a/drivers/usb/gadget/udc/lpc32xx_udc.c
1734 ++++ b/drivers/usb/gadget/udc/lpc32xx_udc.c
1735 +@@ -1165,11 +1165,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
1736 + tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
1737 +
1738 + bl = bytes - n;
1739 +- if (bl > 3)
1740 +- bl = 3;
1741 ++ if (bl > 4)
1742 ++ bl = 4;
1743 +
1744 + for (i = 0; i < bl; i++)
1745 +- data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
1746 ++ data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
1747 + }
1748 + break;
1749 +
1750 +diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
1751 +index b9cbcf35d4e1..6b3a6fd7d271 100644
1752 +--- a/drivers/usb/misc/ldusb.c
1753 ++++ b/drivers/usb/misc/ldusb.c
1754 +@@ -380,10 +380,7 @@ static int ld_usb_release(struct inode *inode, struct file *file)
1755 + goto exit;
1756 + }
1757 +
1758 +- if (mutex_lock_interruptible(&dev->mutex)) {
1759 +- retval = -ERESTARTSYS;
1760 +- goto exit;
1761 +- }
1762 ++ mutex_lock(&dev->mutex);
1763 +
1764 + if (dev->open_count != 1) {
1765 + retval = -ENODEV;
1766 +@@ -467,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
1767 +
1768 + /* wait for data */
1769 + spin_lock_irq(&dev->rbsl);
1770 +- if (dev->ring_head == dev->ring_tail) {
1771 ++ while (dev->ring_head == dev->ring_tail) {
1772 + dev->interrupt_in_done = 0;
1773 + spin_unlock_irq(&dev->rbsl);
1774 + if (file->f_flags & O_NONBLOCK) {
1775 +@@ -477,12 +474,17 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
1776 + retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
1777 + if (retval < 0)
1778 + goto unlock_exit;
1779 +- } else {
1780 +- spin_unlock_irq(&dev->rbsl);
1781 ++
1782 ++ spin_lock_irq(&dev->rbsl);
1783 + }
1784 ++ spin_unlock_irq(&dev->rbsl);
1785 +
1786 + /* actual_buffer contains actual_length + interrupt_in_buffer */
1787 + actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
1788 ++ if (*actual_buffer > dev->interrupt_in_endpoint_size) {
1789 ++ retval = -EIO;
1790 ++ goto unlock_exit;
1791 ++ }
1792 + bytes_to_read = min(count, *actual_buffer);
1793 + if (bytes_to_read < *actual_buffer)
1794 + dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
1795 +@@ -693,10 +695,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
1796 + dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
1797 +
1798 + dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
1799 +- dev->ring_buffer =
1800 +- kmalloc_array(ring_buffer_size,
1801 +- sizeof(size_t) + dev->interrupt_in_endpoint_size,
1802 +- GFP_KERNEL);
1803 ++ dev->ring_buffer = kcalloc(ring_buffer_size,
1804 ++ sizeof(size_t) + dev->interrupt_in_endpoint_size,
1805 ++ GFP_KERNEL);
1806 + if (!dev->ring_buffer)
1807 + goto error;
1808 + dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
1809 +diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
1810 +index 9d4c52a7ebe0..62dab2441ec4 100644
1811 +--- a/drivers/usb/misc/legousbtower.c
1812 ++++ b/drivers/usb/misc/legousbtower.c
1813 +@@ -419,10 +419,7 @@ static int tower_release (struct inode *inode, struct file *file)
1814 + goto exit;
1815 + }
1816 +
1817 +- if (mutex_lock_interruptible(&dev->lock)) {
1818 +- retval = -ERESTARTSYS;
1819 +- goto exit;
1820 +- }
1821 ++ mutex_lock(&dev->lock);
1822 +
1823 + if (dev->open_count != 1) {
1824 + dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
1825 +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
1826 +index e3c5832337e0..c9201e0a8241 100644
1827 +--- a/drivers/usb/serial/ti_usb_3410_5052.c
1828 ++++ b/drivers/usb/serial/ti_usb_3410_5052.c
1829 +@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
1830 + struct ti_port *tport;
1831 + int port_number;
1832 + int status;
1833 +- int do_unlock;
1834 + unsigned long flags;
1835 +
1836 + tdev = usb_get_serial_data(port->serial);
1837 +@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
1838 + "%s - cannot send close port command, %d\n"
1839 + , __func__, status);
1840 +
1841 +- /* if mutex_lock is interrupted, continue anyway */
1842 +- do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
1843 ++ mutex_lock(&tdev->td_open_close_lock);
1844 + --tport->tp_tdev->td_open_port_count;
1845 +- if (tport->tp_tdev->td_open_port_count <= 0) {
1846 ++ if (tport->tp_tdev->td_open_port_count == 0) {
1847 + /* last port is closed, shut down interrupt urb */
1848 + usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
1849 +- tport->tp_tdev->td_open_port_count = 0;
1850 + }
1851 +- if (do_unlock)
1852 +- mutex_unlock(&tdev->td_open_close_lock);
1853 ++ mutex_unlock(&tdev->td_open_close_lock);
1854 + }
1855 +
1856 +
1857 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
1858 +index e49e29288049..72c745682996 100644
1859 +--- a/fs/btrfs/extent-tree.c
1860 ++++ b/fs/btrfs/extent-tree.c
1861 +@@ -10000,6 +10000,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
1862 + btrfs_err(info,
1863 + "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
1864 + cache->key.objectid);
1865 ++ btrfs_put_block_group(cache);
1866 + ret = -EINVAL;
1867 + goto error;
1868 + }
1869 +diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
1870 +index c84186563c31..4870440d6424 100644
1871 +--- a/fs/btrfs/file.c
1872 ++++ b/fs/btrfs/file.c
1873 +@@ -2056,25 +2056,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1874 + struct btrfs_trans_handle *trans;
1875 + struct btrfs_log_ctx ctx;
1876 + int ret = 0, err;
1877 +- u64 len;
1878 +
1879 +- /*
1880 +- * If the inode needs a full sync, make sure we use a full range to
1881 +- * avoid log tree corruption, due to hole detection racing with ordered
1882 +- * extent completion for adjacent ranges, and assertion failures during
1883 +- * hole detection.
1884 +- */
1885 +- if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1886 +- &BTRFS_I(inode)->runtime_flags)) {
1887 +- start = 0;
1888 +- end = LLONG_MAX;
1889 +- }
1890 +-
1891 +- /*
1892 +- * The range length can be represented by u64, we have to do the typecasts
1893 +- * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
1894 +- */
1895 +- len = (u64)end - (u64)start + 1;
1896 + trace_btrfs_sync_file(file, datasync);
1897 +
1898 + btrfs_init_log_ctx(&ctx, inode);
1899 +@@ -2100,6 +2082,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1900 +
1901 + atomic_inc(&root->log_batch);
1902 +
1903 ++ /*
1904 ++ * If the inode needs a full sync, make sure we use a full range to
1905 ++ * avoid log tree corruption, due to hole detection racing with ordered
1906 ++ * extent completion for adjacent ranges, and assertion failures during
1907 ++ * hole detection. Do this while holding the inode lock, to avoid races
1908 ++ * with other tasks.
1909 ++ */
1910 ++ if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
1911 ++ &BTRFS_I(inode)->runtime_flags)) {
1912 ++ start = 0;
1913 ++ end = LLONG_MAX;
1914 ++ }
1915 ++
1916 + /*
1917 + * Before we acquired the inode's lock, someone may have dirtied more
1918 + * pages in the target range. We need to make sure that writeback for
1919 +@@ -2127,8 +2122,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1920 + /*
1921 + * We have to do this here to avoid the priority inversion of waiting on
1922 + * IO of a lower priority task while holding a transaciton open.
1923 ++ *
1924 ++ * Also, the range length can be represented by u64, we have to do the
1925 ++ * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
1926 + */
1927 +- ret = btrfs_wait_ordered_range(inode, start, len);
1928 ++ ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
1929 + if (ret) {
1930 + up_write(&BTRFS_I(inode)->dio_sem);
1931 + inode_unlock(inode);
1932 +diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
1933 +index 5d57ed629345..bccd9dede2af 100644
1934 +--- a/fs/btrfs/relocation.c
1935 ++++ b/fs/btrfs/relocation.c
1936 +@@ -3187,6 +3187,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
1937 + if (!page) {
1938 + btrfs_delalloc_release_metadata(BTRFS_I(inode),
1939 + PAGE_SIZE, true);
1940 ++ btrfs_delalloc_release_extents(BTRFS_I(inode),
1941 ++ PAGE_SIZE, true);
1942 + ret = -ENOMEM;
1943 + goto out;
1944 + }
1945 +diff --git a/fs/cifs/file.c b/fs/cifs/file.c
1946 +index 617f86beb08b..b4e33ef2ff31 100644
1947 +--- a/fs/cifs/file.c
1948 ++++ b/fs/cifs/file.c
1949 +@@ -403,10 +403,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
1950 + bool oplock_break_cancelled;
1951 +
1952 + spin_lock(&tcon->open_file_lock);
1953 +-
1954 ++ spin_lock(&cifsi->open_file_lock);
1955 + spin_lock(&cifs_file->file_info_lock);
1956 + if (--cifs_file->count > 0) {
1957 + spin_unlock(&cifs_file->file_info_lock);
1958 ++ spin_unlock(&cifsi->open_file_lock);
1959 + spin_unlock(&tcon->open_file_lock);
1960 + return;
1961 + }
1962 +@@ -419,9 +420,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
1963 + cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
1964 +
1965 + /* remove it from the lists */
1966 +- spin_lock(&cifsi->open_file_lock);
1967 + list_del(&cifs_file->flist);
1968 +- spin_unlock(&cifsi->open_file_lock);
1969 + list_del(&cifs_file->tlist);
1970 +
1971 + if (list_empty(&cifsi->openFileList)) {
1972 +@@ -437,6 +436,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
1973 + cifs_set_oplock_level(cifsi, 0);
1974 + }
1975 +
1976 ++ spin_unlock(&cifsi->open_file_lock);
1977 + spin_unlock(&tcon->open_file_lock);
1978 +
1979 + oplock_break_cancelled = wait_oplock_handler ?
1980 +diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
1981 +index 47db8eb6cbcf..c7f0c8566442 100644
1982 +--- a/fs/cifs/smb1ops.c
1983 ++++ b/fs/cifs/smb1ops.c
1984 +@@ -183,6 +183,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
1985 + /* we do not want to loop forever */
1986 + last_mid = cur_mid;
1987 + cur_mid++;
1988 ++ /* avoid 0xFFFF MID */
1989 ++ if (cur_mid == 0xffff)
1990 ++ cur_mid++;
1991 +
1992 + /*
1993 + * This nested loop looks more expensive than it is.
1994 +diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
1995 +index bd3475694e83..c492cbb2410f 100644
1996 +--- a/fs/ocfs2/journal.c
1997 ++++ b/fs/ocfs2/journal.c
1998 +@@ -231,7 +231,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
1999 + /* At this point, we know that no more recovery threads can be
2000 + * launched, so wait for any recovery completion work to
2001 + * complete. */
2002 +- flush_workqueue(osb->ocfs2_wq);
2003 ++ if (osb->ocfs2_wq)
2004 ++ flush_workqueue(osb->ocfs2_wq);
2005 +
2006 + /*
2007 + * Now that recovery is shut down, and the osb is about to be
2008 +diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
2009 +index 30208233f65b..a46aff7135d3 100644
2010 +--- a/fs/ocfs2/localalloc.c
2011 ++++ b/fs/ocfs2/localalloc.c
2012 +@@ -391,7 +391,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
2013 + struct ocfs2_dinode *alloc = NULL;
2014 +
2015 + cancel_delayed_work(&osb->la_enable_wq);
2016 +- flush_workqueue(osb->ocfs2_wq);
2017 ++ if (osb->ocfs2_wq)
2018 ++ flush_workqueue(osb->ocfs2_wq);
2019 +
2020 + if (osb->local_alloc_state == OCFS2_LA_UNUSED)
2021 + goto out;
2022 +diff --git a/fs/proc/page.c b/fs/proc/page.c
2023 +index 792c78a49174..64293df0faa3 100644
2024 +--- a/fs/proc/page.c
2025 ++++ b/fs/proc/page.c
2026 +@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
2027 + return -EINVAL;
2028 +
2029 + while (count > 0) {
2030 +- if (pfn_valid(pfn))
2031 +- ppage = pfn_to_page(pfn);
2032 +- else
2033 +- ppage = NULL;
2034 ++ /*
2035 ++ * TODO: ZONE_DEVICE support requires to identify
2036 ++ * memmaps that were actually initialized.
2037 ++ */
2038 ++ ppage = pfn_to_online_page(pfn);
2039 ++
2040 + if (!ppage || PageSlab(ppage))
2041 + pcount = 0;
2042 + else
2043 +@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
2044 + return -EINVAL;
2045 +
2046 + while (count > 0) {
2047 +- if (pfn_valid(pfn))
2048 +- ppage = pfn_to_page(pfn);
2049 +- else
2050 +- ppage = NULL;
2051 ++ /*
2052 ++ * TODO: ZONE_DEVICE support requires to identify
2053 ++ * memmaps that were actually initialized.
2054 ++ */
2055 ++ ppage = pfn_to_online_page(pfn);
2056 +
2057 + if (put_user(stable_page_flags(ppage), out)) {
2058 + ret = -EFAULT;
2059 +@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
2060 + return -EINVAL;
2061 +
2062 + while (count > 0) {
2063 +- if (pfn_valid(pfn))
2064 +- ppage = pfn_to_page(pfn);
2065 +- else
2066 +- ppage = NULL;
2067 ++ /*
2068 ++ * TODO: ZONE_DEVICE support requires to identify
2069 ++ * memmaps that were actually initialized.
2070 ++ */
2071 ++ ppage = pfn_to_online_page(pfn);
2072 +
2073 + if (ppage)
2074 + ino = page_cgroup_ino(ppage);
2075 +diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
2076 +index 2b7e227960e1..91f403341dd7 100644
2077 +--- a/include/scsi/scsi_eh.h
2078 ++++ b/include/scsi/scsi_eh.h
2079 +@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
2080 + struct scsi_eh_save {
2081 + /* saved state */
2082 + int result;
2083 ++ unsigned int resid_len;
2084 + int eh_eflags;
2085 + enum dma_data_direction data_direction;
2086 + unsigned underflow;
2087 +diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
2088 +index b401c4e36394..eb3f668b8bce 100644
2089 +--- a/include/trace/events/btrfs.h
2090 ++++ b/include/trace/events/btrfs.h
2091 +@@ -1655,6 +1655,7 @@ TRACE_EVENT(qgroup_update_reserve,
2092 + __entry->qgid = qgroup->qgroupid;
2093 + __entry->cur_reserved = qgroup->rsv.values[type];
2094 + __entry->diff = diff;
2095 ++ __entry->type = type;
2096 + ),
2097 +
2098 + TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
2099 +@@ -1677,6 +1678,7 @@ TRACE_EVENT(qgroup_meta_reserve,
2100 + TP_fast_assign_btrfs(root->fs_info,
2101 + __entry->refroot = root->objectid;
2102 + __entry->diff = diff;
2103 ++ __entry->type = type;
2104 + ),
2105 +
2106 + TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
2107 +@@ -1693,7 +1695,6 @@ TRACE_EVENT(qgroup_meta_convert,
2108 + TP_STRUCT__entry_btrfs(
2109 + __field( u64, refroot )
2110 + __field( s64, diff )
2111 +- __field( int, type )
2112 + ),
2113 +
2114 + TP_fast_assign_btrfs(root->fs_info,
2115 +diff --git a/kernel/events/core.c b/kernel/events/core.c
2116 +index 7ca44b8523c8..625ba462e5bb 100644
2117 +--- a/kernel/events/core.c
2118 ++++ b/kernel/events/core.c
2119 +@@ -6813,7 +6813,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
2120 + static int __perf_pmu_output_stop(void *info)
2121 + {
2122 + struct perf_event *event = info;
2123 +- struct pmu *pmu = event->pmu;
2124 ++ struct pmu *pmu = event->ctx->pmu;
2125 + struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2126 + struct remote_output ro = {
2127 + .rb = event->rb,
2128 +diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
2129 +index e6945b55c688..f5b3bf0e69f6 100644
2130 +--- a/kernel/trace/trace_event_perf.c
2131 ++++ b/kernel/trace/trace_event_perf.c
2132 +@@ -272,9 +272,11 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
2133 + goto out;
2134 + }
2135 +
2136 ++ mutex_lock(&event_mutex);
2137 + ret = perf_trace_event_init(tp_event, p_event);
2138 + if (ret)
2139 + destroy_local_trace_kprobe(tp_event);
2140 ++ mutex_unlock(&event_mutex);
2141 + out:
2142 + kfree(func);
2143 + return ret;
2144 +@@ -282,8 +284,10 @@ out:
2145 +
2146 + void perf_kprobe_destroy(struct perf_event *p_event)
2147 + {
2148 ++ mutex_lock(&event_mutex);
2149 + perf_trace_event_close(p_event);
2150 + perf_trace_event_unreg(p_event);
2151 ++ mutex_unlock(&event_mutex);
2152 +
2153 + destroy_local_trace_kprobe(p_event->tp_event);
2154 + }
2155 +diff --git a/lib/textsearch.c b/lib/textsearch.c
2156 +index 5939549c0e7b..9135c29add62 100644
2157 +--- a/lib/textsearch.c
2158 ++++ b/lib/textsearch.c
2159 +@@ -93,9 +93,9 @@
2160 + * goto errout;
2161 + * }
2162 + *
2163 +- * pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
2164 ++ * pos = textsearch_find_continuous(conf, &state, example, strlen(example));
2165 + * if (pos != UINT_MAX)
2166 +- * panic("Oh my god, dancing chickens at \%d\n", pos);
2167 ++ * panic("Oh my god, dancing chickens at %d\n", pos);
2168 + *
2169 + * textsearch_destroy(conf);
2170 + */
2171 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2172 +index 57053affaad2..6f4ce9547658 100644
2173 +--- a/mm/hugetlb.c
2174 ++++ b/mm/hugetlb.c
2175 +@@ -1073,11 +1073,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
2176 + struct page *page;
2177 +
2178 + for (i = start_pfn; i < end_pfn; i++) {
2179 +- if (!pfn_valid(i))
2180 ++ page = pfn_to_online_page(i);
2181 ++ if (!page)
2182 + return false;
2183 +
2184 +- page = pfn_to_page(i);
2185 +-
2186 + if (page_zone(page) != z)
2187 + return false;
2188 +
2189 +diff --git a/mm/memfd.c b/mm/memfd.c
2190 +index 2bb5e257080e..5859705dafe1 100644
2191 +--- a/mm/memfd.c
2192 ++++ b/mm/memfd.c
2193 +@@ -34,11 +34,12 @@ static void memfd_tag_pins(struct address_space *mapping)
2194 + void __rcu **slot;
2195 + pgoff_t start;
2196 + struct page *page;
2197 ++ unsigned int tagged = 0;
2198 +
2199 + lru_add_drain();
2200 + start = 0;
2201 +- rcu_read_lock();
2202 +
2203 ++ xa_lock_irq(&mapping->i_pages);
2204 + radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
2205 + page = radix_tree_deref_slot(slot);
2206 + if (!page || radix_tree_exception(page)) {
2207 +@@ -47,18 +48,19 @@ static void memfd_tag_pins(struct address_space *mapping)
2208 + continue;
2209 + }
2210 + } else if (page_count(page) - page_mapcount(page) > 1) {
2211 +- xa_lock_irq(&mapping->i_pages);
2212 + radix_tree_tag_set(&mapping->i_pages, iter.index,
2213 + MEMFD_TAG_PINNED);
2214 +- xa_unlock_irq(&mapping->i_pages);
2215 + }
2216 +
2217 +- if (need_resched()) {
2218 +- slot = radix_tree_iter_resume(slot, &iter);
2219 +- cond_resched_rcu();
2220 +- }
2221 ++ if (++tagged % 1024)
2222 ++ continue;
2223 ++
2224 ++ slot = radix_tree_iter_resume(slot, &iter);
2225 ++ xa_unlock_irq(&mapping->i_pages);
2226 ++ cond_resched();
2227 ++ xa_lock_irq(&mapping->i_pages);
2228 + }
2229 +- rcu_read_unlock();
2230 ++ xa_unlock_irq(&mapping->i_pages);
2231 + }
2232 +
2233 + /*
2234 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2235 +index 2994ceb2e7b0..148fdd929a19 100644
2236 +--- a/mm/memory-failure.c
2237 ++++ b/mm/memory-failure.c
2238 +@@ -202,7 +202,6 @@ struct to_kill {
2239 + struct task_struct *tsk;
2240 + unsigned long addr;
2241 + short size_shift;
2242 +- char addr_valid;
2243 + };
2244 +
2245 + /*
2246 +@@ -327,22 +326,27 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
2247 + }
2248 + }
2249 + tk->addr = page_address_in_vma(p, vma);
2250 +- tk->addr_valid = 1;
2251 + if (is_zone_device_page(p))
2252 + tk->size_shift = dev_pagemap_mapping_shift(p, vma);
2253 + else
2254 + tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
2255 +
2256 + /*
2257 +- * In theory we don't have to kill when the page was
2258 +- * munmaped. But it could be also a mremap. Since that's
2259 +- * likely very rare kill anyways just out of paranoia, but use
2260 +- * a SIGKILL because the error is not contained anymore.
2261 ++ * Send SIGKILL if "tk->addr == -EFAULT". Also, as
2262 ++ * "tk->size_shift" is always non-zero for !is_zone_device_page(),
2263 ++ * so "tk->size_shift == 0" effectively checks no mapping on
2264 ++ * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
2265 ++ * to a process' address space, it's possible not all N VMAs
2266 ++ * contain mappings for the page, but at least one VMA does.
2267 ++ * Only deliver SIGBUS with payload derived from the VMA that
2268 ++ * has a mapping for the page.
2269 + */
2270 +- if (tk->addr == -EFAULT || tk->size_shift == 0) {
2271 ++ if (tk->addr == -EFAULT) {
2272 + pr_info("Memory failure: Unable to find user space address %lx in %s\n",
2273 + page_to_pfn(p), tsk->comm);
2274 +- tk->addr_valid = 0;
2275 ++ } else if (tk->size_shift == 0) {
2276 ++ kfree(tk);
2277 ++ return;
2278 + }
2279 + get_task_struct(tsk);
2280 + tk->tsk = tsk;
2281 +@@ -369,7 +373,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
2282 + * make sure the process doesn't catch the
2283 + * signal and then access the memory. Just kill it.
2284 + */
2285 +- if (fail || tk->addr_valid == 0) {
2286 ++ if (fail || tk->addr == -EFAULT) {
2287 + pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
2288 + pfn, tk->tsk->comm, tk->tsk->pid);
2289 + do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
2290 +@@ -1258,17 +1262,19 @@ int memory_failure(unsigned long pfn, int flags)
2291 + if (!sysctl_memory_failure_recovery)
2292 + panic("Memory failure on page %lx", pfn);
2293 +
2294 +- if (!pfn_valid(pfn)) {
2295 ++ p = pfn_to_online_page(pfn);
2296 ++ if (!p) {
2297 ++ if (pfn_valid(pfn)) {
2298 ++ pgmap = get_dev_pagemap(pfn, NULL);
2299 ++ if (pgmap)
2300 ++ return memory_failure_dev_pagemap(pfn, flags,
2301 ++ pgmap);
2302 ++ }
2303 + pr_err("Memory failure: %#lx: memory outside kernel control\n",
2304 + pfn);
2305 + return -ENXIO;
2306 + }
2307 +
2308 +- pgmap = get_dev_pagemap(pfn, NULL);
2309 +- if (pgmap)
2310 +- return memory_failure_dev_pagemap(pfn, flags, pgmap);
2311 +-
2312 +- p = pfn_to_page(pfn);
2313 + if (PageHuge(p))
2314 + return memory_failure_hugetlb(pfn, flags);
2315 + if (TestSetPageHWPoison(p)) {
2316 +diff --git a/mm/page_owner.c b/mm/page_owner.c
2317 +index d80adfe702d3..63b1053f5b41 100644
2318 +--- a/mm/page_owner.c
2319 ++++ b/mm/page_owner.c
2320 +@@ -273,7 +273,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
2321 + * not matter as the mixed block count will still be correct
2322 + */
2323 + for (; pfn < end_pfn; ) {
2324 +- if (!pfn_valid(pfn)) {
2325 ++ page = pfn_to_online_page(pfn);
2326 ++ if (!page) {
2327 + pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
2328 + continue;
2329 + }
2330 +@@ -281,13 +282,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
2331 + block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
2332 + block_end_pfn = min(block_end_pfn, end_pfn);
2333 +
2334 +- page = pfn_to_page(pfn);
2335 + pageblock_mt = get_pageblock_migratetype(page);
2336 +
2337 + for (; pfn < block_end_pfn; pfn++) {
2338 + if (!pfn_valid_within(pfn))
2339 + continue;
2340 +
2341 ++ /* The pageblock is online, no need to recheck. */
2342 + page = pfn_to_page(pfn);
2343 +
2344 + if (page_zone(page) != zone)
2345 +diff --git a/mm/slub.c b/mm/slub.c
2346 +index 09c0e24a06d8..9c3937c5ce38 100644
2347 +--- a/mm/slub.c
2348 ++++ b/mm/slub.c
2349 +@@ -4797,7 +4797,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
2350 + }
2351 + }
2352 +
2353 +- get_online_mems();
2354 ++ /*
2355 ++ * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
2356 ++ * already held which will conflict with an existing lock order:
2357 ++ *
2358 ++ * mem_hotplug_lock->slab_mutex->kernfs_mutex
2359 ++ *
2360 ++ * We don't really need mem_hotplug_lock (to hold off
2361 ++ * slab_mem_going_offline_callback) here because slab's memory hot
2362 ++ * unplug code doesn't destroy the kmem_cache->node[] data.
2363 ++ */
2364 ++
2365 + #ifdef CONFIG_SLUB_DEBUG
2366 + if (flags & SO_ALL) {
2367 + struct kmem_cache_node *n;
2368 +@@ -4838,7 +4848,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
2369 + x += sprintf(buf + x, " N%d=%lu",
2370 + node, nodes[node]);
2371 + #endif
2372 +- put_online_mems();
2373 + kfree(nodes);
2374 + return x + sprintf(buf + x, "\n");
2375 + }
2376 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2377 +index 7065d68086ab..69127f6039b2 100644
2378 +--- a/net/ipv4/route.c
2379 ++++ b/net/ipv4/route.c
2380 +@@ -1476,7 +1476,7 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
2381 + prev = cmpxchg(p, orig, rt);
2382 + if (prev == orig) {
2383 + if (orig) {
2384 +- dst_dev_put(&orig->dst);
2385 ++ rt_add_uncached_list(orig);
2386 + dst_release(&orig->dst);
2387 + }
2388 + } else {
2389 +@@ -2381,14 +2381,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2390 + int orig_oif = fl4->flowi4_oif;
2391 + unsigned int flags = 0;
2392 + struct rtable *rth;
2393 +- int err = -ENETUNREACH;
2394 ++ int err;
2395 +
2396 + if (fl4->saddr) {
2397 +- rth = ERR_PTR(-EINVAL);
2398 + if (ipv4_is_multicast(fl4->saddr) ||
2399 + ipv4_is_lbcast(fl4->saddr) ||
2400 +- ipv4_is_zeronet(fl4->saddr))
2401 ++ ipv4_is_zeronet(fl4->saddr)) {
2402 ++ rth = ERR_PTR(-EINVAL);
2403 + goto out;
2404 ++ }
2405 ++
2406 ++ rth = ERR_PTR(-ENETUNREACH);
2407 +
2408 + /* I removed check for oif == dev_out->oif here.
2409 + It was wrong for two reasons:
2410 +diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
2411 +index 2b6d43022383..acf0749ee5bb 100644
2412 +--- a/net/ipv6/ip6_input.c
2413 ++++ b/net/ipv6/ip6_input.c
2414 +@@ -80,8 +80,10 @@ static void ip6_sublist_rcv_finish(struct list_head *head)
2415 + {
2416 + struct sk_buff *skb, *next;
2417 +
2418 +- list_for_each_entry_safe(skb, next, head, list)
2419 ++ list_for_each_entry_safe(skb, next, head, list) {
2420 ++ skb_list_del_init(skb);
2421 + dst_input(skb);
2422 ++ }
2423 + }
2424 +
2425 + static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
2426 +diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
2427 +index d37d4acafebf..316250ae9071 100644
2428 +--- a/net/mac80211/debugfs_netdev.c
2429 ++++ b/net/mac80211/debugfs_netdev.c
2430 +@@ -490,9 +490,14 @@ static ssize_t ieee80211_if_fmt_aqm(
2431 + const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
2432 + {
2433 + struct ieee80211_local *local = sdata->local;
2434 +- struct txq_info *txqi = to_txq_info(sdata->vif.txq);
2435 ++ struct txq_info *txqi;
2436 + int len;
2437 +
2438 ++ if (!sdata->vif.txq)
2439 ++ return 0;
2440 ++
2441 ++ txqi = to_txq_info(sdata->vif.txq);
2442 ++
2443 + spin_lock_bh(&local->fq.lock);
2444 + rcu_read_lock();
2445 +
2446 +@@ -659,7 +664,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
2447 + DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
2448 + DEBUGFS_ADD(hw_queues);
2449 +
2450 +- if (sdata->local->ops->wake_tx_queue)
2451 ++ if (sdata->local->ops->wake_tx_queue &&
2452 ++ sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
2453 ++ sdata->vif.type != NL80211_IFTYPE_NAN)
2454 + DEBUGFS_ADD(aqm);
2455 + }
2456 +
2457 +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
2458 +index b5c06242a92e..5c9dcafbc342 100644
2459 +--- a/net/mac80211/mlme.c
2460 ++++ b/net/mac80211/mlme.c
2461 +@@ -2554,7 +2554,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
2462 +
2463 + rcu_read_lock();
2464 + ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
2465 +- if (WARN_ON_ONCE(ssid == NULL))
2466 ++ if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
2467 ++ "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
2468 + ssid_len = 0;
2469 + else
2470 + ssid_len = ssid[1];
2471 +@@ -5039,7 +5040,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
2472 +
2473 + rcu_read_lock();
2474 + ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
2475 +- if (!ssidie) {
2476 ++ if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
2477 + rcu_read_unlock();
2478 + kfree(assoc_data);
2479 + return -EINVAL;
2480 +diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
2481 +index af1497ab9464..69d6173f91e2 100644
2482 +--- a/net/netfilter/nft_connlimit.c
2483 ++++ b/net/netfilter/nft_connlimit.c
2484 +@@ -218,8 +218,13 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
2485 + static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
2486 + {
2487 + struct nft_connlimit *priv = nft_expr_priv(expr);
2488 ++ bool ret;
2489 +
2490 +- return nf_conncount_gc_list(net, &priv->list);
2491 ++ local_bh_disable();
2492 ++ ret = nf_conncount_gc_list(net, &priv->list);
2493 ++ local_bh_enable();
2494 ++
2495 ++ return ret;
2496 + }
2497 +
2498 + static struct nft_expr_type nft_connlimit_type;
2499 +diff --git a/net/sched/act_api.c b/net/sched/act_api.c
2500 +index 7c4a4b874248..f2c4bfc79663 100644
2501 +--- a/net/sched/act_api.c
2502 ++++ b/net/sched/act_api.c
2503 +@@ -1307,11 +1307,16 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
2504 + struct netlink_ext_ack *extack)
2505 + {
2506 + size_t attr_size = 0;
2507 +- int ret = 0;
2508 ++ int loop, ret;
2509 + struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
2510 +
2511 +- ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
2512 +- &attr_size, true, extack);
2513 ++ for (loop = 0; loop < 10; loop++) {
2514 ++ ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
2515 ++ actions, &attr_size, true, extack);
2516 ++ if (ret != -EAGAIN)
2517 ++ break;
2518 ++ }
2519 ++
2520 + if (ret < 0)
2521 + return ret;
2522 + ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
2523 +@@ -1361,11 +1366,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
2524 + */
2525 + if (n->nlmsg_flags & NLM_F_REPLACE)
2526 + ovr = 1;
2527 +-replay:
2528 + ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
2529 + extack);
2530 +- if (ret == -EAGAIN)
2531 +- goto replay;
2532 + break;
2533 + case RTM_DELACTION:
2534 + ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
2535 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2536 +index 9f5b4e547b63..227b050cfe45 100644
2537 +--- a/net/sctp/socket.c
2538 ++++ b/net/sctp/socket.c
2539 +@@ -8957,7 +8957,7 @@ struct proto sctp_prot = {
2540 + .backlog_rcv = sctp_backlog_rcv,
2541 + .hash = sctp_hash,
2542 + .unhash = sctp_unhash,
2543 +- .get_port = sctp_get_port,
2544 ++ .no_autobind = true,
2545 + .obj_size = sizeof(struct sctp_sock),
2546 + .useroffset = offsetof(struct sctp_sock, subscribe),
2547 + .usersize = offsetof(struct sctp_sock, initmsg) -
2548 +@@ -8999,7 +8999,7 @@ struct proto sctpv6_prot = {
2549 + .backlog_rcv = sctp_backlog_rcv,
2550 + .hash = sctp_hash,
2551 + .unhash = sctp_unhash,
2552 +- .get_port = sctp_get_port,
2553 ++ .no_autobind = true,
2554 + .obj_size = sizeof(struct sctp6_sock),
2555 + .useroffset = offsetof(struct sctp6_sock, sctp.subscribe),
2556 + .usersize = offsetof(struct sctp6_sock, sctp.initmsg) -
2557 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
2558 +index 334e3181f1c5..a28d6456e93e 100644
2559 +--- a/net/wireless/nl80211.c
2560 ++++ b/net/wireless/nl80211.c
2561 +@@ -5843,6 +5843,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
2562 + if (!rdev->ops->del_mpath)
2563 + return -EOPNOTSUPP;
2564 +
2565 ++ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
2566 ++ return -EOPNOTSUPP;
2567 ++
2568 + return rdev_del_mpath(rdev, dev, dst);
2569 + }
2570 +
2571 +diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
2572 +index c67d7a82ab13..73fd0eae08ca 100644
2573 +--- a/net/wireless/wext-sme.c
2574 ++++ b/net/wireless/wext-sme.c
2575 +@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
2576 + struct iw_point *data, char *ssid)
2577 + {
2578 + struct wireless_dev *wdev = dev->ieee80211_ptr;
2579 ++ int ret = 0;
2580 +
2581 + /* call only for station! */
2582 + if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
2583 +@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
2584 + if (ie) {
2585 + data->flags = 1;
2586 + data->length = ie[1];
2587 +- memcpy(ssid, ie + 2, data->length);
2588 ++ if (data->length > IW_ESSID_MAX_SIZE)
2589 ++ ret = -EINVAL;
2590 ++ else
2591 ++ memcpy(ssid, ie + 2, data->length);
2592 + }
2593 + rcu_read_unlock();
2594 + } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
2595 +@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
2596 + }
2597 + wdev_unlock(wdev);
2598 +
2599 +- return 0;
2600 ++ return ret;
2601 + }
2602 +
2603 + int cfg80211_mgd_wext_siwap(struct net_device *dev,
2604 +diff --git a/scripts/namespace.pl b/scripts/namespace.pl
2605 +index 6135574a6f39..1da7bca201a4 100755
2606 +--- a/scripts/namespace.pl
2607 ++++ b/scripts/namespace.pl
2608 +@@ -65,13 +65,14 @@
2609 + use warnings;
2610 + use strict;
2611 + use File::Find;
2612 ++use File::Spec;
2613 +
2614 + my $nm = ($ENV{'NM'} || "nm") . " -p";
2615 + my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
2616 +-my $srctree = "";
2617 +-my $objtree = "";
2618 +-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
2619 +-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
2620 ++my $srctree = File::Spec->curdir();
2621 ++my $objtree = File::Spec->curdir();
2622 ++$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
2623 ++$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
2624 +
2625 + if ($#ARGV != -1) {
2626 + print STDERR "usage: $0 takes no parameters\n";
2627 +@@ -231,9 +232,9 @@ sub do_nm
2628 + }
2629 + ($source = $basename) =~ s/\.o$//;
2630 + if (-e "$source.c" || -e "$source.S") {
2631 +- $source = "$objtree$File::Find::dir/$source";
2632 ++ $source = File::Spec->catfile($objtree, $File::Find::dir, $source)
2633 + } else {
2634 +- $source = "$srctree$File::Find::dir/$source";
2635 ++ $source = File::Spec->catfile($srctree, $File::Find::dir, $source)
2636 + }
2637 + if (! -e "$source.c" && ! -e "$source.S") {
2638 + # No obvious source, exclude the object if it is conglomerate
2639 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2640 +index 107ec7f3e221..c827a2a89cc3 100644
2641 +--- a/sound/pci/hda/patch_hdmi.c
2642 ++++ b/sound/pci/hda/patch_hdmi.c
2643 +@@ -3264,6 +3264,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
2644 + nvhdmi_chmap_cea_alloc_validate_get_type;
2645 + spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
2646 +
2647 ++ codec->link_down_at_suspend = 1;
2648 ++
2649 + return 0;
2650 + }
2651 +
2652 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2653 +index e1b08d6f2a51..dd46354270d0 100644
2654 +--- a/sound/pci/hda/patch_realtek.c
2655 ++++ b/sound/pci/hda/patch_realtek.c
2656 +@@ -405,6 +405,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
2657 + case 0x10ec0700:
2658 + case 0x10ec0701:
2659 + case 0x10ec0703:
2660 ++ case 0x10ec0711:
2661 + alc_update_coef_idx(codec, 0x10, 1<<15, 0);
2662 + break;
2663 + case 0x10ec0662:
2664 +@@ -5676,6 +5677,7 @@ enum {
2665 + ALC225_FIXUP_WYSE_AUTO_MUTE,
2666 + ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
2667 + ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
2668 ++ ALC256_FIXUP_ASUS_HEADSET_MIC,
2669 + ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
2670 + ALC299_FIXUP_PREDATOR_SPK,
2671 + ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
2672 +@@ -6692,6 +6694,15 @@ static const struct hda_fixup alc269_fixups[] = {
2673 + .chained = true,
2674 + .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
2675 + },
2676 ++ [ALC256_FIXUP_ASUS_HEADSET_MIC] = {
2677 ++ .type = HDA_FIXUP_PINS,
2678 ++ .v.pins = (const struct hda_pintbl[]) {
2679 ++ { 0x19, 0x03a11020 }, /* headset mic with jack detect */
2680 ++ { }
2681 ++ },
2682 ++ .chained = true,
2683 ++ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
2684 ++ },
2685 + [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
2686 + .type = HDA_FIXUP_PINS,
2687 + .v.pins = (const struct hda_pintbl[]) {
2688 +@@ -6888,6 +6899,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
2689 + SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
2690 + SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
2691 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
2692 ++ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
2693 + SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
2694 + SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
2695 + SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
2696 +@@ -7752,6 +7764,7 @@ static int patch_alc269(struct hda_codec *codec)
2697 + case 0x10ec0700:
2698 + case 0x10ec0701:
2699 + case 0x10ec0703:
2700 ++ case 0x10ec0711:
2701 + spec->codec_variant = ALC269_TYPE_ALC700;
2702 + spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
2703 + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
2704 +@@ -8883,6 +8896,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
2705 + HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
2706 + HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
2707 + HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
2708 ++ HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
2709 + HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
2710 + HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
2711 + HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
2712 +diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
2713 +index d23c2bbff0cf..15a31820df16 100644
2714 +--- a/sound/soc/sh/rcar/core.c
2715 ++++ b/sound/soc/sh/rcar/core.c
2716 +@@ -674,6 +674,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
2717 + }
2718 +
2719 + /* set format */
2720 ++ rdai->bit_clk_inv = 0;
2721 + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
2722 + case SND_SOC_DAIFMT_I2S:
2723 + rdai->sys_delay = 0;
2724 +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
2725 +index 13ea63c959d3..1828225ba882 100644
2726 +--- a/sound/usb/pcm.c
2727 ++++ b/sound/usb/pcm.c
2728 +@@ -355,6 +355,9 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
2729 + ep = 0x81;
2730 + ifnum = 1;
2731 + goto add_sync_ep_from_ifnum;
2732 ++ case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
2733 ++ /* BOSS Katana amplifiers do not need quirks */
2734 ++ return 0;
2735 + }
2736 +
2737 + if (attr == USB_ENDPOINT_SYNC_ASYNC &&