From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Mon, 07 Aug 2017 10:27:03
Message-Id: 1502101611.75b675050b8c4ce31c20d14ab334f93a025e145d.mpagano@gentoo
1 commit: 75b675050b8c4ce31c20d14ab334f93a025e145d
2 Author: Mike Pagano <mpagano@gentoo.org>
3 AuthorDate: Mon Aug 7 10:26:51 2017 +0000
4 Commit: Mike Pagano <mpagano@gentoo.org>
5 CommitDate: Mon Aug 7 10:26:51 2017 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=75b67505
7
8 Linux patch 4.9.41
9
10 0000_README | 4 +
11 1040_linux-4.9.41.patch | 3462 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 3466 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 82eac05..eacc709 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -203,6 +203,10 @@ Patch: 1039_linux-4.9.40.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.40
21
22 +Patch: 1040_linux-4.9.41.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.41
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1040_linux-4.9.41.patch b/1040_linux-4.9.41.patch
31 new file mode 100644
32 index 0000000..a43b44b
33 --- /dev/null
34 +++ b/1040_linux-4.9.41.patch
35 @@ -0,0 +1,3462 @@
36 +diff --git a/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
37 +index 3e5b9793341f..8682ab6d4a50 100644
38 +--- a/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
39 ++++ b/Documentation/devicetree/bindings/input/tps65218-pwrbutton.txt
40 +@@ -8,8 +8,9 @@ This driver provides a simple power button event via an Interrupt.
41 + Required properties:
42 + - compatible: should be "ti,tps65217-pwrbutton" or "ti,tps65218-pwrbutton"
43 +
44 +-Required properties for TPS65218:
45 ++Required properties:
46 + - interrupts: should be one of the following
47 ++ - <2>: For controllers compatible with tps65217
48 + - <3 IRQ_TYPE_EDGE_BOTH>: For controllers compatible with tps65218
49 +
50 + Examples:
51 +@@ -17,6 +18,7 @@ Examples:
52 + &tps {
53 + tps65217-pwrbutton {
54 + compatible = "ti,tps65217-pwrbutton";
55 ++ interrupts = <2>;
56 + };
57 + };
58 +
59 +diff --git a/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt b/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
60 +index 98d131acee95..a11072c5a866 100644
61 +--- a/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
62 ++++ b/Documentation/devicetree/bindings/power/supply/tps65217_charger.txt
63 +@@ -2,11 +2,16 @@ TPS65217 Charger
64 +
65 + Required Properties:
66 + -compatible: "ti,tps65217-charger"
67 ++-interrupts: TPS65217 interrupt numbers for the AC and USB charger input change.
68 ++ Should be <0> for the USB charger and <1> for the AC adapter.
69 ++-interrupt-names: Should be "USB" and "AC"
70 +
71 + This node is a subnode of the tps65217 PMIC.
72 +
73 + Example:
74 +
75 + tps65217-charger {
76 +- compatible = "ti,tps65090-charger";
77 ++ compatible = "ti,tps65217-charger";
78 ++ interrupts = <0>, <1>;
79 ++ interrupt-names = "USB", "AC";
80 + };
81 +diff --git a/Makefile b/Makefile
82 +index d9397a912c31..82eb3d1ee801 100644
83 +--- a/Makefile
84 ++++ b/Makefile
85 +@@ -1,6 +1,6 @@
86 + VERSION = 4
87 + PATCHLEVEL = 9
88 +-SUBLEVEL = 40
89 ++SUBLEVEL = 41
90 + EXTRAVERSION =
91 + NAME = Roaring Lionus
92 +
93 +diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
94 +index f39142acc89e..be131b296a55 100644
95 +--- a/arch/arc/kernel/mcip.c
96 ++++ b/arch/arc/kernel/mcip.c
97 +@@ -10,6 +10,7 @@
98 +
99 + #include <linux/smp.h>
100 + #include <linux/irq.h>
101 ++#include <linux/irqchip/chained_irq.h>
102 + #include <linux/spinlock.h>
103 + #include <asm/irqflags-arcv2.h>
104 + #include <asm/mcip.h>
105 +@@ -221,10 +222,13 @@ static irq_hw_number_t idu_first_hwirq;
106 + static void idu_cascade_isr(struct irq_desc *desc)
107 + {
108 + struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
109 ++ struct irq_chip *core_chip = irq_desc_get_chip(desc);
110 + irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
111 + irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
112 +
113 ++ chained_irq_enter(core_chip, desc);
114 + generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
115 ++ chained_irq_exit(core_chip, desc);
116 + }
117 +
118 + static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
119 +diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
120 +index 03cec62260e1..db858fff4e18 100644
121 +--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
122 ++++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
123 +@@ -294,7 +294,7 @@
124 + };
125 +
126 + &usb2 {
127 +- dr_mode = "otg";
128 ++ dr_mode = "peripheral";
129 + };
130 +
131 + &mmc2 {
132 +diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
133 +index 87ca50b53002..4d448f145ed1 100644
134 +--- a/arch/arm/boot/dts/omap3-n900.dts
135 ++++ b/arch/arm/boot/dts/omap3-n900.dts
136 +@@ -734,6 +734,8 @@
137 + vmmc_aux-supply = <&vsim>;
138 + bus-width = <8>;
139 + non-removable;
140 ++ no-sdio;
141 ++ no-sd;
142 + };
143 +
144 + &mmc3 {
145 +diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
146 +index bc4bfe02e611..60d3fecd7a22 100644
147 +--- a/arch/arm/configs/s3c2410_defconfig
148 ++++ b/arch/arm/configs/s3c2410_defconfig
149 +@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
150 + CONFIG_NETFILTER=y
151 + CONFIG_NF_CONNTRACK=m
152 + CONFIG_NF_CONNTRACK_EVENTS=y
153 +-CONFIG_NF_CT_PROTO_DCCP=m
154 +-CONFIG_NF_CT_PROTO_SCTP=m
155 +-CONFIG_NF_CT_PROTO_UDPLITE=m
156 ++CONFIG_NF_CT_PROTO_DCCP=y
157 ++CONFIG_NF_CT_PROTO_SCTP=y
158 ++CONFIG_NF_CT_PROTO_UDPLITE=y
159 + CONFIG_NF_CONNTRACK_AMANDA=m
160 + CONFIG_NF_CONNTRACK_FTP=m
161 + CONFIG_NF_CONNTRACK_H323=m
162 +diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
163 +index 1052b29697b8..b5c1714ebfdd 100644
164 +--- a/arch/arm/mach-omap2/omap_hwmod.c
165 ++++ b/arch/arm/mach-omap2/omap_hwmod.c
166 +@@ -790,14 +790,14 @@ static int _init_main_clk(struct omap_hwmod *oh)
167 + int ret = 0;
168 + char name[MOD_CLK_MAX_NAME_LEN];
169 + struct clk *clk;
170 ++ static const char modck[] = "_mod_ck";
171 +
172 +- /* +7 magic comes from '_mod_ck' suffix */
173 +- if (strlen(oh->name) + 7 > MOD_CLK_MAX_NAME_LEN)
174 ++ if (strlen(oh->name) >= MOD_CLK_MAX_NAME_LEN - strlen(modck))
175 + pr_warn("%s: warning: cropping name for %s\n", __func__,
176 + oh->name);
177 +
178 +- strncpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - 7);
179 +- strcat(name, "_mod_ck");
180 ++ strlcpy(name, oh->name, MOD_CLK_MAX_NAME_LEN - strlen(modck));
181 ++ strlcat(name, modck, MOD_CLK_MAX_NAME_LEN);
182 +
183 + clk = clk_get(NULL, name);
184 + if (!IS_ERR(clk)) {
185 +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
186 +index 358089687a69..ef1b9e573af0 100644
187 +--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
188 ++++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
189 +@@ -27,7 +27,7 @@
190 + stdout-path = "serial0:115200n8";
191 + };
192 +
193 +- memory {
194 ++ memory@0 {
195 + device_type = "memory";
196 + reg = <0x0 0x0 0x0 0x40000000>;
197 + };
198 +diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
199 +index 68a908334c7b..54dc28351c8c 100644
200 +--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
201 ++++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
202 +@@ -72,7 +72,7 @@
203 + <1 10 0xf08>;
204 + };
205 +
206 +- amba_apu {
207 ++ amba_apu: amba_apu@0 {
208 + compatible = "simple-bus";
209 + #address-cells = <2>;
210 + #size-cells = <1>;
211 +@@ -175,7 +175,7 @@
212 + };
213 +
214 + i2c0: i2c@ff020000 {
215 +- compatible = "cdns,i2c-r1p10";
216 ++ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
217 + status = "disabled";
218 + interrupt-parent = <&gic>;
219 + interrupts = <0 17 4>;
220 +@@ -185,7 +185,7 @@
221 + };
222 +
223 + i2c1: i2c@ff030000 {
224 +- compatible = "cdns,i2c-r1p10";
225 ++ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
226 + status = "disabled";
227 + interrupt-parent = <&gic>;
228 + interrupts = <0 18 4>;
229 +diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
230 +index 8b8ac3db4092..0e90c7e0279c 100644
231 +--- a/arch/arm64/mm/fault.c
232 ++++ b/arch/arm64/mm/fault.c
233 +@@ -101,21 +101,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
234 + break;
235 +
236 + pud = pud_offset(pgd, addr);
237 +- printk(", *pud=%016llx", pud_val(*pud));
238 ++ pr_cont(", *pud=%016llx", pud_val(*pud));
239 + if (pud_none(*pud) || pud_bad(*pud))
240 + break;
241 +
242 + pmd = pmd_offset(pud, addr);
243 +- printk(", *pmd=%016llx", pmd_val(*pmd));
244 ++ pr_cont(", *pmd=%016llx", pmd_val(*pmd));
245 + if (pmd_none(*pmd) || pmd_bad(*pmd))
246 + break;
247 +
248 + pte = pte_offset_map(pmd, addr);
249 +- printk(", *pte=%016llx", pte_val(*pte));
250 ++ pr_cont(", *pte=%016llx", pte_val(*pte));
251 + pte_unmap(pte);
252 + } while(0);
253 +
254 +- printk("\n");
255 ++ pr_cont("\n");
256 + }
257 +
258 + #ifdef CONFIG_ARM64_HW_AFDBM
259 +diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
260 +index d68b9ede8423..c50609aead35 100644
261 +--- a/arch/openrisc/kernel/vmlinux.lds.S
262 ++++ b/arch/openrisc/kernel/vmlinux.lds.S
263 +@@ -38,6 +38,8 @@ SECTIONS
264 + /* Read-only sections, merged into text segment: */
265 + . = LOAD_BASE ;
266 +
267 ++ _text = .;
268 ++
269 + /* _s_kernel_ro must be page aligned */
270 + . = ALIGN(PAGE_SIZE);
271 + _s_kernel_ro = .;
272 +diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
273 +index 53ec75f8e237..c721ea2fdbd8 100644
274 +--- a/arch/parisc/kernel/cache.c
275 ++++ b/arch/parisc/kernel/cache.c
276 +@@ -452,8 +452,8 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
277 + before it can be accessed through the kernel mapping. */
278 + preempt_disable();
279 + flush_dcache_page_asm(__pa(vfrom), vaddr);
280 +- preempt_enable();
281 + copy_page_asm(vto, vfrom);
282 ++ preempt_enable();
283 + }
284 + EXPORT_SYMBOL(copy_user_page);
285 +
286 +@@ -538,6 +538,10 @@ void flush_cache_mm(struct mm_struct *mm)
287 + struct vm_area_struct *vma;
288 + pgd_t *pgd;
289 +
290 ++ /* Flush the TLB to avoid speculation if coherency is required. */
291 ++ if (parisc_requires_coherency())
292 ++ flush_tlb_all();
293 ++
294 + /* Flushing the whole cache on each cpu takes forever on
295 + rp3440, etc. So, avoid it if the mm isn't too big. */
296 + if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
297 +@@ -594,33 +598,22 @@ flush_user_icache_range(unsigned long start, unsigned long end)
298 + void flush_cache_range(struct vm_area_struct *vma,
299 + unsigned long start, unsigned long end)
300 + {
301 +- unsigned long addr;
302 +- pgd_t *pgd;
303 +-
304 + BUG_ON(!vma->vm_mm->context);
305 +
306 ++ /* Flush the TLB to avoid speculation if coherency is required. */
307 ++ if (parisc_requires_coherency())
308 ++ flush_tlb_range(vma, start, end);
309 ++
310 + if ((end - start) >= parisc_cache_flush_threshold) {
311 + flush_cache_all();
312 + return;
313 + }
314 +
315 +- if (vma->vm_mm->context == mfsp(3)) {
316 +- flush_user_dcache_range_asm(start, end);
317 +- if (vma->vm_flags & VM_EXEC)
318 +- flush_user_icache_range_asm(start, end);
319 +- return;
320 +- }
321 ++ BUG_ON(vma->vm_mm->context != mfsp(3));
322 +
323 +- pgd = vma->vm_mm->pgd;
324 +- for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
325 +- unsigned long pfn;
326 +- pte_t *ptep = get_ptep(pgd, addr);
327 +- if (!ptep)
328 +- continue;
329 +- pfn = pte_pfn(*ptep);
330 +- if (pfn_valid(pfn))
331 +- __flush_cache_page(vma, addr, PFN_PHYS(pfn));
332 +- }
333 ++ flush_user_dcache_range_asm(start, end);
334 ++ if (vma->vm_flags & VM_EXEC)
335 ++ flush_user_icache_range_asm(start, end);
336 + }
337 +
338 + void
339 +@@ -629,7 +622,8 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
340 + BUG_ON(!vma->vm_mm->context);
341 +
342 + if (pfn_valid(pfn)) {
343 +- flush_tlb_page(vma, vmaddr);
344 ++ if (parisc_requires_coherency())
345 ++ flush_tlb_page(vma, vmaddr);
346 + __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
347 + }
348 + }
349 +diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
350 +index e7ffde2758fc..7593787ed4c3 100644
351 +--- a/arch/parisc/kernel/process.c
352 ++++ b/arch/parisc/kernel/process.c
353 +@@ -50,6 +50,7 @@
354 + #include <linux/uaccess.h>
355 + #include <linux/rcupdate.h>
356 + #include <linux/random.h>
357 ++#include <linux/nmi.h>
358 +
359 + #include <asm/io.h>
360 + #include <asm/asm-offsets.h>
361 +@@ -142,6 +143,7 @@ void machine_power_off(void)
362 +
363 + /* prevent soft lockup/stalled CPU messages for endless loop. */
364 + rcu_sysrq_start();
365 ++ lockup_detector_suspend();
366 + for (;;);
367 + }
368 +
369 +diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
370 +index 329771559cbb..8b3b46b7b0f2 100644
371 +--- a/arch/powerpc/include/asm/topology.h
372 ++++ b/arch/powerpc/include/asm/topology.h
373 +@@ -44,22 +44,8 @@ extern void __init dump_numa_cpu_topology(void);
374 + extern int sysfs_add_device_to_node(struct device *dev, int nid);
375 + extern void sysfs_remove_device_from_node(struct device *dev, int nid);
376 +
377 +-static inline int early_cpu_to_node(int cpu)
378 +-{
379 +- int nid;
380 +-
381 +- nid = numa_cpu_lookup_table[cpu];
382 +-
383 +- /*
384 +- * Fall back to node 0 if nid is unset (it should be, except bugs).
385 +- * This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
386 +- */
387 +- return (nid < 0) ? 0 : nid;
388 +-}
389 + #else
390 +
391 +-static inline int early_cpu_to_node(int cpu) { return 0; }
392 +-
393 + static inline void dump_numa_cpu_topology(void) {}
394 +
395 + static inline int sysfs_add_device_to_node(struct device *dev, int nid)
396 +diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
397 +index ada71bee176d..a12be60181bf 100644
398 +--- a/arch/powerpc/kernel/setup_64.c
399 ++++ b/arch/powerpc/kernel/setup_64.c
400 +@@ -595,7 +595,7 @@ void __init emergency_stack_init(void)
401 +
402 + static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
403 + {
404 +- return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
405 ++ return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
406 + __pa(MAX_DMA_ADDRESS));
407 + }
408 +
409 +@@ -606,7 +606,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
410 +
411 + static int pcpu_cpu_distance(unsigned int from, unsigned int to)
412 + {
413 +- if (early_cpu_to_node(from) == early_cpu_to_node(to))
414 ++ if (cpu_to_node(from) == cpu_to_node(to))
415 + return LOCAL_DISTANCE;
416 + else
417 + return REMOTE_DISTANCE;
418 +diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
419 +index 5c0298422300..218cba2f5699 100644
420 +--- a/arch/powerpc/kvm/book3s_hv.c
421 ++++ b/arch/powerpc/kvm/book3s_hv.c
422 +@@ -2808,6 +2808,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
423 + int r;
424 + int srcu_idx;
425 + unsigned long ebb_regs[3] = {}; /* shut up GCC */
426 ++ unsigned long user_tar = 0;
427 ++ unsigned int user_vrsave;
428 +
429 + if (!vcpu->arch.sane) {
430 + run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
431 +@@ -2828,6 +2830,8 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
432 + run->fail_entry.hardware_entry_failure_reason = 0;
433 + return -EINVAL;
434 + }
435 ++ /* Enable TM so we can read the TM SPRs */
436 ++ mtmsr(mfmsr() | MSR_TM);
437 + current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
438 + current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
439 + current->thread.tm_texasr = mfspr(SPRN_TEXASR);
440 +@@ -2856,12 +2860,14 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
441 +
442 + flush_all_to_thread(current);
443 +
444 +- /* Save userspace EBB register values */
445 ++ /* Save userspace EBB and other register values */
446 + if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
447 + ebb_regs[0] = mfspr(SPRN_EBBHR);
448 + ebb_regs[1] = mfspr(SPRN_EBBRR);
449 + ebb_regs[2] = mfspr(SPRN_BESCR);
450 ++ user_tar = mfspr(SPRN_TAR);
451 + }
452 ++ user_vrsave = mfspr(SPRN_VRSAVE);
453 +
454 + vcpu->arch.wqp = &vcpu->arch.vcore->wq;
455 + vcpu->arch.pgdir = current->mm->pgd;
456 +@@ -2885,12 +2891,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
457 + r = kvmppc_xics_rm_complete(vcpu, 0);
458 + } while (is_kvmppc_resume_guest(r));
459 +
460 +- /* Restore userspace EBB register values */
461 ++ /* Restore userspace EBB and other register values */
462 + if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
463 + mtspr(SPRN_EBBHR, ebb_regs[0]);
464 + mtspr(SPRN_EBBRR, ebb_regs[1]);
465 + mtspr(SPRN_BESCR, ebb_regs[2]);
466 ++ mtspr(SPRN_TAR, user_tar);
467 ++ mtspr(SPRN_FSCR, current->thread.fscr);
468 + }
469 ++ mtspr(SPRN_VRSAVE, user_vrsave);
470 +
471 + out:
472 + vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
473 +diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
474 +index 6f81adb112f1..0447a22a4df6 100644
475 +--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
476 ++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
477 +@@ -37,6 +37,13 @@
478 + #define NAPPING_CEDE 1
479 + #define NAPPING_NOVCPU 2
480 +
481 ++/* Stack frame offsets for kvmppc_hv_entry */
482 ++#define SFS 112
483 ++#define STACK_SLOT_TRAP (SFS-4)
484 ++#define STACK_SLOT_CIABR (SFS-16)
485 ++#define STACK_SLOT_DAWR (SFS-24)
486 ++#define STACK_SLOT_DAWRX (SFS-32)
487 ++
488 + /*
489 + * Call kvmppc_hv_entry in real mode.
490 + * Must be called with interrupts hard-disabled.
491 +@@ -289,10 +296,10 @@ kvm_novcpu_exit:
492 + bl kvmhv_accumulate_time
493 + #endif
494 + 13: mr r3, r12
495 +- stw r12, 112-4(r1)
496 ++ stw r12, STACK_SLOT_TRAP(r1)
497 + bl kvmhv_commence_exit
498 + nop
499 +- lwz r12, 112-4(r1)
500 ++ lwz r12, STACK_SLOT_TRAP(r1)
501 + b kvmhv_switch_to_host
502 +
503 + /*
504 +@@ -537,7 +544,7 @@ kvmppc_hv_entry:
505 + */
506 + mflr r0
507 + std r0, PPC_LR_STKOFF(r1)
508 +- stdu r1, -112(r1)
509 ++ stdu r1, -SFS(r1)
510 +
511 + /* Save R1 in the PACA */
512 + std r1, HSTATE_HOST_R1(r13)
513 +@@ -698,6 +705,16 @@ kvmppc_got_guest:
514 + mtspr SPRN_PURR,r7
515 + mtspr SPRN_SPURR,r8
516 +
517 ++ /* Save host values of some registers */
518 ++BEGIN_FTR_SECTION
519 ++ mfspr r5, SPRN_CIABR
520 ++ mfspr r6, SPRN_DAWR
521 ++ mfspr r7, SPRN_DAWRX
522 ++ std r5, STACK_SLOT_CIABR(r1)
523 ++ std r6, STACK_SLOT_DAWR(r1)
524 ++ std r7, STACK_SLOT_DAWRX(r1)
525 ++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
526 ++
527 + BEGIN_FTR_SECTION
528 + /* Set partition DABR */
529 + /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
530 +@@ -1361,8 +1378,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
531 + */
532 + li r0, 0
533 + mtspr SPRN_IAMR, r0
534 +- mtspr SPRN_CIABR, r0
535 +- mtspr SPRN_DAWRX, r0
536 ++ mtspr SPRN_PSPB, r0
537 + mtspr SPRN_TCSCR, r0
538 + mtspr SPRN_WORT, r0
539 + /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
540 +@@ -1378,6 +1394,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
541 + std r6,VCPU_UAMOR(r9)
542 + li r6,0
543 + mtspr SPRN_AMR,r6
544 ++ mtspr SPRN_UAMOR, r6
545 +
546 + /* Switch DSCR back to host value */
547 + mfspr r8, SPRN_DSCR
548 +@@ -1519,6 +1536,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
549 + slbia
550 + ptesync
551 +
552 ++ /* Restore host values of some registers */
553 ++BEGIN_FTR_SECTION
554 ++ ld r5, STACK_SLOT_CIABR(r1)
555 ++ ld r6, STACK_SLOT_DAWR(r1)
556 ++ ld r7, STACK_SLOT_DAWRX(r1)
557 ++ mtspr SPRN_CIABR, r5
558 ++ mtspr SPRN_DAWR, r6
559 ++ mtspr SPRN_DAWRX, r7
560 ++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
561 ++
562 + /*
563 + * POWER7/POWER8 guest -> host partition switch code.
564 + * We don't have to lock against tlbies but we do
565 +@@ -1652,8 +1679,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
566 + li r0, KVM_GUEST_MODE_NONE
567 + stb r0, HSTATE_IN_GUEST(r13)
568 +
569 +- ld r0, 112+PPC_LR_STKOFF(r1)
570 +- addi r1, r1, 112
571 ++ ld r0, SFS+PPC_LR_STKOFF(r1)
572 ++ addi r1, r1, SFS
573 + mtlr r0
574 + blr
575 +
576 +diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
577 +index cc66c49f07aa..666ad0611e63 100644
578 +--- a/arch/powerpc/platforms/pseries/reconfig.c
579 ++++ b/arch/powerpc/platforms/pseries/reconfig.c
580 +@@ -82,7 +82,6 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
581 +
582 + of_detach_node(np);
583 + of_node_put(parent);
584 +- of_node_put(np); /* Must decrement the refcount */
585 + return 0;
586 + }
587 +
588 +diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
589 +index fec8a461bdef..1076c9a77292 100644
590 +--- a/arch/x86/events/intel/cstate.c
591 ++++ b/arch/x86/events/intel/cstate.c
592 +@@ -434,6 +434,7 @@ static struct pmu cstate_core_pmu = {
593 + .stop = cstate_pmu_event_stop,
594 + .read = cstate_pmu_event_update,
595 + .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
596 ++ .module = THIS_MODULE,
597 + };
598 +
599 + static struct pmu cstate_pkg_pmu = {
600 +@@ -447,6 +448,7 @@ static struct pmu cstate_pkg_pmu = {
601 + .stop = cstate_pmu_event_stop,
602 + .read = cstate_pmu_event_update,
603 + .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
604 ++ .module = THIS_MODULE,
605 + };
606 +
607 + static const struct cstate_model nhm_cstates __initconst = {
608 +diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
609 +index 8b902b67342a..970c1de3b86e 100644
610 +--- a/arch/x86/events/intel/rapl.c
611 ++++ b/arch/x86/events/intel/rapl.c
612 +@@ -697,6 +697,7 @@ static int __init init_rapl_pmus(void)
613 + rapl_pmus->pmu.start = rapl_pmu_event_start;
614 + rapl_pmus->pmu.stop = rapl_pmu_event_stop;
615 + rapl_pmus->pmu.read = rapl_pmu_event_read;
616 ++ rapl_pmus->pmu.module = THIS_MODULE;
617 + return 0;
618 + }
619 +
620 +diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
621 +index 19d646a783fd..aec6cc925af8 100644
622 +--- a/arch/x86/events/intel/uncore.c
623 ++++ b/arch/x86/events/intel/uncore.c
624 +@@ -733,6 +733,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
625 + .start = uncore_pmu_event_start,
626 + .stop = uncore_pmu_event_stop,
627 + .read = uncore_pmu_event_read,
628 ++ .module = THIS_MODULE,
629 + };
630 + } else {
631 + pmu->pmu = *pmu->type->pmu;
632 +diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
633 +index 3dfca7b302dc..a5b47c1361a0 100644
634 +--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
635 ++++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
636 +@@ -955,6 +955,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
637 + const char *name = get_name(bank, NULL);
638 + int err = 0;
639 +
640 ++ if (!dev)
641 ++ return -ENODEV;
642 ++
643 + if (is_shared_bank(bank)) {
644 + nb = node_to_amd_nb(amd_get_nb_id(cpu));
645 +
646 +diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
647 +index dd6cfa4ad3ac..75029d0cfa15 100644
648 +--- a/arch/x86/platform/intel-mid/device_libs/Makefile
649 ++++ b/arch/x86/platform/intel-mid/device_libs/Makefile
650 +@@ -15,7 +15,7 @@ obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
651 + obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
652 + obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
653 + # SPI Devices
654 +-obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
655 ++obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
656 + # I2C Devices
657 + obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
658 + obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
659 +diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
660 +new file mode 100644
661 +index 000000000000..27186ad654c9
662 +--- /dev/null
663 ++++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
664 +@@ -0,0 +1,54 @@
665 ++/*
666 ++ * spidev platform data initilization file
667 ++ *
668 ++ * (C) Copyright 2014, 2016 Intel Corporation
669 ++ * Authors: Andy Shevchenko <andriy.shevchenko@×××××××××××.com>
670 ++ * Dan O'Donovan <dan@××××××.com>
671 ++ *
672 ++ * This program is free software; you can redistribute it and/or
673 ++ * modify it under the terms of the GNU General Public License
674 ++ * as published by the Free Software Foundation; version 2
675 ++ * of the License.
676 ++ */
677 ++
678 ++#include <linux/err.h>
679 ++#include <linux/init.h>
680 ++#include <linux/sfi.h>
681 ++#include <linux/spi/pxa2xx_spi.h>
682 ++#include <linux/spi/spi.h>
683 ++
684 ++#include <asm/intel-mid.h>
685 ++
686 ++#define MRFLD_SPI_DEFAULT_DMA_BURST 8
687 ++#define MRFLD_SPI_DEFAULT_TIMEOUT 500
688 ++
689 ++/* GPIO pin for spidev chipselect */
690 ++#define MRFLD_SPIDEV_GPIO_CS 111
691 ++
692 ++static struct pxa2xx_spi_chip spidev_spi_chip = {
693 ++ .dma_burst_size = MRFLD_SPI_DEFAULT_DMA_BURST,
694 ++ .timeout = MRFLD_SPI_DEFAULT_TIMEOUT,
695 ++ .gpio_cs = MRFLD_SPIDEV_GPIO_CS,
696 ++};
697 ++
698 ++static void __init *spidev_platform_data(void *info)
699 ++{
700 ++ struct spi_board_info *spi_info = info;
701 ++
702 ++ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
703 ++ return ERR_PTR(-ENODEV);
704 ++
705 ++ spi_info->mode = SPI_MODE_0;
706 ++ spi_info->controller_data = &spidev_spi_chip;
707 ++
708 ++ return NULL;
709 ++}
710 ++
711 ++static const struct devs_id spidev_dev_id __initconst = {
712 ++ .name = "spidev",
713 ++ .type = SFI_DEV_TYPE_SPI,
714 ++ .delay = 0,
715 ++ .get_platform_data = &spidev_platform_data,
716 ++};
717 ++
718 ++sfi_device(spidev_dev_id);
719 +diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
720 +deleted file mode 100644
721 +index 30c601b399ee..000000000000
722 +--- a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
723 ++++ /dev/null
724 +@@ -1,50 +0,0 @@
725 +-/*
726 +- * spidev platform data initilization file
727 +- *
728 +- * (C) Copyright 2014, 2016 Intel Corporation
729 +- * Authors: Andy Shevchenko <andriy.shevchenko@×××××××××××.com>
730 +- * Dan O'Donovan <dan@××××××.com>
731 +- *
732 +- * This program is free software; you can redistribute it and/or
733 +- * modify it under the terms of the GNU General Public License
734 +- * as published by the Free Software Foundation; version 2
735 +- * of the License.
736 +- */
737 +-
738 +-#include <linux/init.h>
739 +-#include <linux/sfi.h>
740 +-#include <linux/spi/pxa2xx_spi.h>
741 +-#include <linux/spi/spi.h>
742 +-
743 +-#include <asm/intel-mid.h>
744 +-
745 +-#define MRFLD_SPI_DEFAULT_DMA_BURST 8
746 +-#define MRFLD_SPI_DEFAULT_TIMEOUT 500
747 +-
748 +-/* GPIO pin for spidev chipselect */
749 +-#define MRFLD_SPIDEV_GPIO_CS 111
750 +-
751 +-static struct pxa2xx_spi_chip spidev_spi_chip = {
752 +- .dma_burst_size = MRFLD_SPI_DEFAULT_DMA_BURST,
753 +- .timeout = MRFLD_SPI_DEFAULT_TIMEOUT,
754 +- .gpio_cs = MRFLD_SPIDEV_GPIO_CS,
755 +-};
756 +-
757 +-static void __init *spidev_platform_data(void *info)
758 +-{
759 +- struct spi_board_info *spi_info = info;
760 +-
761 +- spi_info->mode = SPI_MODE_0;
762 +- spi_info->controller_data = &spidev_spi_chip;
763 +-
764 +- return NULL;
765 +-}
766 +-
767 +-static const struct devs_id spidev_dev_id __initconst = {
768 +- .name = "spidev",
769 +- .type = SFI_DEV_TYPE_SPI,
770 +- .delay = 0,
771 +- .get_platform_data = &spidev_platform_data,
772 +-};
773 +-
774 +-sfi_device(spidev_dev_id);
775 +diff --git a/crypto/authencesn.c b/crypto/authencesn.c
776 +index 121010ac9962..18c94e1c31d1 100644
777 +--- a/crypto/authencesn.c
778 ++++ b/crypto/authencesn.c
779 +@@ -248,6 +248,9 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
780 + u8 *ihash = ohash + crypto_ahash_digestsize(auth);
781 + u32 tmp[2];
782 +
783 ++ if (!authsize)
784 ++ goto decrypt;
785 ++
786 + /* Move high-order bits of sequence number back. */
787 + scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
788 + scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
789 +@@ -256,6 +259,8 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
790 + if (crypto_memneq(ihash, ohash, authsize))
791 + return -EBADMSG;
792 +
793 ++decrypt:
794 ++
795 + sg_init_table(areq_ctx->dst, 2);
796 + dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
797 +
798 +diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
799 +index 5ea5dc219f56..73c9c7fa9001 100644
800 +--- a/drivers/acpi/glue.c
801 ++++ b/drivers/acpi/glue.c
802 +@@ -98,7 +98,15 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
803 + if (check_children && list_empty(&adev->children))
804 + return -ENODEV;
805 +
806 +- return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
807 ++ /*
808 ++ * If the device has a _HID (or _CID) returning a valid ACPI/PNP
809 ++ * device ID, it is better to make it look less attractive here, so that
810 ++ * the other device with the same _ADR value (that may not have a valid
811 ++ * device ID) can be matched going forward. [This means a second spec
812 ++ * violation in a row, so whatever we do here is best effort anyway.]
813 ++ */
814 ++ return sta_present && list_empty(&adev->pnp.ids) ?
815 ++ FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
816 + }
817 +
818 + struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
819 +diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
820 +index 4facc7517a6a..909311016108 100644
821 +--- a/drivers/char/ipmi/ipmi_watchdog.c
822 ++++ b/drivers/char/ipmi/ipmi_watchdog.c
823 +@@ -1162,10 +1162,11 @@ static int wdog_reboot_handler(struct notifier_block *this,
824 + ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
825 + ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
826 + } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
827 +- /* Set a long timer to let the reboot happens, but
828 +- reboot if it hangs, but only if the watchdog
829 ++ /* Set a long timer to let the reboot happen or
830 ++ reset if it hangs, but only if the watchdog
831 + timer was already running. */
832 +- timeout = 120;
833 ++ if (timeout < 120)
834 ++ timeout = 120;
835 + pretimeout = 0;
836 + ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
837 + ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
838 +diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
839 +index 586f9543de73..40be3747724d 100644
840 +--- a/drivers/dax/dax.c
841 ++++ b/drivers/dax/dax.c
842 +@@ -546,7 +546,8 @@ static void dax_dev_release(struct device *dev)
843 + struct dax_dev *dax_dev = to_dax_dev(dev);
844 + struct dax_region *dax_region = dax_dev->region;
845 +
846 +- ida_simple_remove(&dax_region->ida, dax_dev->id);
847 ++ if (dax_dev->id >= 0)
848 ++ ida_simple_remove(&dax_region->ida, dax_dev->id);
849 + ida_simple_remove(&dax_minor_ida, MINOR(dev->devt));
850 + dax_region_put(dax_region);
851 + iput(dax_dev->inode);
852 +@@ -581,7 +582,7 @@ static void unregister_dax_dev(void *dev)
853 + }
854 +
855 + struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
856 +- struct resource *res, int count)
857 ++ int id, struct resource *res, int count)
858 + {
859 + struct device *parent = dax_region->dev;
860 + struct dax_dev *dax_dev;
861 +@@ -608,10 +609,16 @@ struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
862 + if (i < count)
863 + goto err_id;
864 +
865 +- dax_dev->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
866 +- if (dax_dev->id < 0) {
867 +- rc = dax_dev->id;
868 +- goto err_id;
869 ++ if (id < 0) {
870 ++ id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL);
871 ++ dax_dev->id = id;
872 ++ if (id < 0) {
873 ++ rc = id;
874 ++ goto err_id;
875 ++ }
876 ++ } else {
877 ++ /* region provider owns @id lifetime */
878 ++ dax_dev->id = -1;
879 + }
880 +
881 + minor = ida_simple_get(&dax_minor_ida, 0, 0, GFP_KERNEL);
882 +@@ -650,7 +657,7 @@ struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
883 + dev->parent = parent;
884 + dev->groups = dax_attribute_groups;
885 + dev->release = dax_dev_release;
886 +- dev_set_name(dev, "dax%d.%d", dax_region->id, dax_dev->id);
887 ++ dev_set_name(dev, "dax%d.%d", dax_region->id, id);
888 + rc = device_add(dev);
889 + if (rc) {
890 + kill_dax_dev(dax_dev);
891 +@@ -669,7 +676,8 @@ struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
892 + err_inode:
893 + ida_simple_remove(&dax_minor_ida, minor);
894 + err_minor:
895 +- ida_simple_remove(&dax_region->ida, dax_dev->id);
896 ++ if (dax_dev->id >= 0)
897 ++ ida_simple_remove(&dax_region->ida, dax_dev->id);
898 + err_id:
899 + kfree(dax_dev);
900 +
901 +diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h
902 +index ddd829ab58c0..b5ed85036b2a 100644
903 +--- a/drivers/dax/dax.h
904 ++++ b/drivers/dax/dax.h
905 +@@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent,
906 + int region_id, struct resource *res, unsigned int align,
907 + void *addr, unsigned long flags);
908 + struct dax_dev *devm_create_dax_dev(struct dax_region *dax_region,
909 +- struct resource *res, int count);
910 ++ int id, struct resource *res, int count);
911 + #endif /* __DAX_H__ */
912 +diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
913 +index 73c6ce93a0d9..eebb35720398 100644
914 +--- a/drivers/dax/pmem.c
915 ++++ b/drivers/dax/pmem.c
916 +@@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data)
917 +
918 + static int dax_pmem_probe(struct device *dev)
919 + {
920 +- int rc;
921 + void *addr;
922 + struct resource res;
923 + struct dax_dev *dax_dev;
924 ++ int rc, id, region_id;
925 + struct nd_pfn_sb *pfn_sb;
926 + struct dax_pmem *dax_pmem;
927 +- struct nd_region *nd_region;
928 + struct nd_namespace_io *nsio;
929 + struct dax_region *dax_region;
930 + struct nd_namespace_common *ndns;
931 +@@ -122,14 +121,17 @@ static int dax_pmem_probe(struct device *dev)
932 + /* adjust the dax_region resource to the start of data */
933 + res.start += le64_to_cpu(pfn_sb->dataoff);
934 +
935 +- nd_region = to_nd_region(dev->parent);
936 +- dax_region = alloc_dax_region(dev, nd_region->id, &res,
937 ++ rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
938 ++ if (rc != 2)
939 ++ return -EINVAL;
940 ++
941 ++ dax_region = alloc_dax_region(dev, region_id, &res,
942 + le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
943 + if (!dax_region)
944 + return -ENOMEM;
945 +
946 + /* TODO: support for subdividing a dax region... */
947 +- dax_dev = devm_create_dax_dev(dax_region, &res, 1);
948 ++ dax_dev = devm_create_dax_dev(dax_region, id, &res, 1);
949 +
950 + /* child dax_dev instances now own the lifetime of the dax_region */
951 + dax_region_put(dax_region);
952 +diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
953 +index 8e67895bcca3..abcc51b343ce 100644
954 +--- a/drivers/dma/ioat/hw.h
955 ++++ b/drivers/dma/ioat/hw.h
956 +@@ -64,6 +64,8 @@
957 + #define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
958 + #define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f
959 +
960 ++#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
961 ++
962 + #define IOAT_VER_1_2 0x12 /* Version 1.2 */
963 + #define IOAT_VER_2_0 0x20 /* Version 2.0 */
964 + #define IOAT_VER_3_0 0x30 /* Version 3.0 */
965 +diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
966 +index d235fbe2564f..0dea6d55f0ff 100644
967 +--- a/drivers/dma/ioat/init.c
968 ++++ b/drivers/dma/ioat/init.c
969 +@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
970 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
971 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
972 +
973 ++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
974 ++
975 + /* I/OAT v3.3 platforms */
976 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
977 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
978 +@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
979 + }
980 + }
981 +
982 ++static inline bool is_skx_ioat(struct pci_dev *pdev)
983 ++{
984 ++ return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
985 ++}
986 ++
987 + static bool is_xeon_cb32(struct pci_dev *pdev)
988 + {
989 + return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
990 +- is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
991 ++ is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
992 + }
993 +
994 + bool is_bwd_ioat(struct pci_dev *pdev)
995 +@@ -1350,6 +1357,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
996 +
997 + device->version = readb(device->reg_base + IOAT_VER_OFFSET);
998 + if (device->version >= IOAT_VER_3_0) {
999 ++ if (is_skx_ioat(pdev))
1000 ++ device->version = IOAT_VER_3_2;
1001 + err = ioat3_dma_probe(device, ioat_dca_enabled);
1002 +
1003 + if (device->version >= IOAT_VER_3_3)
1004 +diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
1005 +index 3f24aeb48c0e..2403475a37cf 100644
1006 +--- a/drivers/dma/ti-dma-crossbar.c
1007 ++++ b/drivers/dma/ti-dma-crossbar.c
1008 +@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
1009 + match = of_match_node(ti_am335x_master_match, dma_node);
1010 + if (!match) {
1011 + dev_err(&pdev->dev, "DMA master is not supported\n");
1012 ++ of_node_put(dma_node);
1013 + return -EINVAL;
1014 + }
1015 +
1016 +@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
1017 + match = of_match_node(ti_dra7_master_match, dma_node);
1018 + if (!match) {
1019 + dev_err(&pdev->dev, "DMA master is not supported\n");
1020 ++ of_node_put(dma_node);
1021 + return -EINVAL;
1022 + }
1023 +
1024 +diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
1025 +index f386f463278d..a904082ed206 100644
1026 +--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
1027 ++++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
1028 +@@ -210,7 +210,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
1029 + void adreno_flush(struct msm_gpu *gpu)
1030 + {
1031 + struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
1032 +- uint32_t wptr = get_wptr(gpu->rb);
1033 ++ uint32_t wptr;
1034 ++
1035 ++ /*
1036 ++ * Mask wptr value that we calculate to fit in the HW range. This is
1037 ++ * to account for the possibility that the last command fit exactly into
1038 ++ * the ringbuffer and rb->next hasn't wrapped to zero yet
1039 ++ */
1040 ++ wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
1041 +
1042 + /* ensure writes to ringbuffer have hit system memory: */
1043 + mb();
1044 +diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
1045 +index b6a0f37a65f3..393973016b52 100644
1046 +--- a/drivers/gpu/drm/msm/msm_gem_submit.c
1047 ++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
1048 +@@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
1049 + pagefault_disable();
1050 + }
1051 +
1052 +- if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
1053 ++ if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
1054 ++ !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
1055 + DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
1056 + ret = -EINVAL;
1057 + goto out_unlock;
1058 +@@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
1059 + {
1060 + uint32_t i, last_offset = 0;
1061 + uint32_t *ptr;
1062 +- int ret;
1063 ++ int ret = 0;
1064 +
1065 + if (offset % 4) {
1066 + DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
1067 +@@ -317,12 +318,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
1068 +
1069 + ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
1070 + if (ret)
1071 +- return -EFAULT;
1072 ++ goto out;
1073 +
1074 + if (submit_reloc.submit_offset % 4) {
1075 + DRM_ERROR("non-aligned reloc offset: %u\n",
1076 + submit_reloc.submit_offset);
1077 +- return -EINVAL;
1078 ++ ret = -EINVAL;
1079 ++ goto out;
1080 + }
1081 +
1082 + /* offset in dwords: */
1083 +@@ -331,12 +333,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
1084 + if ((off >= (obj->base.size / 4)) ||
1085 + (off < last_offset)) {
1086 + DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
1087 +- return -EINVAL;
1088 ++ ret = -EINVAL;
1089 ++ goto out;
1090 + }
1091 +
1092 + ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
1093 + if (ret)
1094 +- return ret;
1095 ++ goto out;
1096 +
1097 + if (valid)
1098 + continue;
1099 +@@ -353,9 +356,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
1100 + last_offset = off;
1101 + }
1102 +
1103 ++out:
1104 + msm_gem_put_vaddr_locked(&obj->base);
1105 +
1106 +- return 0;
1107 ++ return ret;
1108 + }
1109 +
1110 + static void submit_cleanup(struct msm_gem_submit *submit)
1111 +diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
1112 +index f326cf6a32e6..67b34e069abf 100644
1113 +--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
1114 ++++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
1115 +@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
1116 + struct msm_ringbuffer *ring;
1117 + int ret;
1118 +
1119 +- size = ALIGN(size, 4); /* size should be dword aligned */
1120 ++ if (WARN_ON(!is_power_of_2(size)))
1121 ++ return ERR_PTR(-EINVAL);
1122 +
1123 + ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1124 + if (!ring) {
1125 +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
1126 +index 1e1de6bfe85a..5893be9788d3 100644
1127 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
1128 ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
1129 +@@ -27,7 +27,7 @@ struct nv50_disp {
1130 + u8 type[3];
1131 + } pior;
1132 +
1133 +- struct nv50_disp_chan *chan[17];
1134 ++ struct nv50_disp_chan *chan[21];
1135 + };
1136 +
1137 + int nv50_disp_root_scanoutpos(NV50_DISP_MTHD_V0);
1138 +diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
1139 +index c794b2c2d21e..6d8f21290aa2 100644
1140 +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
1141 ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
1142 +@@ -129,7 +129,7 @@ gf100_bar_init(struct nvkm_bar *base)
1143 +
1144 + if (bar->bar[0].mem) {
1145 + addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
1146 +- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
1147 ++ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
1148 + }
1149 +
1150 + return 0;
1151 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
1152 +index 73c971e39b1c..ae125d070212 100644
1153 +--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
1154 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
1155 +@@ -285,7 +285,6 @@ static int rcar_du_remove(struct platform_device *pdev)
1156 +
1157 + drm_kms_helper_poll_fini(ddev);
1158 + drm_mode_config_cleanup(ddev);
1159 +- drm_vblank_cleanup(ddev);
1160 +
1161 + drm_dev_unref(ddev);
1162 +
1163 +@@ -305,7 +304,7 @@ static int rcar_du_probe(struct platform_device *pdev)
1164 + return -ENODEV;
1165 + }
1166 +
1167 +- /* Allocate and initialize the DRM and R-Car device structures. */
1168 ++ /* Allocate and initialize the R-Car device structure. */
1169 + rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
1170 + if (rcdu == NULL)
1171 + return -ENOMEM;
1172 +@@ -315,31 +314,22 @@ static int rcar_du_probe(struct platform_device *pdev)
1173 + rcdu->dev = &pdev->dev;
1174 + rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
1175 +
1176 +- ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
1177 +- if (IS_ERR(ddev))
1178 +- return PTR_ERR(ddev);
1179 +-
1180 +- rcdu->ddev = ddev;
1181 +- ddev->dev_private = rcdu;
1182 +-
1183 + platform_set_drvdata(pdev, rcdu);
1184 +
1185 + /* I/O resources */
1186 + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1187 + rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
1188 +- if (IS_ERR(rcdu->mmio)) {
1189 +- ret = PTR_ERR(rcdu->mmio);
1190 +- goto error;
1191 +- }
1192 +-
1193 +- /* Initialize vertical blanking interrupts handling. Start with vblank
1194 +- * disabled for all CRTCs.
1195 +- */
1196 +- ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
1197 +- if (ret < 0)
1198 +- goto error;
1199 ++ if (IS_ERR(rcdu->mmio))
1200 ++ return PTR_ERR(rcdu->mmio);
1201 +
1202 + /* DRM/KMS objects */
1203 ++ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
1204 ++ if (IS_ERR(ddev))
1205 ++ return PTR_ERR(ddev);
1206 ++
1207 ++ rcdu->ddev = ddev;
1208 ++ ddev->dev_private = rcdu;
1209 ++
1210 + ret = rcar_du_modeset_init(rcdu);
1211 + if (ret < 0) {
1212 + if (ret != -EPROBE_DEFER)
1213 +diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
1214 +index 392c7e6de042..cfc302c65b0b 100644
1215 +--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
1216 ++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
1217 +@@ -567,6 +567,13 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
1218 + if (ret < 0)
1219 + return ret;
1220 +
1221 ++ /* Initialize vertical blanking interrupts handling. Start with vblank
1222 ++ * disabled for all CRTCs.
1223 ++ */
1224 ++ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
1225 ++ if (ret < 0)
1226 ++ return ret;
1227 ++
1228 + /* Initialize the groups. */
1229 + num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
1230 +
1231 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1232 +index c7b53d987f06..fefb9d995d2c 100644
1233 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1234 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
1235 +@@ -519,7 +519,7 @@ static int vmw_cmd_invalid(struct vmw_private *dev_priv,
1236 + struct vmw_sw_context *sw_context,
1237 + SVGA3dCmdHeader *header)
1238 + {
1239 +- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
1240 ++ return -EINVAL;
1241 + }
1242 +
1243 + static int vmw_cmd_ok(struct vmw_private *dev_priv,
1244 +diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1245 +index a5dd7e63ada3..4f3f5749b0c1 100644
1246 +--- a/drivers/hid/hid-core.c
1247 ++++ b/drivers/hid/hid-core.c
1248 +@@ -2484,6 +2484,7 @@ static const struct hid_device_id hid_ignore_list[] = {
1249 + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
1250 + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
1251 + { HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
1252 ++ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
1253 + { HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
1254 + { HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
1255 + #if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
1256 +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1257 +index cfca43f635a6..08fd3f831d62 100644
1258 +--- a/drivers/hid/hid-ids.h
1259 ++++ b/drivers/hid/hid-ids.h
1260 +@@ -819,6 +819,9 @@
1261 + #define USB_VENDOR_ID_PETALYNX 0x18b1
1262 + #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
1263 +
1264 ++#define USB_VENDOR_ID_PETZL 0x2122
1265 ++#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234
1266 ++
1267 + #define USB_VENDOR_ID_PHILIPS 0x0471
1268 + #define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
1269 +
1270 +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
1271 +index 01e3a37b0aef..d118ffe0bfb6 100644
1272 +--- a/drivers/infiniband/core/uverbs_cmd.c
1273 ++++ b/drivers/infiniband/core/uverbs_cmd.c
1274 +@@ -2342,8 +2342,9 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1275 + if (copy_from_user(&cmd, buf, sizeof cmd))
1276 + return -EFAULT;
1277 +
1278 +- if (cmd.port_num < rdma_start_port(ib_dev) ||
1279 +- cmd.port_num > rdma_end_port(ib_dev))
1280 ++ if ((cmd.attr_mask & IB_QP_PORT) &&
1281 ++ (cmd.port_num < rdma_start_port(ib_dev) ||
1282 ++ cmd.port_num > rdma_end_port(ib_dev)))
1283 + return -EINVAL;
1284 +
1285 + INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
1286 +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
1287 +index f2a885eee4bb..8059b7eaf3a8 100644
1288 +--- a/drivers/infiniband/hw/mlx4/main.c
1289 ++++ b/drivers/infiniband/hw/mlx4/main.c
1290 +@@ -1680,9 +1680,19 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
1291 + size += ret;
1292 + }
1293 +
1294 ++ if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
1295 ++ flow_attr->num_of_specs == 1) {
1296 ++ struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
1297 ++ enum ib_flow_spec_type header_spec =
1298 ++ ((union ib_flow_spec *)(flow_attr + 1))->type;
1299 ++
1300 ++ if (header_spec == IB_FLOW_SPEC_ETH)
1301 ++ mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1302 ++ }
1303 ++
1304 + ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
1305 + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1306 +- MLX4_CMD_WRAPPED);
1307 ++ MLX4_CMD_NATIVE);
1308 + if (ret == -ENOMEM)
1309 + pr_err("mcg table is full. Fail to register network rule.\n");
1310 + else if (ret == -ENXIO)
1311 +@@ -1699,7 +1709,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
1312 + int err;
1313 + err = mlx4_cmd(dev, reg_id, 0, 0,
1314 + MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
1315 +- MLX4_CMD_WRAPPED);
1316 ++ MLX4_CMD_NATIVE);
1317 + if (err)
1318 + pr_err("Fail to detach network rule. registration id = 0x%llx\n",
1319 + reg_id);
1320 +diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
1321 +index 54a5e870a8f5..efbcf8435185 100644
1322 +--- a/drivers/irqchip/irq-keystone.c
1323 ++++ b/drivers/irqchip/irq-keystone.c
1324 +@@ -19,9 +19,9 @@
1325 + #include <linux/bitops.h>
1326 + #include <linux/module.h>
1327 + #include <linux/moduleparam.h>
1328 ++#include <linux/interrupt.h>
1329 + #include <linux/irqdomain.h>
1330 + #include <linux/irqchip.h>
1331 +-#include <linux/irqchip/chained_irq.h>
1332 + #include <linux/of.h>
1333 + #include <linux/of_platform.h>
1334 + #include <linux/mfd/syscon.h>
1335 +@@ -39,6 +39,7 @@ struct keystone_irq_device {
1336 + struct irq_domain *irqd;
1337 + struct regmap *devctrl_regs;
1338 + u32 devctrl_offset;
1339 ++ raw_spinlock_t wa_lock;
1340 + };
1341 +
1342 + static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
1343 +@@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d)
1344 + /* nothing to do here */
1345 + }
1346 +
1347 +-static void keystone_irq_handler(struct irq_desc *desc)
1348 ++static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
1349 + {
1350 +- unsigned int irq = irq_desc_get_irq(desc);
1351 +- struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
1352 ++ struct keystone_irq_device *kirq = keystone_irq;
1353 ++ unsigned long wa_lock_flags;
1354 + unsigned long pending;
1355 + int src, virq;
1356 +
1357 + dev_dbg(kirq->dev, "start irq %d\n", irq);
1358 +
1359 +- chained_irq_enter(irq_desc_get_chip(desc), desc);
1360 +-
1361 + pending = keystone_irq_readl(kirq);
1362 + keystone_irq_writel(kirq, pending);
1363 +
1364 +@@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc)
1365 + if (!virq)
1366 + dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
1367 + src, virq);
1368 ++ raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
1369 + generic_handle_irq(virq);
1370 ++ raw_spin_unlock_irqrestore(&kirq->wa_lock,
1371 ++ wa_lock_flags);
1372 + }
1373 + }
1374 +
1375 +- chained_irq_exit(irq_desc_get_chip(desc), desc);
1376 +-
1377 + dev_dbg(kirq->dev, "end irq %d\n", irq);
1378 ++ return IRQ_HANDLED;
1379 + }
1380 +
1381 + static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
1382 +@@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev)
1383 + return -ENODEV;
1384 + }
1385 +
1386 ++ raw_spin_lock_init(&kirq->wa_lock);
1387 ++
1388 + platform_set_drvdata(pdev, kirq);
1389 +
1390 +- irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq);
1391 ++ ret = request_irq(kirq->irq, keystone_irq_handler,
1392 ++ 0, dev_name(dev), kirq);
1393 ++ if (ret) {
1394 ++ irq_domain_remove(kirq->irqd);
1395 ++ return ret;
1396 ++ }
1397 +
1398 + /* clear all source bits */
1399 + keystone_irq_writel(kirq, ~0x0);
1400 +@@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev)
1401 + struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
1402 + int hwirq;
1403 +
1404 ++ free_irq(kirq->irq, kirq);
1405 ++
1406 + for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
1407 + irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
1408 +
1409 +diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
1410 +index 17304705f2cf..05fa9f7af53c 100644
1411 +--- a/drivers/irqchip/irq-mxs.c
1412 ++++ b/drivers/irqchip/irq-mxs.c
1413 +@@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = {
1414 + .irq_ack = icoll_ack_irq,
1415 + .irq_mask = icoll_mask_irq,
1416 + .irq_unmask = icoll_unmask_irq,
1417 ++ .flags = IRQCHIP_MASK_ON_SUSPEND |
1418 ++ IRQCHIP_SKIP_SET_WAKE,
1419 + };
1420 +
1421 + static struct irq_chip asm9260_icoll_chip = {
1422 + .irq_ack = icoll_ack_irq,
1423 + .irq_mask = asm9260_mask_irq,
1424 + .irq_unmask = asm9260_unmask_irq,
1425 ++ .flags = IRQCHIP_MASK_ON_SUSPEND |
1426 ++ IRQCHIP_SKIP_SET_WAKE,
1427 + };
1428 +
1429 + asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
1430 +diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
1431 +index 9b856e1890d1..e4c43a17b333 100644
1432 +--- a/drivers/isdn/i4l/isdn_common.c
1433 ++++ b/drivers/isdn/i4l/isdn_common.c
1434 +@@ -1379,6 +1379,7 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
1435 + if (arg) {
1436 + if (copy_from_user(bname, argp, sizeof(bname) - 1))
1437 + return -EFAULT;
1438 ++ bname[sizeof(bname)-1] = 0;
1439 + } else
1440 + return -EINVAL;
1441 + ret = mutex_lock_interruptible(&dev->mtx);
1442 +diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
1443 +index c151c6daa67e..f63a110b7bcb 100644
1444 +--- a/drivers/isdn/i4l/isdn_net.c
1445 ++++ b/drivers/isdn/i4l/isdn_net.c
1446 +@@ -2611,10 +2611,9 @@ isdn_net_newslave(char *parm)
1447 + char newname[10];
1448 +
1449 + if (p) {
1450 +- /* Slave-Name MUST not be empty */
1451 +- if (!strlen(p + 1))
1452 ++ /* Slave-Name MUST not be empty or overflow 'newname' */
1453 ++ if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
1454 + return NULL;
1455 +- strcpy(newname, p + 1);
1456 + *p = 0;
1457 + /* Master must already exist */
1458 + if (!(n = isdn_net_findif(parm)))
1459 +diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
1460 +index 9c1e8adaf4fc..bf3fbd00a091 100644
1461 +--- a/drivers/isdn/i4l/isdn_ppp.c
1462 ++++ b/drivers/isdn/i4l/isdn_ppp.c
1463 +@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s
1464 + id);
1465 + return NULL;
1466 + } else {
1467 +- rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
1468 ++ rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
1469 + if (!rs)
1470 + return NULL;
1471 + rs->state = CCPResetIdle;
1472 +diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
1473 +index 4a36632c236f..87ef465c6947 100644
1474 +--- a/drivers/mailbox/mailbox.c
1475 ++++ b/drivers/mailbox/mailbox.c
1476 +@@ -104,11 +104,14 @@ static void tx_tick(struct mbox_chan *chan, int r)
1477 + /* Submit next message */
1478 + msg_submit(chan);
1479 +
1480 ++ if (!mssg)
1481 ++ return;
1482 ++
1483 + /* Notify the client */
1484 +- if (mssg && chan->cl->tx_done)
1485 ++ if (chan->cl->tx_done)
1486 + chan->cl->tx_done(chan->cl, mssg, r);
1487 +
1488 +- if (chan->cl->tx_block)
1489 ++ if (r != -ETIME && chan->cl->tx_block)
1490 + complete(&chan->tx_complete);
1491 + }
1492 +
1493 +@@ -261,7 +264,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
1494 +
1495 + msg_submit(chan);
1496 +
1497 +- if (chan->cl->tx_block && chan->active_req) {
1498 ++ if (chan->cl->tx_block) {
1499 + unsigned long wait;
1500 + int ret;
1501 +
1502 +@@ -272,8 +275,8 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
1503 +
1504 + ret = wait_for_completion_timeout(&chan->tx_complete, wait);
1505 + if (ret == 0) {
1506 +- t = -EIO;
1507 +- tx_tick(chan, -EIO);
1508 ++ t = -ETIME;
1509 ++ tx_tick(chan, t);
1510 + }
1511 + }
1512 +
1513 +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
1514 +index 8f117d6372c9..383f19c6bf24 100644
1515 +--- a/drivers/md/raid5.c
1516 ++++ b/drivers/md/raid5.c
1517 +@@ -5843,6 +5843,8 @@ static void raid5_do_work(struct work_struct *work)
1518 + pr_debug("%d stripes handled\n", handled);
1519 +
1520 + spin_unlock_irq(&conf->device_lock);
1521 ++
1522 ++ async_tx_issue_pending_all();
1523 + blk_finish_plug(&plug);
1524 +
1525 + pr_debug("--- raid5worker inactive\n");
1526 +diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
1527 +index 0a060339e516..2e7185030741 100644
1528 +--- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
1529 ++++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
1530 +@@ -211,7 +211,7 @@ static int s5c73m3_3a_lock(struct s5c73m3 *state, struct v4l2_ctrl *ctrl)
1531 + }
1532 +
1533 + if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
1534 +- ret = s5c73m3_af_run(state, ~af_lock);
1535 ++ ret = s5c73m3_af_run(state, !af_lock);
1536 +
1537 + return ret;
1538 + }
1539 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
1540 +index b3c9cbef766e..5626908f3f7a 100644
1541 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
1542 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
1543 +@@ -5186,7 +5186,9 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
1544 + skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1545 + skb->inner_protocol != htons(ETH_P_TEB) ||
1546 + skb_inner_mac_header(skb) - skb_transport_header(skb) !=
1547 +- sizeof(struct udphdr) + sizeof(struct vxlanhdr))
1548 ++ sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
1549 ++ !adapter->vxlan_port ||
1550 ++ udp_hdr(skb)->dest != adapter->vxlan_port)
1551 + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1552 +
1553 + return features;
1554 +diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
1555 +index 2a9dd460a95f..e1f9e7cebf8f 100644
1556 +--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
1557 ++++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
1558 +@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
1559 + if (!buf)
1560 + return -ENOMEM;
1561 +
1562 ++ if (offset_in_page(buf)) {
1563 ++ dma_free_coherent(dev, PAGE_SIZE << order,
1564 ++ buf, sg_dma_address(mem));
1565 ++ return -ENOMEM;
1566 ++ }
1567 ++
1568 + sg_set_buf(mem, buf, PAGE_SIZE << order);
1569 +- BUG_ON(mem->offset);
1570 + sg_dma_len(mem) = PAGE_SIZE << order;
1571 + return 0;
1572 + }
1573 +diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
1574 +index b2ca8a635b2e..551786f58e59 100644
1575 +--- a/drivers/net/ethernet/mellanox/mlx4/main.c
1576 ++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
1577 +@@ -42,6 +42,7 @@
1578 + #include <linux/io-mapping.h>
1579 + #include <linux/delay.h>
1580 + #include <linux/kmod.h>
1581 ++#include <linux/etherdevice.h>
1582 + #include <net/devlink.h>
1583 +
1584 + #include <linux/mlx4/device.h>
1585 +@@ -782,6 +783,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
1586 + }
1587 + EXPORT_SYMBOL(mlx4_is_slave_active);
1588 +
1589 ++void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
1590 ++ struct _rule_hw *eth_header)
1591 ++{
1592 ++ if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
1593 ++ is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
1594 ++ struct mlx4_net_trans_rule_hw_eth *eth =
1595 ++ (struct mlx4_net_trans_rule_hw_eth *)eth_header;
1596 ++ struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
1597 ++ bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
1598 ++ next_rule->rsvd == 0;
1599 ++
1600 ++ if (last_rule)
1601 ++ ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
1602 ++ }
1603 ++}
1604 ++EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
1605 ++
1606 + static void slave_adjust_steering_mode(struct mlx4_dev *dev,
1607 + struct mlx4_dev_cap *dev_cap,
1608 + struct mlx4_init_hca_param *hca_param)
1609 +diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1610 +index 32f76bf018c3..1822382212ee 100644
1611 +--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1612 ++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1613 +@@ -4165,22 +4165,6 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
1614 + return 0;
1615 + }
1616 +
1617 +-static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
1618 +- struct _rule_hw *eth_header)
1619 +-{
1620 +- if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
1621 +- is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
1622 +- struct mlx4_net_trans_rule_hw_eth *eth =
1623 +- (struct mlx4_net_trans_rule_hw_eth *)eth_header;
1624 +- struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
1625 +- bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
1626 +- next_rule->rsvd == 0;
1627 +-
1628 +- if (last_rule)
1629 +- ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
1630 +- }
1631 +-}
1632 +-
1633 + /*
1634 + * In case of missing eth header, append eth header with a MAC address
1635 + * assigned to the VF.
1636 +@@ -4364,10 +4348,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
1637 + header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
1638 +
1639 + if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
1640 +- handle_eth_header_mcast_prio(ctrl, rule_header);
1641 +-
1642 +- if (slave == dev->caps.function)
1643 +- goto execute;
1644 ++ mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
1645 +
1646 + switch (header_id) {
1647 + case MLX4_NET_TRANS_RULE_ID_ETH:
1648 +@@ -4395,7 +4376,6 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
1649 + goto err_put_qp;
1650 + }
1651 +
1652 +-execute:
1653 + err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
1654 + vhcr->in_modifier, 0,
1655 + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
1656 +@@ -4474,6 +4454,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
1657 + struct res_qp *rqp;
1658 + struct res_fs_rule *rrule;
1659 + u64 mirr_reg_id;
1660 ++ int qpn;
1661 +
1662 + if (dev->caps.steering_mode !=
1663 + MLX4_STEERING_MODE_DEVICE_MANAGED)
1664 +@@ -4490,10 +4471,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
1665 + }
1666 + mirr_reg_id = rrule->mirr_rule_id;
1667 + kfree(rrule->mirr_mbox);
1668 ++ qpn = rrule->qpn;
1669 +
1670 + /* Release the rule form busy state before removal */
1671 + put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
1672 +- err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
1673 ++ err = get_res(dev, slave, qpn, RES_QP, &rqp);
1674 + if (err)
1675 + return err;
1676 +
1677 +@@ -4518,7 +4500,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
1678 + if (!err)
1679 + atomic_dec(&rqp->ref_count);
1680 + out:
1681 +- put_res(dev, slave, rrule->qpn, RES_QP);
1682 ++ put_res(dev, slave, qpn, RES_QP);
1683 + return err;
1684 + }
1685 +
1686 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1687 +index b08b9e2c6a76..6ffd5d2a70aa 100644
1688 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1689 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
1690 +@@ -672,6 +672,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
1691 + if (err)
1692 + goto err_reps;
1693 + }
1694 ++
1695 ++ /* disable PF RoCE so missed packets don't go through RoCE steering */
1696 ++ mlx5_dev_list_lock();
1697 ++ mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1698 ++ mlx5_dev_list_unlock();
1699 ++
1700 + return 0;
1701 +
1702 + err_reps:
1703 +@@ -695,6 +701,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
1704 + {
1705 + int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
1706 +
1707 ++ /* enable back PF RoCE */
1708 ++ mlx5_dev_list_lock();
1709 ++ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
1710 ++ mlx5_dev_list_unlock();
1711 ++
1712 + mlx5_eswitch_disable_sriov(esw);
1713 + err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
1714 + if (err) {
1715 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1716 +index bf000d819a21..2c4350a1c629 100644
1717 +--- a/drivers/net/ethernet/realtek/r8169.c
1718 ++++ b/drivers/net/ethernet/realtek/r8169.c
1719 +@@ -326,6 +326,7 @@ enum cfg_version {
1720 + static const struct pci_device_id rtl8169_pci_tbl[] = {
1721 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
1722 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
1723 ++ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
1724 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
1725 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
1726 + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
1727 +diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
1728 +index a2d218b28c0e..12be259394c6 100644
1729 +--- a/drivers/net/ethernet/renesas/sh_eth.c
1730 ++++ b/drivers/net/ethernet/renesas/sh_eth.c
1731 +@@ -819,6 +819,7 @@ static struct sh_eth_cpu_data sh7734_data = {
1732 + .tsu = 1,
1733 + .hw_crc = 1,
1734 + .select_mii = 1,
1735 ++ .shift_rd0 = 1,
1736 + };
1737 +
1738 + /* SH7763 */
1739 +diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1740 +index 32b555a72e13..9e7b7836774f 100644
1741 +--- a/drivers/net/phy/phy_device.c
1742 ++++ b/drivers/net/phy/phy_device.c
1743 +@@ -1792,7 +1792,7 @@ static struct phy_driver genphy_driver[] = {
1744 + .phy_id = 0xffffffff,
1745 + .phy_id_mask = 0xffffffff,
1746 + .name = "Generic PHY",
1747 +- .soft_reset = genphy_soft_reset,
1748 ++ .soft_reset = genphy_no_soft_reset,
1749 + .config_init = genphy_config_init,
1750 + .features = PHY_GBIT_FEATURES | SUPPORTED_MII |
1751 + SUPPORTED_AUI | SUPPORTED_FIBRE |
1752 +diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
1753 +index dc7b6392e75a..50737def774c 100644
1754 +--- a/drivers/net/usb/asix_devices.c
1755 ++++ b/drivers/net/usb/asix_devices.c
1756 +@@ -1369,6 +1369,7 @@ static struct usb_driver asix_driver = {
1757 + .probe = usbnet_probe,
1758 + .suspend = asix_suspend,
1759 + .resume = asix_resume,
1760 ++ .reset_resume = asix_resume,
1761 + .disconnect = usbnet_disconnect,
1762 + .supports_autosuspend = 1,
1763 + .disable_hub_initiated_lpm = 1,
1764 +diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
1765 +index c9a8bb1186f2..c7956e181f80 100644
1766 +--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
1767 ++++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
1768 +@@ -660,6 +660,9 @@ ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
1769 + struct sk_buff *skb;
1770 + u32 cmd_id;
1771 +
1772 ++ if (!ar->wmi.ops->gen_vdev_spectral_conf)
1773 ++ return -EOPNOTSUPP;
1774 ++
1775 + skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
1776 + if (IS_ERR(skb))
1777 + return PTR_ERR(skb);
1778 +@@ -675,6 +678,9 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
1779 + struct sk_buff *skb;
1780 + u32 cmd_id;
1781 +
1782 ++ if (!ar->wmi.ops->gen_vdev_spectral_enable)
1783 ++ return -EOPNOTSUPP;
1784 ++
1785 + skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
1786 + enable);
1787 + if (IS_ERR(skb))
1788 +diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
1789 +index e7130b54d1d8..24b07a0ce6f7 100644
1790 +--- a/drivers/net/wireless/ath/wil6210/main.c
1791 ++++ b/drivers/net/wireless/ath/wil6210/main.c
1792 +@@ -384,18 +384,19 @@ static void wil_fw_error_worker(struct work_struct *work)
1793 +
1794 + wil->last_fw_recovery = jiffies;
1795 +
1796 ++ wil_info(wil, "fw error recovery requested (try %d)...\n",
1797 ++ wil->recovery_count);
1798 ++ if (!no_fw_recovery)
1799 ++ wil->recovery_state = fw_recovery_running;
1800 ++ if (wil_wait_for_recovery(wil) != 0)
1801 ++ return;
1802 ++
1803 + mutex_lock(&wil->mutex);
1804 + switch (wdev->iftype) {
1805 + case NL80211_IFTYPE_STATION:
1806 + case NL80211_IFTYPE_P2P_CLIENT:
1807 + case NL80211_IFTYPE_MONITOR:
1808 +- wil_info(wil, "fw error recovery requested (try %d)...\n",
1809 +- wil->recovery_count);
1810 +- if (!no_fw_recovery)
1811 +- wil->recovery_state = fw_recovery_running;
1812 +- if (0 != wil_wait_for_recovery(wil))
1813 +- break;
1814 +-
1815 ++ /* silent recovery, upper layers will see disconnect */
1816 + __wil_down(wil);
1817 + __wil_up(wil);
1818 + break;
1819 +diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
1820 +index 5e797d5c38ed..712936f5d2d6 100644
1821 +--- a/drivers/nfc/fdp/i2c.c
1822 ++++ b/drivers/nfc/fdp/i2c.c
1823 +@@ -210,14 +210,14 @@ static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id)
1824 + struct sk_buff *skb;
1825 + int r;
1826 +
1827 +- client = phy->i2c_dev;
1828 +- dev_dbg(&client->dev, "%s\n", __func__);
1829 +-
1830 + if (!phy || irq != phy->i2c_dev->irq) {
1831 + WARN_ON_ONCE(1);
1832 + return IRQ_NONE;
1833 + }
1834 +
1835 ++ client = phy->i2c_dev;
1836 ++ dev_dbg(&client->dev, "%s\n", __func__);
1837 ++
1838 + r = fdp_nci_i2c_read(phy, &skb);
1839 +
1840 + if (r == -EREMOTEIO)
1841 +diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
1842 +index 2b2330b235e6..073e4a478c89 100644
1843 +--- a/drivers/nfc/port100.c
1844 ++++ b/drivers/nfc/port100.c
1845 +@@ -725,23 +725,33 @@ static int port100_submit_urb_for_ack(struct port100 *dev, gfp_t flags)
1846 +
1847 + static int port100_send_ack(struct port100 *dev)
1848 + {
1849 +- int rc;
1850 ++ int rc = 0;
1851 +
1852 + mutex_lock(&dev->out_urb_lock);
1853 +
1854 +- init_completion(&dev->cmd_cancel_done);
1855 ++ /*
1856 ++ * If prior cancel is in-flight (dev->cmd_cancel == true), we
1857 ++ * can skip to send cancel. Then this will wait the prior
1858 ++ * cancel, or merged into the next cancel rarely if next
1859 ++ * cancel was started before waiting done. In any case, this
1860 ++ * will be waked up soon or later.
1861 ++ */
1862 ++ if (!dev->cmd_cancel) {
1863 ++ reinit_completion(&dev->cmd_cancel_done);
1864 +
1865 +- usb_kill_urb(dev->out_urb);
1866 ++ usb_kill_urb(dev->out_urb);
1867 +
1868 +- dev->out_urb->transfer_buffer = ack_frame;
1869 +- dev->out_urb->transfer_buffer_length = sizeof(ack_frame);
1870 +- rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
1871 ++ dev->out_urb->transfer_buffer = ack_frame;
1872 ++ dev->out_urb->transfer_buffer_length = sizeof(ack_frame);
1873 ++ rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);
1874 +
1875 +- /* Set the cmd_cancel flag only if the URB has been successfully
1876 +- * submitted. It will be reset by the out URB completion callback
1877 +- * port100_send_complete().
1878 +- */
1879 +- dev->cmd_cancel = !rc;
1880 ++ /*
1881 ++ * Set the cmd_cancel flag only if the URB has been
1882 ++ * successfully submitted. It will be reset by the out
1883 ++ * URB completion callback port100_send_complete().
1884 ++ */
1885 ++ dev->cmd_cancel = !rc;
1886 ++ }
1887 +
1888 + mutex_unlock(&dev->out_urb_lock);
1889 +
1890 +@@ -928,8 +938,8 @@ static void port100_send_complete(struct urb *urb)
1891 + struct port100 *dev = urb->context;
1892 +
1893 + if (dev->cmd_cancel) {
1894 ++ complete_all(&dev->cmd_cancel_done);
1895 + dev->cmd_cancel = false;
1896 +- complete(&dev->cmd_cancel_done);
1897 + }
1898 +
1899 + switch (urb->status) {
1900 +@@ -1543,6 +1553,7 @@ static int port100_probe(struct usb_interface *interface,
1901 + PORT100_COMM_RF_HEAD_MAX_LEN;
1902 + dev->skb_tailroom = PORT100_FRAME_TAIL_LEN;
1903 +
1904 ++ init_completion(&dev->cmd_cancel_done);
1905 + INIT_WORK(&dev->cmd_complete_work, port100_wq_cmd_complete);
1906 +
1907 + /* The first thing to do with the Port-100 is to set the command type
1908 +diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
1909 +index ac27b9bac3b9..8e7b120696fa 100644
1910 +--- a/drivers/nvmem/imx-ocotp.c
1911 ++++ b/drivers/nvmem/imx-ocotp.c
1912 +@@ -71,7 +71,7 @@ static struct nvmem_config imx_ocotp_nvmem_config = {
1913 +
1914 + static const struct of_device_id imx_ocotp_dt_ids[] = {
1915 + { .compatible = "fsl,imx6q-ocotp", (void *)128 },
1916 +- { .compatible = "fsl,imx6sl-ocotp", (void *)32 },
1917 ++ { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
1918 + { .compatible = "fsl,imx6sx-ocotp", (void *)128 },
1919 + { },
1920 + };
1921 +diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
1922 +index 9d253cb83ee7..e70410beb83a 100644
1923 +--- a/drivers/scsi/bfa/bfad.c
1924 ++++ b/drivers/scsi/bfa/bfad.c
1925 +@@ -64,9 +64,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
1926 + u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
1927 + u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
1928 +
1929 +-#define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin"
1930 +-#define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin"
1931 +-#define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
1932 ++#define BFAD_FW_FILE_CB "cbfw-3.2.5.1.bin"
1933 ++#define BFAD_FW_FILE_CT "ctfw-3.2.5.1.bin"
1934 ++#define BFAD_FW_FILE_CT2 "ct2fw-3.2.5.1.bin"
1935 +
1936 + static u32 *bfad_load_fwimg(struct pci_dev *pdev);
1937 + static void bfad_free_fwimg(void);
1938 +diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
1939 +index f9e862093a25..cfcfff48e8e1 100644
1940 +--- a/drivers/scsi/bfa/bfad_drv.h
1941 ++++ b/drivers/scsi/bfa/bfad_drv.h
1942 +@@ -58,7 +58,7 @@
1943 + #ifdef BFA_DRIVER_VERSION
1944 + #define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
1945 + #else
1946 +-#define BFAD_DRIVER_VERSION "3.2.25.0"
1947 ++#define BFAD_DRIVER_VERSION "3.2.25.1"
1948 + #endif
1949 +
1950 + #define BFAD_PROTO_NAME FCPI_NAME
1951 +diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
1952 +index 9ddc9200e0a4..9e4b7709043e 100644
1953 +--- a/drivers/scsi/fnic/fnic.h
1954 ++++ b/drivers/scsi/fnic/fnic.h
1955 +@@ -248,6 +248,7 @@ struct fnic {
1956 + struct completion *remove_wait; /* device remove thread blocks */
1957 +
1958 + atomic_t in_flight; /* io counter */
1959 ++ bool internal_reset_inprogress;
1960 + u32 _reserved; /* fill hole */
1961 + unsigned long state_flags; /* protected by host lock */
1962 + enum fnic_state state;
1963 +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
1964 +index d9fd2f841585..44dd372aa7d3 100644
1965 +--- a/drivers/scsi/fnic/fnic_scsi.c
1966 ++++ b/drivers/scsi/fnic/fnic_scsi.c
1967 +@@ -2573,6 +2573,19 @@ int fnic_host_reset(struct scsi_cmnd *sc)
1968 + unsigned long wait_host_tmo;
1969 + struct Scsi_Host *shost = sc->device->host;
1970 + struct fc_lport *lp = shost_priv(shost);
1971 ++ struct fnic *fnic = lport_priv(lp);
1972 ++ unsigned long flags;
1973 ++
1974 ++ spin_lock_irqsave(&fnic->fnic_lock, flags);
1975 ++ if (fnic->internal_reset_inprogress == 0) {
1976 ++ fnic->internal_reset_inprogress = 1;
1977 ++ } else {
1978 ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1979 ++ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
1980 ++ "host reset in progress skipping another host reset\n");
1981 ++ return SUCCESS;
1982 ++ }
1983 ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1984 +
1985 + /*
1986 + * If fnic_reset is successful, wait for fabric login to complete
1987 +@@ -2593,6 +2606,9 @@ int fnic_host_reset(struct scsi_cmnd *sc)
1988 + }
1989 + }
1990 +
1991 ++ spin_lock_irqsave(&fnic->fnic_lock, flags);
1992 ++ fnic->internal_reset_inprogress = 0;
1993 ++ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1994 + return ret;
1995 + }
1996 +
1997 +diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
1998 +index 396b32dca074..7cf70aaec0ba 100644
1999 +--- a/drivers/scsi/snic/snic_main.c
2000 ++++ b/drivers/scsi/snic/snic_main.c
2001 +@@ -591,6 +591,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2002 + if (!pool) {
2003 + SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
2004 +
2005 ++ ret = -ENOMEM;
2006 + goto err_free_res;
2007 + }
2008 +
2009 +@@ -601,6 +602,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2010 + if (!pool) {
2011 + SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
2012 +
2013 ++ ret = -ENOMEM;
2014 + goto err_free_dflt_sgl_pool;
2015 + }
2016 +
2017 +@@ -611,6 +613,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2018 + if (!pool) {
2019 + SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
2020 +
2021 ++ ret = -ENOMEM;
2022 + goto err_free_max_sgl_pool;
2023 + }
2024 +
2025 +diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
2026 +index 27960e46135d..c4226c07e091 100644
2027 +--- a/drivers/spi/spi-dw.c
2028 ++++ b/drivers/spi/spi-dw.c
2029 +@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
2030 +
2031 + static int dw_spi_debugfs_init(struct dw_spi *dws)
2032 + {
2033 +- dws->debugfs = debugfs_create_dir("dw_spi", NULL);
2034 ++ char name[128];
2035 ++
2036 ++ snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
2037 ++ dws->debugfs = debugfs_create_dir(name, NULL);
2038 + if (!dws->debugfs)
2039 + return -ENOMEM;
2040 +
2041 +diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
2042 +index a34fd5afb9a8..ec9979070479 100644
2043 +--- a/drivers/staging/comedi/comedi_fops.c
2044 ++++ b/drivers/staging/comedi/comedi_fops.c
2045 +@@ -2898,9 +2898,6 @@ static int __init comedi_init(void)
2046 +
2047 + comedi_class->dev_groups = comedi_dev_groups;
2048 +
2049 +- /* XXX requires /proc interface */
2050 +- comedi_proc_init();
2051 +-
2052 + /* create devices files for legacy/manual use */
2053 + for (i = 0; i < comedi_num_legacy_minors; i++) {
2054 + struct comedi_device *dev;
2055 +@@ -2918,6 +2915,9 @@ static int __init comedi_init(void)
2056 + mutex_unlock(&dev->mutex);
2057 + }
2058 +
2059 ++ /* XXX requires /proc interface */
2060 ++ comedi_proc_init();
2061 ++
2062 + return 0;
2063 + }
2064 + module_init(comedi_init);
2065 +diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
2066 +index 5dd1832564c7..35b63518baf6 100644
2067 +--- a/drivers/usb/dwc3/dwc3-omap.c
2068 ++++ b/drivers/usb/dwc3/dwc3-omap.c
2069 +@@ -19,6 +19,7 @@
2070 + #include <linux/module.h>
2071 + #include <linux/kernel.h>
2072 + #include <linux/slab.h>
2073 ++#include <linux/irq.h>
2074 + #include <linux/interrupt.h>
2075 + #include <linux/platform_device.h>
2076 + #include <linux/platform_data/dwc3-omap.h>
2077 +@@ -511,7 +512,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
2078 +
2079 + /* check the DMA Status */
2080 + reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG);
2081 +-
2082 ++ irq_set_status_flags(omap->irq, IRQ_NOAUTOEN);
2083 + ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt,
2084 + dwc3_omap_interrupt_thread, IRQF_SHARED,
2085 + "dwc3-omap", omap);
2086 +@@ -532,7 +533,7 @@ static int dwc3_omap_probe(struct platform_device *pdev)
2087 + }
2088 +
2089 + dwc3_omap_enable_irqs(omap);
2090 +-
2091 ++ enable_irq(omap->irq);
2092 + return 0;
2093 +
2094 + err2:
2095 +@@ -553,6 +554,7 @@ static int dwc3_omap_remove(struct platform_device *pdev)
2096 + extcon_unregister_notifier(omap->edev, EXTCON_USB, &omap->vbus_nb);
2097 + extcon_unregister_notifier(omap->edev, EXTCON_USB_HOST, &omap->id_nb);
2098 + dwc3_omap_disable_irqs(omap);
2099 ++ disable_irq(omap->irq);
2100 + of_platform_depopulate(omap->dev);
2101 + pm_runtime_put_sync(&pdev->dev);
2102 + pm_runtime_disable(&pdev->dev);
2103 +diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
2104 +index b0f71957d00b..b6d4b484c51a 100644
2105 +--- a/drivers/usb/gadget/function/f_hid.c
2106 ++++ b/drivers/usb/gadget/function/f_hid.c
2107 +@@ -582,7 +582,7 @@ static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2108 + }
2109 + status = usb_ep_enable(hidg->out_ep);
2110 + if (status < 0) {
2111 +- ERROR(cdev, "Enable IN endpoint FAILED!\n");
2112 ++ ERROR(cdev, "Enable OUT endpoint FAILED!\n");
2113 + goto fail;
2114 + }
2115 + hidg->out_ep->driver_data = hidg;
2116 +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
2117 +index 031bc08d000d..43559bed7822 100644
2118 +--- a/drivers/vfio/pci/vfio_pci.c
2119 ++++ b/drivers/vfio/pci/vfio_pci.c
2120 +@@ -1173,6 +1173,10 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
2121 + return ret;
2122 +
2123 + vdev->barmap[index] = pci_iomap(pdev, index, 0);
2124 ++ if (!vdev->barmap[index]) {
2125 ++ pci_release_selected_regions(pdev, 1 << index);
2126 ++ return -ENOMEM;
2127 ++ }
2128 + }
2129 +
2130 + vma->vm_private_data = vdev;
2131 +diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
2132 +index 5ffd1d9ad4bd..357243d76f10 100644
2133 +--- a/drivers/vfio/pci/vfio_pci_rdwr.c
2134 ++++ b/drivers/vfio/pci/vfio_pci_rdwr.c
2135 +@@ -193,7 +193,10 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
2136 + if (!vdev->has_vga)
2137 + return -EINVAL;
2138 +
2139 +- switch (pos) {
2140 ++ if (pos > 0xbfffful)
2141 ++ return -EINVAL;
2142 ++
2143 ++ switch ((u32)pos) {
2144 + case 0xa0000 ... 0xbffff:
2145 + count = min(count, (size_t)(0xc0000 - pos));
2146 + iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
2147 +diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
2148 +index 2d3b691f3fc4..038ac6934fe9 100644
2149 +--- a/drivers/video/fbdev/cobalt_lcdfb.c
2150 ++++ b/drivers/video/fbdev/cobalt_lcdfb.c
2151 +@@ -308,6 +308,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
2152 + info->screen_size = resource_size(res);
2153 + info->screen_base = devm_ioremap(&dev->dev, res->start,
2154 + info->screen_size);
2155 ++ if (!info->screen_base) {
2156 ++ framebuffer_release(info);
2157 ++ return -ENOMEM;
2158 ++ }
2159 ++
2160 + info->fbops = &cobalt_lcd_fbops;
2161 + info->fix = cobalt_lcdfb_fix;
2162 + info->fix.smem_start = res->start;
2163 +diff --git a/drivers/xen/arm-device.c b/drivers/xen/arm-device.c
2164 +index 778acf80aacb..85dd20e05726 100644
2165 +--- a/drivers/xen/arm-device.c
2166 ++++ b/drivers/xen/arm-device.c
2167 +@@ -58,9 +58,13 @@ static int xen_map_device_mmio(const struct resource *resources,
2168 + xen_pfn_t *gpfns;
2169 + xen_ulong_t *idxs;
2170 + int *errs;
2171 +- struct xen_add_to_physmap_range xatp;
2172 +
2173 + for (i = 0; i < count; i++) {
2174 ++ struct xen_add_to_physmap_range xatp = {
2175 ++ .domid = DOMID_SELF,
2176 ++ .space = XENMAPSPACE_dev_mmio
2177 ++ };
2178 ++
2179 + r = &resources[i];
2180 + nr = DIV_ROUND_UP(resource_size(r), XEN_PAGE_SIZE);
2181 + if ((resource_type(r) != IORESOURCE_MEM) || (nr == 0))
2182 +@@ -87,9 +91,7 @@ static int xen_map_device_mmio(const struct resource *resources,
2183 + idxs[j] = XEN_PFN_DOWN(r->start) + j;
2184 + }
2185 +
2186 +- xatp.domid = DOMID_SELF;
2187 + xatp.size = nr;
2188 +- xatp.space = XENMAPSPACE_dev_mmio;
2189 +
2190 + set_xen_guest_handle(xatp.gpfns, gpfns);
2191 + set_xen_guest_handle(xatp.idxs, idxs);
2192 +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2193 +index e46e7fbe1b34..14a37ff0b9e3 100644
2194 +--- a/fs/btrfs/extent-tree.c
2195 ++++ b/fs/btrfs/extent-tree.c
2196 +@@ -7401,7 +7401,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
2197 +
2198 + spin_unlock(&cluster->refill_lock);
2199 +
2200 +- down_read(&used_bg->data_rwsem);
2201 ++ /* We should only have one-level nested. */
2202 ++ down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
2203 +
2204 + spin_lock(&cluster->refill_lock);
2205 + if (used_bg == cluster->block_group)
2206 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2207 +index a2a014b19f18..8a05fa7e2152 100644
2208 +--- a/fs/btrfs/inode.c
2209 ++++ b/fs/btrfs/inode.c
2210 +@@ -7648,11 +7648,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
2211 + * within our reservation, otherwise we need to adjust our inode
2212 + * counter appropriately.
2213 + */
2214 +- if (dio_data->outstanding_extents) {
2215 ++ if (dio_data->outstanding_extents >= num_extents) {
2216 + dio_data->outstanding_extents -= num_extents;
2217 + } else {
2218 ++ /*
2219 ++ * If dio write length has been split due to no large enough
2220 ++ * contiguous space, we need to compensate our inode counter
2221 ++ * appropriately.
2222 ++ */
2223 ++ u64 num_needed = num_extents - dio_data->outstanding_extents;
2224 ++
2225 + spin_lock(&BTRFS_I(inode)->lock);
2226 +- BTRFS_I(inode)->outstanding_extents += num_extents;
2227 ++ BTRFS_I(inode)->outstanding_extents += num_needed;
2228 + spin_unlock(&BTRFS_I(inode)->lock);
2229 + }
2230 + }
2231 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
2232 +index b89004513c09..309313b71617 100644
2233 +--- a/fs/btrfs/tree-log.c
2234 ++++ b/fs/btrfs/tree-log.c
2235 +@@ -37,6 +37,7 @@
2236 + */
2237 + #define LOG_INODE_ALL 0
2238 + #define LOG_INODE_EXISTS 1
2239 ++#define LOG_OTHER_INODE 2
2240 +
2241 + /*
2242 + * directory trouble cases
2243 +@@ -4623,7 +4624,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2244 + if (S_ISDIR(inode->i_mode) ||
2245 + (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
2246 + &BTRFS_I(inode)->runtime_flags) &&
2247 +- inode_only == LOG_INODE_EXISTS))
2248 ++ inode_only >= LOG_INODE_EXISTS))
2249 + max_key.type = BTRFS_XATTR_ITEM_KEY;
2250 + else
2251 + max_key.type = (u8)-1;
2252 +@@ -4647,7 +4648,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2253 + return ret;
2254 + }
2255 +
2256 +- mutex_lock(&BTRFS_I(inode)->log_mutex);
2257 ++ if (inode_only == LOG_OTHER_INODE) {
2258 ++ inode_only = LOG_INODE_EXISTS;
2259 ++ mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
2260 ++ SINGLE_DEPTH_NESTING);
2261 ++ } else {
2262 ++ mutex_lock(&BTRFS_I(inode)->log_mutex);
2263 ++ }
2264 +
2265 + /*
2266 + * a brute force approach to making sure we get the most uptodate
2267 +@@ -4799,7 +4806,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
2268 + * unpin it.
2269 + */
2270 + err = btrfs_log_inode(trans, root, other_inode,
2271 +- LOG_INODE_EXISTS,
2272 ++ LOG_OTHER_INODE,
2273 + 0, LLONG_MAX, ctx);
2274 + iput(other_inode);
2275 + if (err)
2276 +diff --git a/fs/dcache.c b/fs/dcache.c
2277 +index 1dbc6b560fef..67957f5b325c 100644
2278 +--- a/fs/dcache.c
2279 ++++ b/fs/dcache.c
2280 +@@ -277,6 +277,33 @@ static inline int dname_external(const struct dentry *dentry)
2281 + return dentry->d_name.name != dentry->d_iname;
2282 + }
2283 +
2284 ++void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
2285 ++{
2286 ++ spin_lock(&dentry->d_lock);
2287 ++ if (unlikely(dname_external(dentry))) {
2288 ++ struct external_name *p = external_name(dentry);
2289 ++ atomic_inc(&p->u.count);
2290 ++ spin_unlock(&dentry->d_lock);
2291 ++ name->name = p->name;
2292 ++ } else {
2293 ++ memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
2294 ++ spin_unlock(&dentry->d_lock);
2295 ++ name->name = name->inline_name;
2296 ++ }
2297 ++}
2298 ++EXPORT_SYMBOL(take_dentry_name_snapshot);
2299 ++
2300 ++void release_dentry_name_snapshot(struct name_snapshot *name)
2301 ++{
2302 ++ if (unlikely(name->name != name->inline_name)) {
2303 ++ struct external_name *p;
2304 ++ p = container_of(name->name, struct external_name, name[0]);
2305 ++ if (unlikely(atomic_dec_and_test(&p->u.count)))
2306 ++ kfree_rcu(p, u.head);
2307 ++ }
2308 ++}
2309 ++EXPORT_SYMBOL(release_dentry_name_snapshot);
2310 ++
2311 + static inline void __d_set_inode_and_type(struct dentry *dentry,
2312 + struct inode *inode,
2313 + unsigned type_flags)
2314 +diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
2315 +index 1e30f74a9527..3d7de9f4f545 100644
2316 +--- a/fs/debugfs/inode.c
2317 ++++ b/fs/debugfs/inode.c
2318 +@@ -730,7 +730,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
2319 + {
2320 + int error;
2321 + struct dentry *dentry = NULL, *trap;
2322 +- const char *old_name;
2323 ++ struct name_snapshot old_name;
2324 +
2325 + trap = lock_rename(new_dir, old_dir);
2326 + /* Source or destination directories don't exist? */
2327 +@@ -745,19 +745,19 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
2328 + if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
2329 + goto exit;
2330 +
2331 +- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
2332 ++ take_dentry_name_snapshot(&old_name, old_dentry);
2333 +
2334 + error = simple_rename(d_inode(old_dir), old_dentry, d_inode(new_dir),
2335 + dentry, 0);
2336 + if (error) {
2337 +- fsnotify_oldname_free(old_name);
2338 ++ release_dentry_name_snapshot(&old_name);
2339 + goto exit;
2340 + }
2341 + d_move(old_dentry, dentry);
2342 +- fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name,
2343 ++ fsnotify_move(d_inode(old_dir), d_inode(new_dir), old_name.name,
2344 + d_is_dir(old_dentry),
2345 + NULL, old_dentry);
2346 +- fsnotify_oldname_free(old_name);
2347 ++ release_dentry_name_snapshot(&old_name);
2348 + unlock_rename(new_dir, old_dir);
2349 + dput(dentry);
2350 + return old_dentry;
2351 +diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
2352 +index 7bc186f4ed4d..1be45c8d460d 100644
2353 +--- a/fs/jfs/acl.c
2354 ++++ b/fs/jfs/acl.c
2355 +@@ -77,13 +77,6 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
2356 + switch (type) {
2357 + case ACL_TYPE_ACCESS:
2358 + ea_name = XATTR_NAME_POSIX_ACL_ACCESS;
2359 +- if (acl) {
2360 +- rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
2361 +- if (rc)
2362 +- return rc;
2363 +- inode->i_ctime = current_time(inode);
2364 +- mark_inode_dirty(inode);
2365 +- }
2366 + break;
2367 + case ACL_TYPE_DEFAULT:
2368 + ea_name = XATTR_NAME_POSIX_ACL_DEFAULT;
2369 +@@ -118,9 +111,17 @@ int jfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
2370 +
2371 + tid = txBegin(inode->i_sb, 0);
2372 + mutex_lock(&JFS_IP(inode)->commit_mutex);
2373 ++ if (type == ACL_TYPE_ACCESS && acl) {
2374 ++ rc = posix_acl_update_mode(inode, &inode->i_mode, &acl);
2375 ++ if (rc)
2376 ++ goto end_tx;
2377 ++ inode->i_ctime = current_time(inode);
2378 ++ mark_inode_dirty(inode);
2379 ++ }
2380 + rc = __jfs_set_acl(tid, inode, type, acl);
2381 + if (!rc)
2382 + rc = txCommit(tid, 1, &inode, 0);
2383 ++end_tx:
2384 + txEnd(tid);
2385 + mutex_unlock(&JFS_IP(inode)->commit_mutex);
2386 + return rc;
2387 +diff --git a/fs/namei.c b/fs/namei.c
2388 +index d5e5140c1045..66209f720146 100644
2389 +--- a/fs/namei.c
2390 ++++ b/fs/namei.c
2391 +@@ -4336,11 +4336,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2392 + {
2393 + int error;
2394 + bool is_dir = d_is_dir(old_dentry);
2395 +- const unsigned char *old_name;
2396 + struct inode *source = old_dentry->d_inode;
2397 + struct inode *target = new_dentry->d_inode;
2398 + bool new_is_dir = false;
2399 + unsigned max_links = new_dir->i_sb->s_max_links;
2400 ++ struct name_snapshot old_name;
2401 +
2402 + /*
2403 + * Check source == target.
2404 +@@ -4391,7 +4391,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2405 + if (error)
2406 + return error;
2407 +
2408 +- old_name = fsnotify_oldname_init(old_dentry->d_name.name);
2409 ++ take_dentry_name_snapshot(&old_name, old_dentry);
2410 + dget(new_dentry);
2411 + if (!is_dir || (flags & RENAME_EXCHANGE))
2412 + lock_two_nondirectories(source, target);
2413 +@@ -4446,14 +4446,14 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2414 + inode_unlock(target);
2415 + dput(new_dentry);
2416 + if (!error) {
2417 +- fsnotify_move(old_dir, new_dir, old_name, is_dir,
2418 ++ fsnotify_move(old_dir, new_dir, old_name.name, is_dir,
2419 + !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry);
2420 + if (flags & RENAME_EXCHANGE) {
2421 + fsnotify_move(new_dir, old_dir, old_dentry->d_name.name,
2422 + new_is_dir, NULL, new_dentry);
2423 + }
2424 + }
2425 +- fsnotify_oldname_free(old_name);
2426 ++ release_dentry_name_snapshot(&old_name);
2427 +
2428 + return error;
2429 + }
2430 +diff --git a/fs/nfs/file.c b/fs/nfs/file.c
2431 +index a1de8ef63e56..84c1cb9237d0 100644
2432 +--- a/fs/nfs/file.c
2433 ++++ b/fs/nfs/file.c
2434 +@@ -757,7 +757,7 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
2435 + */
2436 + nfs_sync_mapping(filp->f_mapping);
2437 + if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
2438 +- nfs_zap_mapping(inode, filp->f_mapping);
2439 ++ nfs_zap_caches(inode);
2440 + out:
2441 + return status;
2442 + }
2443 +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2444 +index 401ea6e4cab8..46ca7881d80d 100644
2445 +--- a/fs/nfs/nfs4proc.c
2446 ++++ b/fs/nfs/nfs4proc.c
2447 +@@ -6419,7 +6419,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
2448 + set_current_state(TASK_INTERRUPTIBLE);
2449 + spin_unlock_irqrestore(&q->lock, flags);
2450 +
2451 +- freezable_schedule_timeout_interruptible(NFS4_LOCK_MAXTIMEOUT);
2452 ++ freezable_schedule_timeout(NFS4_LOCK_MAXTIMEOUT);
2453 + }
2454 +
2455 + finish_wait(q, &wait);
2456 +diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
2457 +index db39de2dd4cb..a64adc2fced9 100644
2458 +--- a/fs/notify/fsnotify.c
2459 ++++ b/fs/notify/fsnotify.c
2460 +@@ -104,16 +104,20 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
2461 + if (unlikely(!fsnotify_inode_watches_children(p_inode)))
2462 + __fsnotify_update_child_dentry_flags(p_inode);
2463 + else if (p_inode->i_fsnotify_mask & mask) {
2464 ++ struct name_snapshot name;
2465 ++
2466 + /* we are notifying a parent so come up with the new mask which
2467 + * specifies these are events which came from a child. */
2468 + mask |= FS_EVENT_ON_CHILD;
2469 +
2470 ++ take_dentry_name_snapshot(&name, dentry);
2471 + if (path)
2472 + ret = fsnotify(p_inode, mask, path, FSNOTIFY_EVENT_PATH,
2473 +- dentry->d_name.name, 0);
2474 ++ name.name, 0);
2475 + else
2476 + ret = fsnotify(p_inode, mask, dentry->d_inode, FSNOTIFY_EVENT_INODE,
2477 +- dentry->d_name.name, 0);
2478 ++ name.name, 0);
2479 ++ release_dentry_name_snapshot(&name);
2480 + }
2481 +
2482 + dput(parent);
2483 +diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
2484 +index 6ad831b9d1b8..8b09271e5d66 100644
2485 +--- a/fs/pstore/ram.c
2486 ++++ b/fs/pstore/ram.c
2487 +@@ -434,7 +434,7 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
2488 + for (i = 0; i < cxt->max_dump_cnt; i++) {
2489 + cxt->przs[i] = persistent_ram_new(*paddr, cxt->record_size, 0,
2490 + &cxt->ecc_info,
2491 +- cxt->memtype);
2492 ++ cxt->memtype, 0);
2493 + if (IS_ERR(cxt->przs[i])) {
2494 + err = PTR_ERR(cxt->przs[i]);
2495 + dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
2496 +@@ -471,7 +471,8 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
2497 + return -ENOMEM;
2498 + }
2499 +
2500 +- *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
2501 ++ *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
2502 ++ cxt->memtype, 0);
2503 + if (IS_ERR(*prz)) {
2504 + int err = PTR_ERR(*prz);
2505 +
2506 +diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
2507 +index 3975deec02f8..e11672aa4575 100644
2508 +--- a/fs/pstore/ram_core.c
2509 ++++ b/fs/pstore/ram_core.c
2510 +@@ -48,16 +48,15 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
2511 + return atomic_read(&prz->buffer->start);
2512 + }
2513 +
2514 +-static DEFINE_RAW_SPINLOCK(buffer_lock);
2515 +-
2516 + /* increase and wrap the start pointer, returning the old value */
2517 + static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
2518 + {
2519 + int old;
2520 + int new;
2521 +- unsigned long flags;
2522 ++ unsigned long flags = 0;
2523 +
2524 +- raw_spin_lock_irqsave(&buffer_lock, flags);
2525 ++ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
2526 ++ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
2527 +
2528 + old = atomic_read(&prz->buffer->start);
2529 + new = old + a;
2530 +@@ -65,7 +64,8 @@ static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
2531 + new -= prz->buffer_size;
2532 + atomic_set(&prz->buffer->start, new);
2533 +
2534 +- raw_spin_unlock_irqrestore(&buffer_lock, flags);
2535 ++ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
2536 ++ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
2537 +
2538 + return old;
2539 + }
2540 +@@ -75,9 +75,10 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
2541 + {
2542 + size_t old;
2543 + size_t new;
2544 +- unsigned long flags;
2545 ++ unsigned long flags = 0;
2546 +
2547 +- raw_spin_lock_irqsave(&buffer_lock, flags);
2548 ++ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
2549 ++ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
2550 +
2551 + old = atomic_read(&prz->buffer->size);
2552 + if (old == prz->buffer_size)
2553 +@@ -89,7 +90,8 @@ static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
2554 + atomic_set(&prz->buffer->size, new);
2555 +
2556 + exit:
2557 +- raw_spin_unlock_irqrestore(&buffer_lock, flags);
2558 ++ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
2559 ++ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
2560 + }
2561 +
2562 + static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
2563 +@@ -491,6 +493,7 @@ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
2564 + prz->buffer->sig);
2565 + }
2566 +
2567 ++ /* Rewind missing or invalid memory area. */
2568 + prz->buffer->sig = sig;
2569 + persistent_ram_zap(prz);
2570 +
2571 +@@ -517,7 +520,7 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
2572 +
2573 + struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
2574 + u32 sig, struct persistent_ram_ecc_info *ecc_info,
2575 +- unsigned int memtype)
2576 ++ unsigned int memtype, u32 flags)
2577 + {
2578 + struct persistent_ram_zone *prz;
2579 + int ret = -ENOMEM;
2580 +@@ -528,6 +531,10 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
2581 + goto err;
2582 + }
2583 +
2584 ++ /* Initialize general buffer state. */
2585 ++ raw_spin_lock_init(&prz->buffer_lock);
2586 ++ prz->flags = flags;
2587 ++
2588 + ret = persistent_ram_buffer_map(start, size, prz, memtype);
2589 + if (ret)
2590 + goto err;
2591 +diff --git a/include/linux/dcache.h b/include/linux/dcache.h
2592 +index 5beed7b30561..ff295e166b2c 100644
2593 +--- a/include/linux/dcache.h
2594 ++++ b/include/linux/dcache.h
2595 +@@ -590,5 +590,11 @@ static inline struct inode *d_real_inode(const struct dentry *dentry)
2596 + return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0));
2597 + }
2598 +
2599 ++struct name_snapshot {
2600 ++ const char *name;
2601 ++ char inline_name[DNAME_INLINE_LEN];
2602 ++};
2603 ++void take_dentry_name_snapshot(struct name_snapshot *, struct dentry *);
2604 ++void release_dentry_name_snapshot(struct name_snapshot *);
2605 +
2606 + #endif /* __LINUX_DCACHE_H */
2607 +diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
2608 +index b8bcc058e031..e5f03a4d8430 100644
2609 +--- a/include/linux/fsnotify.h
2610 ++++ b/include/linux/fsnotify.h
2611 +@@ -293,35 +293,4 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
2612 + }
2613 + }
2614 +
2615 +-#if defined(CONFIG_FSNOTIFY) /* notify helpers */
2616 +-
2617 +-/*
2618 +- * fsnotify_oldname_init - save off the old filename before we change it
2619 +- */
2620 +-static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
2621 +-{
2622 +- return kstrdup(name, GFP_KERNEL);
2623 +-}
2624 +-
2625 +-/*
2626 +- * fsnotify_oldname_free - free the name we got from fsnotify_oldname_init
2627 +- */
2628 +-static inline void fsnotify_oldname_free(const unsigned char *old_name)
2629 +-{
2630 +- kfree(old_name);
2631 +-}
2632 +-
2633 +-#else /* CONFIG_FSNOTIFY */
2634 +-
2635 +-static inline const char *fsnotify_oldname_init(const unsigned char *name)
2636 +-{
2637 +- return NULL;
2638 +-}
2639 +-
2640 +-static inline void fsnotify_oldname_free(const unsigned char *old_name)
2641 +-{
2642 +-}
2643 +-
2644 +-#endif /* CONFIG_FSNOTIFY */
2645 +-
2646 + #endif /* _LINUX_FS_NOTIFY_H */
2647 +diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
2648 +index c9f379689dd0..80faf44b8887 100644
2649 +--- a/include/linux/mlx4/device.h
2650 ++++ b/include/linux/mlx4/device.h
2651 +@@ -1384,6 +1384,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
2652 + int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
2653 + int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
2654 + bool *vlan_offload_disabled);
2655 ++void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
2656 ++ struct _rule_hw *eth_header);
2657 + int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
2658 + int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
2659 + int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
2660 +diff --git a/include/linux/phy.h b/include/linux/phy.h
2661 +index 6c9b1e0006ee..8431c8c0c320 100644
2662 +--- a/include/linux/phy.h
2663 ++++ b/include/linux/phy.h
2664 +@@ -799,6 +799,10 @@ int genphy_read_status(struct phy_device *phydev);
2665 + int genphy_suspend(struct phy_device *phydev);
2666 + int genphy_resume(struct phy_device *phydev);
2667 + int genphy_soft_reset(struct phy_device *phydev);
2668 ++static inline int genphy_no_soft_reset(struct phy_device *phydev)
2669 ++{
2670 ++ return 0;
2671 ++}
2672 + void phy_driver_unregister(struct phy_driver *drv);
2673 + void phy_drivers_unregister(struct phy_driver *drv, int n);
2674 + int phy_driver_register(struct phy_driver *new_driver, struct module *owner);
2675 +diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
2676 +index c668c861c96c..4058bf991868 100644
2677 +--- a/include/linux/pstore_ram.h
2678 ++++ b/include/linux/pstore_ram.h
2679 +@@ -24,6 +24,13 @@
2680 + #include <linux/list.h>
2681 + #include <linux/types.h>
2682 +
2683 ++/*
2684 ++ * Choose whether access to the RAM zone requires locking or not. If a zone
2685 ++ * can be written to from different CPUs like with ftrace for example, then
2686 ++ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
2687 ++ */
2688 ++#define PRZ_FLAG_NO_LOCK BIT(0)
2689 ++
2690 + struct persistent_ram_buffer;
2691 + struct rs_control;
2692 +
2693 +@@ -40,6 +47,8 @@ struct persistent_ram_zone {
2694 + void *vaddr;
2695 + struct persistent_ram_buffer *buffer;
2696 + size_t buffer_size;
2697 ++ u32 flags;
2698 ++ raw_spinlock_t buffer_lock;
2699 +
2700 + /* ECC correction */
2701 + char *par_buffer;
2702 +@@ -55,7 +64,7 @@ struct persistent_ram_zone {
2703 +
2704 + struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
2705 + u32 sig, struct persistent_ram_ecc_info *ecc_info,
2706 +- unsigned int memtype);
2707 ++ unsigned int memtype, u32 flags);
2708 + void persistent_ram_free(struct persistent_ram_zone *prz);
2709 + void persistent_ram_zap(struct persistent_ram_zone *prz);
2710 +
2711 +diff --git a/kernel/cpu.c b/kernel/cpu.c
2712 +index 8f52977aad59..26a4f74bff83 100644
2713 +--- a/kernel/cpu.c
2714 ++++ b/kernel/cpu.c
2715 +@@ -410,11 +410,26 @@ static int notify_online(unsigned int cpu)
2716 + return 0;
2717 + }
2718 +
2719 ++static void __cpuhp_kick_ap_work(struct cpuhp_cpu_state *st);
2720 ++
2721 + static int bringup_wait_for_ap(unsigned int cpu)
2722 + {
2723 + struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2724 +
2725 ++ /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
2726 + wait_for_completion(&st->done);
2727 ++ if (WARN_ON_ONCE((!cpu_online(cpu))))
2728 ++ return -ECANCELED;
2729 ++
2730 ++ /* Unpark the stopper thread and the hotplug thread of the target cpu */
2731 ++ stop_machine_unpark(cpu);
2732 ++ kthread_unpark(st->thread);
2733 ++
2734 ++ /* Should we go further up ? */
2735 ++ if (st->target > CPUHP_AP_ONLINE_IDLE) {
2736 ++ __cpuhp_kick_ap_work(st);
2737 ++ wait_for_completion(&st->done);
2738 ++ }
2739 + return st->result;
2740 + }
2741 +
2742 +@@ -437,9 +452,7 @@ static int bringup_cpu(unsigned int cpu)
2743 + cpu_notify(CPU_UP_CANCELED, cpu);
2744 + return ret;
2745 + }
2746 +- ret = bringup_wait_for_ap(cpu);
2747 +- BUG_ON(!cpu_online(cpu));
2748 +- return ret;
2749 ++ return bringup_wait_for_ap(cpu);
2750 + }
2751 +
2752 + /*
2753 +@@ -974,31 +987,20 @@ void notify_cpu_starting(unsigned int cpu)
2754 + }
2755 +
2756 + /*
2757 +- * Called from the idle task. We need to set active here, so we can kick off
2758 +- * the stopper thread and unpark the smpboot threads. If the target state is
2759 +- * beyond CPUHP_AP_ONLINE_IDLE we kick cpuhp thread and let it bring up the
2760 +- * cpu further.
2761 ++ * Called from the idle task. Wake up the controlling task which brings the
2762 ++ * stopper and the hotplug thread of the upcoming CPU up and then delegates
2763 ++ * the rest of the online bringup to the hotplug thread.
2764 + */
2765 + void cpuhp_online_idle(enum cpuhp_state state)
2766 + {
2767 + struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
2768 +- unsigned int cpu = smp_processor_id();
2769 +
2770 + /* Happens for the boot cpu */
2771 + if (state != CPUHP_AP_ONLINE_IDLE)
2772 + return;
2773 +
2774 + st->state = CPUHP_AP_ONLINE_IDLE;
2775 +-
2776 +- /* Unpark the stopper thread and the hotplug thread of this cpu */
2777 +- stop_machine_unpark(cpu);
2778 +- kthread_unpark(st->thread);
2779 +-
2780 +- /* Should we go further up ? */
2781 +- if (st->target > CPUHP_AP_ONLINE_IDLE)
2782 +- __cpuhp_kick_ap_work(st);
2783 +- else
2784 +- complete(&st->done);
2785 ++ complete(&st->done);
2786 + }
2787 +
2788 + /* Requires cpu_add_remove_lock to be held */
2789 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2790 +index d177b21d04ce..2098954c690f 100644
2791 +--- a/kernel/sched/core.c
2792 ++++ b/kernel/sched/core.c
2793 +@@ -8376,11 +8376,20 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
2794 + if (IS_ERR(tg))
2795 + return ERR_PTR(-ENOMEM);
2796 +
2797 +- sched_online_group(tg, parent);
2798 +-
2799 + return &tg->css;
2800 + }
2801 +
2802 ++/* Expose task group only after completing cgroup initialization */
2803 ++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
2804 ++{
2805 ++ struct task_group *tg = css_tg(css);
2806 ++ struct task_group *parent = css_tg(css->parent);
2807 ++
2808 ++ if (parent)
2809 ++ sched_online_group(tg, parent);
2810 ++ return 0;
2811 ++}
2812 ++
2813 + static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
2814 + {
2815 + struct task_group *tg = css_tg(css);
2816 +@@ -8783,6 +8792,7 @@ static struct cftype cpu_files[] = {
2817 +
2818 + struct cgroup_subsys cpu_cgrp_subsys = {
2819 + .css_alloc = cpu_cgroup_css_alloc,
2820 ++ .css_online = cpu_cgroup_css_online,
2821 + .css_released = cpu_cgroup_css_released,
2822 + .css_free = cpu_cgroup_css_free,
2823 + .fork = cpu_cgroup_fork,
2824 +diff --git a/net/core/dev.c b/net/core/dev.c
2825 +index c17952b6e0b6..0af019dfe846 100644
2826 +--- a/net/core/dev.c
2827 ++++ b/net/core/dev.c
2828 +@@ -2702,9 +2702,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
2829 + static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2830 + {
2831 + if (tx_path)
2832 +- return skb->ip_summed != CHECKSUM_PARTIAL;
2833 +- else
2834 +- return skb->ip_summed == CHECKSUM_NONE;
2835 ++ return skb->ip_summed != CHECKSUM_PARTIAL &&
2836 ++ skb->ip_summed != CHECKSUM_NONE;
2837 ++
2838 ++ return skb->ip_summed == CHECKSUM_NONE;
2839 + }
2840 +
2841 + /**
2842 +@@ -2723,11 +2724,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2843 + struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2844 + netdev_features_t features, bool tx_path)
2845 + {
2846 ++ struct sk_buff *segs;
2847 ++
2848 + if (unlikely(skb_needs_check(skb, tx_path))) {
2849 + int err;
2850 +
2851 +- skb_warn_bad_offload(skb);
2852 +-
2853 ++ /* We're going to init ->check field in TCP or UDP header */
2854 + err = skb_cow_head(skb, 0);
2855 + if (err < 0)
2856 + return ERR_PTR(err);
2857 +@@ -2755,7 +2757,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2858 + skb_reset_mac_header(skb);
2859 + skb_reset_mac_len(skb);
2860 +
2861 +- return skb_mac_gso_segment(skb, features);
2862 ++ segs = skb_mac_gso_segment(skb, features);
2863 ++
2864 ++ if (unlikely(skb_needs_check(skb, tx_path)))
2865 ++ skb_warn_bad_offload(skb);
2866 ++
2867 ++ return segs;
2868 + }
2869 + EXPORT_SYMBOL(__skb_gso_segment);
2870 +
2871 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2872 +index fd649599620e..5a4b8e7bcedd 100644
2873 +--- a/net/ipv6/ip6_output.c
2874 ++++ b/net/ipv6/ip6_output.c
2875 +@@ -1376,7 +1376,7 @@ static int __ip6_append_data(struct sock *sk,
2876 + */
2877 +
2878 + cork->length += length;
2879 +- if (((length > mtu) ||
2880 ++ if ((((length + fragheaderlen) > mtu) ||
2881 + (skb && skb_is_gso(skb))) &&
2882 + (sk->sk_protocol == IPPROTO_UDP) &&
2883 + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
2884 +diff --git a/net/key/af_key.c b/net/key/af_key.c
2885 +index d8d95b6415e4..2e1050ec2cf0 100644
2886 +--- a/net/key/af_key.c
2887 ++++ b/net/key/af_key.c
2888 +@@ -63,6 +63,7 @@ struct pfkey_sock {
2889 + } u;
2890 + struct sk_buff *skb;
2891 + } dump;
2892 ++ struct mutex dump_lock;
2893 + };
2894 +
2895 + static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
2896 +@@ -143,6 +144,7 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
2897 + {
2898 + struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
2899 + struct sock *sk;
2900 ++ struct pfkey_sock *pfk;
2901 + int err;
2902 +
2903 + if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2904 +@@ -157,6 +159,9 @@ static int pfkey_create(struct net *net, struct socket *sock, int protocol,
2905 + if (sk == NULL)
2906 + goto out;
2907 +
2908 ++ pfk = pfkey_sk(sk);
2909 ++ mutex_init(&pfk->dump_lock);
2910 ++
2911 + sock->ops = &pfkey_ops;
2912 + sock_init_data(sock, sk);
2913 +
2914 +@@ -285,13 +290,23 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
2915 + struct sadb_msg *hdr;
2916 + int rc;
2917 +
2918 ++ mutex_lock(&pfk->dump_lock);
2919 ++ if (!pfk->dump.dump) {
2920 ++ rc = 0;
2921 ++ goto out;
2922 ++ }
2923 ++
2924 + rc = pfk->dump.dump(pfk);
2925 +- if (rc == -ENOBUFS)
2926 +- return 0;
2927 ++ if (rc == -ENOBUFS) {
2928 ++ rc = 0;
2929 ++ goto out;
2930 ++ }
2931 +
2932 + if (pfk->dump.skb) {
2933 +- if (!pfkey_can_dump(&pfk->sk))
2934 +- return 0;
2935 ++ if (!pfkey_can_dump(&pfk->sk)) {
2936 ++ rc = 0;
2937 ++ goto out;
2938 ++ }
2939 +
2940 + hdr = (struct sadb_msg *) pfk->dump.skb->data;
2941 + hdr->sadb_msg_seq = 0;
2942 +@@ -302,6 +317,9 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
2943 + }
2944 +
2945 + pfkey_terminate_dump(pfk);
2946 ++
2947 ++out:
2948 ++ mutex_unlock(&pfk->dump_lock);
2949 + return rc;
2950 + }
2951 +
2952 +@@ -1806,19 +1824,26 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
2953 + struct xfrm_address_filter *filter = NULL;
2954 + struct pfkey_sock *pfk = pfkey_sk(sk);
2955 +
2956 +- if (pfk->dump.dump != NULL)
2957 ++ mutex_lock(&pfk->dump_lock);
2958 ++ if (pfk->dump.dump != NULL) {
2959 ++ mutex_unlock(&pfk->dump_lock);
2960 + return -EBUSY;
2961 ++ }
2962 +
2963 + proto = pfkey_satype2proto(hdr->sadb_msg_satype);
2964 +- if (proto == 0)
2965 ++ if (proto == 0) {
2966 ++ mutex_unlock(&pfk->dump_lock);
2967 + return -EINVAL;
2968 ++ }
2969 +
2970 + if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
2971 + struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
2972 +
2973 + filter = kmalloc(sizeof(*filter), GFP_KERNEL);
2974 +- if (filter == NULL)
2975 ++ if (filter == NULL) {
2976 ++ mutex_unlock(&pfk->dump_lock);
2977 + return -ENOMEM;
2978 ++ }
2979 +
2980 + memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
2981 + sizeof(xfrm_address_t));
2982 +@@ -1834,6 +1859,7 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
2983 + pfk->dump.dump = pfkey_dump_sa;
2984 + pfk->dump.done = pfkey_dump_sa_done;
2985 + xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
2986 ++ mutex_unlock(&pfk->dump_lock);
2987 +
2988 + return pfkey_do_dump(pfk);
2989 + }
2990 +@@ -2693,14 +2719,18 @@ static int pfkey_spddump(struct sock *sk, struct sk_buff *skb, const struct sadb
2991 + {
2992 + struct pfkey_sock *pfk = pfkey_sk(sk);
2993 +
2994 +- if (pfk->dump.dump != NULL)
2995 ++ mutex_lock(&pfk->dump_lock);
2996 ++ if (pfk->dump.dump != NULL) {
2997 ++ mutex_unlock(&pfk->dump_lock);
2998 + return -EBUSY;
2999 ++ }
3000 +
3001 + pfk->dump.msg_version = hdr->sadb_msg_version;
3002 + pfk->dump.msg_portid = hdr->sadb_msg_pid;
3003 + pfk->dump.dump = pfkey_dump_sp;
3004 + pfk->dump.done = pfkey_dump_sp_done;
3005 + xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
3006 ++ mutex_unlock(&pfk->dump_lock);
3007 +
3008 + return pfkey_do_dump(pfk);
3009 + }
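The af_key hunk above serializes the per-socket pfkey dump state behind the new dump_lock mutex, so a second SADB_DUMP / SADB_X_SPDDUMP request gets -EBUSY instead of racing pfkey_do_dump() against dump setup and teardown, and pfkey_do_dump() itself now tolerates the callback already being gone. A minimal user-space sketch of that lock-then-check pattern follows, using pthreads and an invented demo_sock type in place of the kernel objects; it illustrates the locking discipline, not the kernel code.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct demo_sock {
        pthread_mutex_t dump_lock;
        int (*dump)(struct demo_sock *);   /* non-NULL while a dump is active */
};

/* Mirrors pfkey_dump(): refuse to register a dump while one is in flight. */
static int demo_start_dump(struct demo_sock *s, int (*fn)(struct demo_sock *))
{
        pthread_mutex_lock(&s->dump_lock);
        if (s->dump) {
                pthread_mutex_unlock(&s->dump_lock);
                return -EBUSY;
        }
        s->dump = fn;
        pthread_mutex_unlock(&s->dump_lock);
        return 0;
}

/* Mirrors pfkey_do_dump(): the callback may already have been cleared. */
static int demo_do_dump(struct demo_sock *s)
{
        int rc = 0;

        pthread_mutex_lock(&s->dump_lock);
        if (s->dump)
                rc = s->dump(s);
        pthread_mutex_unlock(&s->dump_lock);
        return rc;
}

static int say_dumping(struct demo_sock *s)
{
        (void)s;
        return puts("dumping");
}

int main(void)
{
        struct demo_sock s = { .dump = NULL };

        pthread_mutex_init(&s.dump_lock, NULL);
        printf("first start:  %d\n", demo_start_dump(&s, say_dumping)); /* 0 */
        printf("second start: %d\n", demo_start_dump(&s, say_dumping)); /* -EBUSY */
        demo_do_dump(&s);
        pthread_mutex_destroy(&s.dump_lock);
        return 0;
}

Compile with cc -pthread; the second start returning -EBUSY is the behaviour the patch enforces for a socket that already has a dump registered.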
3010 +diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
3011 +index b10abef6b0a0..1d522ce833e6 100644
3012 +--- a/net/l2tp/l2tp_ip6.c
3013 ++++ b/net/l2tp/l2tp_ip6.c
3014 +@@ -64,7 +64,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
3015 + struct sock *sk;
3016 +
3017 + sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
3018 +- const struct in6_addr *addr = inet6_rcv_saddr(sk);
3019 ++ const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
3020 + struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
3021 +
3022 + if (l2tp == NULL)
3023 +@@ -72,7 +72,7 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
3024 +
3025 + if ((l2tp->conn_id == tunnel_id) &&
3026 + net_eq(sock_net(sk), net) &&
3027 +- (!addr || ipv6_addr_equal(addr, laddr)) &&
3028 ++ (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
3029 + (!sk->sk_bound_dev_if || !dif ||
3030 + sk->sk_bound_dev_if == dif))
3031 + goto found;
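The l2tp_ip6 hunk above relaxes the bind-table lookup so a socket bound to the IPv6 wildcard address matches any local address, rather than requiring an exact ipv6_addr_equal() match. Below is a small standalone restatement of that comparison using libc's in6_addr helpers instead of the kernel's ipv6_addr_any()/ipv6_addr_equal(); the helper name is invented for the example.

#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* true if a socket bound to sk_laddr should match a lookup for laddr */
static bool laddr_matches(const struct in6_addr *sk_laddr,
                          const struct in6_addr *laddr)
{
        /* no address recorded, or bound to :: -- match anything */
        if (!sk_laddr || IN6_IS_ADDR_UNSPECIFIED(sk_laddr))
                return true;
        return memcmp(sk_laddr, laddr, sizeof(*laddr)) == 0;
}

int main(void)
{
        struct in6_addr any  = IN6ADDR_ANY_INIT;
        struct in6_addr loop = IN6ADDR_LOOPBACK_INIT;

        printf(":: vs ::1  -> %d\n", laddr_matches(&any, &loop));  /* 1: wildcard matches */
        printf("::1 vs ::1 -> %d\n", laddr_matches(&loop, &loop)); /* 1: exact match */
        printf("::1 vs ::  -> %d\n", laddr_matches(&loop, &any));  /* 0: different addresses */
        return 0;
}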
3032 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3033 +index 8da67f7c9c5a..e26b515f7794 100644
3034 +--- a/net/xfrm/xfrm_policy.c
3035 ++++ b/net/xfrm/xfrm_policy.c
3036 +@@ -1248,7 +1248,7 @@ static inline int policy_to_flow_dir(int dir)
3037 + }
3038 +
3039 + static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
3040 +- const struct flowi *fl)
3041 ++ const struct flowi *fl, u16 family)
3042 + {
3043 + struct xfrm_policy *pol;
3044 +
3045 +@@ -1256,8 +1256,7 @@ static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
3046 + again:
3047 + pol = rcu_dereference(sk->sk_policy[dir]);
3048 + if (pol != NULL) {
3049 +- bool match = xfrm_selector_match(&pol->selector, fl,
3050 +- sk->sk_family);
3051 ++ bool match = xfrm_selector_match(&pol->selector, fl, family);
3052 + int err = 0;
3053 +
3054 + if (match) {
3055 +@@ -2206,7 +2205,7 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3056 + sk = sk_const_to_full_sk(sk);
3057 + if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3058 + num_pols = 1;
3059 +- pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
3060 ++ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
3061 + err = xfrm_expand_policies(fl, family, pols,
3062 + &num_pols, &num_xfrms);
3063 + if (err < 0)
3064 +@@ -2485,7 +2484,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3065 + pol = NULL;
3066 + sk = sk_to_full_sk(sk);
3067 + if (sk && sk->sk_policy[dir]) {
3068 +- pol = xfrm_sk_policy_lookup(sk, dir, &fl);
3069 ++ pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
3070 + if (IS_ERR(pol)) {
3071 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3072 + return 0;
3073 +diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
3074 +index c47287d79306..a178e0d03088 100644
3075 +--- a/sound/pci/fm801.c
3076 ++++ b/sound/pci/fm801.c
3077 +@@ -1235,8 +1235,6 @@ static int snd_fm801_create(struct snd_card *card,
3078 + }
3079 + }
3080 +
3081 +- snd_fm801_chip_init(chip);
3082 +-
3083 + if ((chip->tea575x_tuner & TUNER_ONLY) == 0) {
3084 + if (devm_request_irq(&pci->dev, pci->irq, snd_fm801_interrupt,
3085 + IRQF_SHARED, KBUILD_MODNAME, chip)) {
3086 +@@ -1248,6 +1246,8 @@ static int snd_fm801_create(struct snd_card *card,
3087 + pci_set_master(pci);
3088 + }
3089 +
3090 ++ snd_fm801_chip_init(chip);
3091 ++
3092 + if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
3093 + snd_fm801_free(chip);
3094 + return err;
3095 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
3096 +index 4bf48336b0fc..775c67818bf1 100644
3097 +--- a/sound/pci/hda/patch_hdmi.c
3098 ++++ b/sound/pci/hda/patch_hdmi.c
3099 +@@ -3600,11 +3600,15 @@ HDA_CODEC_ENTRY(0x1002aa01, "R6xx HDMI", patch_atihdmi),
3100 + HDA_CODEC_ENTRY(0x10951390, "SiI1390 HDMI", patch_generic_hdmi),
3101 + HDA_CODEC_ENTRY(0x10951392, "SiI1392 HDMI", patch_generic_hdmi),
3102 + HDA_CODEC_ENTRY(0x17e80047, "Chrontel HDMI", patch_generic_hdmi),
3103 ++HDA_CODEC_ENTRY(0x10de0001, "MCP73 HDMI", patch_nvhdmi_2ch),
3104 + HDA_CODEC_ENTRY(0x10de0002, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
3105 + HDA_CODEC_ENTRY(0x10de0003, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
3106 ++HDA_CODEC_ENTRY(0x10de0004, "GPU 04 HDMI", patch_nvhdmi_8ch_7x),
3107 + HDA_CODEC_ENTRY(0x10de0005, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
3108 + HDA_CODEC_ENTRY(0x10de0006, "MCP77/78 HDMI", patch_nvhdmi_8ch_7x),
3109 + HDA_CODEC_ENTRY(0x10de0007, "MCP79/7A HDMI", patch_nvhdmi_8ch_7x),
3110 ++HDA_CODEC_ENTRY(0x10de0008, "GPU 08 HDMI/DP", patch_nvhdmi),
3111 ++HDA_CODEC_ENTRY(0x10de0009, "GPU 09 HDMI/DP", patch_nvhdmi),
3112 + HDA_CODEC_ENTRY(0x10de000a, "GPU 0a HDMI/DP", patch_nvhdmi),
3113 + HDA_CODEC_ENTRY(0x10de000b, "GPU 0b HDMI/DP", patch_nvhdmi),
3114 + HDA_CODEC_ENTRY(0x10de000c, "MCP89 HDMI", patch_nvhdmi),
3115 +@@ -3631,17 +3635,40 @@ HDA_CODEC_ENTRY(0x10de0041, "GPU 41 HDMI/DP", patch_nvhdmi),
3116 + HDA_CODEC_ENTRY(0x10de0042, "GPU 42 HDMI/DP", patch_nvhdmi),
3117 + HDA_CODEC_ENTRY(0x10de0043, "GPU 43 HDMI/DP", patch_nvhdmi),
3118 + HDA_CODEC_ENTRY(0x10de0044, "GPU 44 HDMI/DP", patch_nvhdmi),
3119 ++HDA_CODEC_ENTRY(0x10de0045, "GPU 45 HDMI/DP", patch_nvhdmi),
3120 ++HDA_CODEC_ENTRY(0x10de0050, "GPU 50 HDMI/DP", patch_nvhdmi),
3121 + HDA_CODEC_ENTRY(0x10de0051, "GPU 51 HDMI/DP", patch_nvhdmi),
3122 ++HDA_CODEC_ENTRY(0x10de0052, "GPU 52 HDMI/DP", patch_nvhdmi),
3123 + HDA_CODEC_ENTRY(0x10de0060, "GPU 60 HDMI/DP", patch_nvhdmi),
3124 ++HDA_CODEC_ENTRY(0x10de0061, "GPU 61 HDMI/DP", patch_nvhdmi),
3125 ++HDA_CODEC_ENTRY(0x10de0062, "GPU 62 HDMI/DP", patch_nvhdmi),
3126 + HDA_CODEC_ENTRY(0x10de0067, "MCP67 HDMI", patch_nvhdmi_2ch),
3127 + HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
3128 + HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
3129 + HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
3130 ++HDA_CODEC_ENTRY(0x10de0073, "GPU 73 HDMI/DP", patch_nvhdmi),
3131 ++HDA_CODEC_ENTRY(0x10de0074, "GPU 74 HDMI/DP", patch_nvhdmi),
3132 ++HDA_CODEC_ENTRY(0x10de0076, "GPU 76 HDMI/DP", patch_nvhdmi),
3133 ++HDA_CODEC_ENTRY(0x10de007b, "GPU 7b HDMI/DP", patch_nvhdmi),
3134 ++HDA_CODEC_ENTRY(0x10de007c, "GPU 7c HDMI/DP", patch_nvhdmi),
3135 + HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
3136 ++HDA_CODEC_ENTRY(0x10de007e, "GPU 7e HDMI/DP", patch_nvhdmi),
3137 + HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
3138 ++HDA_CODEC_ENTRY(0x10de0081, "GPU 81 HDMI/DP", patch_nvhdmi),
3139 + HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
3140 + HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
3141 ++HDA_CODEC_ENTRY(0x10de0084, "GPU 84 HDMI/DP", patch_nvhdmi),
3142 ++HDA_CODEC_ENTRY(0x10de0090, "GPU 90 HDMI/DP", patch_nvhdmi),
3143 ++HDA_CODEC_ENTRY(0x10de0091, "GPU 91 HDMI/DP", patch_nvhdmi),
3144 ++HDA_CODEC_ENTRY(0x10de0092, "GPU 92 HDMI/DP", patch_nvhdmi),
3145 ++HDA_CODEC_ENTRY(0x10de0093, "GPU 93 HDMI/DP", patch_nvhdmi),
3146 ++HDA_CODEC_ENTRY(0x10de0094, "GPU 94 HDMI/DP", patch_nvhdmi),
3147 ++HDA_CODEC_ENTRY(0x10de0095, "GPU 95 HDMI/DP", patch_nvhdmi),
3148 ++HDA_CODEC_ENTRY(0x10de0097, "GPU 97 HDMI/DP", patch_nvhdmi),
3149 ++HDA_CODEC_ENTRY(0x10de0098, "GPU 98 HDMI/DP", patch_nvhdmi),
3150 ++HDA_CODEC_ENTRY(0x10de0099, "GPU 99 HDMI/DP", patch_nvhdmi),
3151 + HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
3152 ++HDA_CODEC_ENTRY(0x10de8067, "MCP67/68 HDMI", patch_nvhdmi_2ch),
3153 + HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi),
3154 + HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi),
3155 + HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi),
3156 +diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
3157 +index e643be91d762..f9f2737c4ad2 100644
3158 +--- a/sound/soc/codecs/nau8825.c
3159 ++++ b/sound/soc/codecs/nau8825.c
3160 +@@ -1928,7 +1928,8 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
3161 + NAU8825_FLL_INTEGER_MASK, fll_param->fll_int);
3162 + /* FLL pre-scaler */
3163 + regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4,
3164 +- NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div);
3165 ++ NAU8825_FLL_REF_DIV_MASK,
3166 ++ fll_param->clk_ref_div << NAU8825_FLL_REF_DIV_SFT);
3167 + /* select divided VCO input */
3168 + regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
3169 + NAU8825_FLL_CLK_SW_MASK, NAU8825_FLL_CLK_SW_REF);
3170 +diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
3171 +index 1c63e2abafa9..574d6f936135 100644
3172 +--- a/sound/soc/codecs/nau8825.h
3173 ++++ b/sound/soc/codecs/nau8825.h
3174 +@@ -129,7 +129,8 @@
3175 + #define NAU8825_FLL_CLK_SRC_FS (0x3 << NAU8825_FLL_CLK_SRC_SFT)
3176 +
3177 + /* FLL4 (0x07) */
3178 +-#define NAU8825_FLL_REF_DIV_MASK (0x3 << 10)
3179 ++#define NAU8825_FLL_REF_DIV_SFT 10
3180 ++#define NAU8825_FLL_REF_DIV_MASK (0x3 << NAU8825_FLL_REF_DIV_SFT)
3181 +
3182 + /* FLL5 (0x08) */
3183 + #define NAU8825_FLL_PDB_DAC_EN (0x1 << 15)
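The nau8825 fix above shifts the FLL pre-scaler value into the REF_DIV bit field before the masked register update; without the shift the value bits fall outside 0x3 << 10 and are masked away, so the divider was never actually programmed. A standalone demonstration of that read-modify-write arithmetic, with a simplified stand-in for regmap_update_bits() and an arbitrary example divider value.

#include <stdint.h>
#include <stdio.h>

#define FLL_REF_DIV_SFT  10
#define FLL_REF_DIV_MASK (0x3u << FLL_REF_DIV_SFT)

/* same update rule regmap_update_bits() applies: reg = (reg & ~mask) | (val & mask) */
static uint16_t update_bits(uint16_t reg, uint16_t mask, uint16_t val)
{
        return (uint16_t)((reg & ~mask) | (val & mask));
}

int main(void)
{
        uint16_t reg = 0;
        unsigned int clk_ref_div = 2;   /* arbitrary example divider value */

        /* old form: the two value bits never reach bits 11:10 */
        printf("unshifted: 0x%04x\n",
               (unsigned)update_bits(reg, FLL_REF_DIV_MASK, (uint16_t)clk_ref_div));
        /* fixed form, as in the patch */
        printf("shifted:   0x%04x\n",
               (unsigned)update_bits(reg, FLL_REF_DIV_MASK,
                                     (uint16_t)(clk_ref_div << FLL_REF_DIV_SFT)));
        return 0;
}

The first write ends up as 0x0000 and the second as 0x0800, which is why the unshifted variant left the pre-scaler at its reset value.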
3184 +diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
3185 +index 5a8d96ec058c..fe45a16a5142 100644
3186 +--- a/sound/soc/codecs/tlv320aic3x.c
3187 ++++ b/sound/soc/codecs/tlv320aic3x.c
3188 +@@ -126,6 +126,16 @@ static const struct reg_default aic3x_reg[] = {
3189 + { 108, 0x00 }, { 109, 0x00 },
3190 + };
3191 +
3192 ++static bool aic3x_volatile_reg(struct device *dev, unsigned int reg)
3193 ++{
3194 ++ switch (reg) {
3195 ++ case AIC3X_RESET:
3196 ++ return true;
3197 ++ default:
3198 ++ return false;
3199 ++ }
3200 ++}
3201 ++
3202 + static const struct regmap_config aic3x_regmap = {
3203 + .reg_bits = 8,
3204 + .val_bits = 8,
3205 +@@ -133,6 +143,9 @@ static const struct regmap_config aic3x_regmap = {
3206 + .max_register = DAC_ICC_ADJ,
3207 + .reg_defaults = aic3x_reg,
3208 + .num_reg_defaults = ARRAY_SIZE(aic3x_reg),
3209 ++
3210 ++ .volatile_reg = aic3x_volatile_reg,
3211 ++
3212 + .cache_type = REGCACHE_RBTREE,
3213 + };
3214 +
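The tlv320aic3x hunk above adds a volatile_reg() callback so the self-clearing RESET register bypasses the regmap cache. The toy below shows one way a cached self-clearing bit can go wrong when writes are deduplicated against stale cache contents; the register index and the caching logic are deliberately simplified and are not the regmap implementation.

#include <stdbool.h>
#include <stdio.h>

#define RESET_REG 1        /* stand-in index, not the real AIC3X register map */
#define NUM_REGS  8

static unsigned int hw[NUM_REGS];       /* pretend hardware registers */
static unsigned int cache[NUM_REGS];
static bool cached[NUM_REGS];

static bool is_volatile(unsigned int reg)
{
        return reg == RESET_REG;        /* mirrors the role of aic3x_volatile_reg() */
}

static void cached_write(unsigned int reg, unsigned int val)
{
        if (!is_volatile(reg) && cached[reg] && cache[reg] == val)
                return;                 /* cache says nothing changed: write skipped */
        hw[reg] = val;
        if (!is_volatile(reg)) {
                cache[reg] = val;
                cached[reg] = true;
        }
        printf("wrote reg %u = %u\n", reg, val);
}

int main(void)
{
        cached_write(RESET_REG, 1);     /* first reset reaches the chip */
        hw[RESET_REG] = 0;              /* hardware clears the bit by itself */
        cached_write(RESET_REG, 1);     /* still reaches it, because the register is volatile */
        return 0;
}

Without the volatile marking, the second write in a scheme like this would be swallowed by the cache even though the hardware bit has long since cleared.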
3215 +diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
3216 +index 50349437d961..fde08660b63b 100644
3217 +--- a/sound/soc/fsl/fsl_ssi.c
3218 ++++ b/sound/soc/fsl/fsl_ssi.c
3219 +@@ -224,6 +224,12 @@ struct fsl_ssi_soc_data {
3220 + * @dbg_stats: Debugging statistics
3221 + *
3222 + * @soc: SoC specific data
3223 ++ *
3224 ++ * @fifo_watermark: the FIFO watermark setting. Notifies DMA when
3225 ++ * there are @fifo_watermark or fewer words in TX fifo or
3226 ++ * @fifo_watermark or more empty words in RX fifo.
3227 ++ * @dma_maxburst: max number of words to transfer in one go. So far,
3228 ++ * this is always the same as fifo_watermark.
3229 + */
3230 + struct fsl_ssi_private {
3231 + struct regmap *regs;
3232 +@@ -263,6 +269,9 @@ struct fsl_ssi_private {
3233 +
3234 + const struct fsl_ssi_soc_data *soc;
3235 + struct device *dev;
3236 ++
3237 ++ u32 fifo_watermark;
3238 ++ u32 dma_maxburst;
3239 + };
3240 +
3241 + /*
3242 +@@ -1051,21 +1060,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
3243 + regmap_write(regs, CCSR_SSI_SRCR, srcr);
3244 + regmap_write(regs, CCSR_SSI_SCR, scr);
3245 +
3246 +- /*
3247 +- * Set the watermark for transmit FIFI 0 and receive FIFO 0. We don't
3248 +- * use FIFO 1. We program the transmit water to signal a DMA transfer
3249 +- * if there are only two (or fewer) elements left in the FIFO. Two
3250 +- * elements equals one frame (left channel, right channel). This value,
3251 +- * however, depends on the depth of the transmit buffer.
3252 +- *
3253 +- * We set the watermark on the same level as the DMA burstsize. For
3254 +- * fiq it is probably better to use the biggest possible watermark
3255 +- * size.
3256 +- */
3257 +- if (ssi_private->use_dma)
3258 +- wm = ssi_private->fifo_depth - 2;
3259 +- else
3260 +- wm = ssi_private->fifo_depth;
3261 ++ wm = ssi_private->fifo_watermark;
3262 +
3263 + regmap_write(regs, CCSR_SSI_SFCSR,
3264 + CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
3265 +@@ -1373,12 +1368,8 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
3266 + dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
3267 + PTR_ERR(ssi_private->baudclk));
3268 +
3269 +- /*
3270 +- * We have burstsize be "fifo_depth - 2" to match the SSI
3271 +- * watermark setting in fsl_ssi_startup().
3272 +- */
3273 +- ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
3274 +- ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
3275 ++ ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
3276 ++ ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
3277 + ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
3278 + ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
3279 +
3280 +@@ -1543,6 +1534,47 @@ static int fsl_ssi_probe(struct platform_device *pdev)
3281 + /* Older 8610 DTs didn't have the fifo-depth property */
3282 + ssi_private->fifo_depth = 8;
3283 +
3284 ++ /*
3285 ++ * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
3286 ++ * use FIFO 1 but set the watermark appropriately nonetheless.
3287 ++ * We program the transmit watermark to signal a DMA transfer
3288 ++ * if there are N elements left in the FIFO. For chips with 15-deep
3289 ++ * FIFOs, set watermark to 8. This allows the SSI to operate at a
3290 ++ * high data rate without channel slipping. Behavior is unchanged
3291 ++ * for the older chips with a fifo depth of only 8. A value of 4
3292 ++ * might be appropriate for the older chips, but is left at
3293 ++ * fifo_depth-2 until somebody has a chance to test.
3294 ++ *
3295 ++ * We set the watermark on the same level as the DMA burstsize. For
3296 ++ * fiq it is probably better to use the biggest possible watermark
3297 ++ * size.
3298 ++ */
3299 ++ switch (ssi_private->fifo_depth) {
3300 ++ case 15:
3301 ++ /*
3302 ++ * 2 samples is not enough when running at high data
3303 ++ * rates (like 48kHz @ 16 bits/channel, 16 channels)
3304 ++ * 8 seems to split things evenly and leave enough time
3305 ++ * for the DMA to fill the FIFO before it's over/under
3306 ++ * run.
3307 ++ */
3308 ++ ssi_private->fifo_watermark = 8;
3309 ++ ssi_private->dma_maxburst = 8;
3310 ++ break;
3311 ++ case 8:
3312 ++ default:
3313 ++ /*
3314 ++ * maintain old behavior for older chips.
3315 ++ * Keeping it the same because I don't have an older
3316 ++ * board to test with.
3317 ++ * I suspect this could be changed to be something to
3318 ++ * leave some more space in the fifo.
3319 ++ */
3320 ++ ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
3321 ++ ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
3322 ++ break;
3323 ++ }
3324 ++
3325 + dev_set_drvdata(&pdev->dev, ssi_private);
3326 +
3327 + if (ssi_private->soc->imx) {
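The fsl_ssi change above moves the watermark choice out of _fsl_ssi_set_dai_fmt() into probe, picks it per FIFO depth (8 for the 15-deep FIFOs, the legacy fifo_depth - 2 otherwise) and keeps the DMA maxburst equal to the watermark. A condensed plain-C restatement of that selection for reference; the helper name is invented.

#include <stdio.h>

static void pick_watermark(unsigned int fifo_depth,
                           unsigned int *watermark, unsigned int *maxburst)
{
        switch (fifo_depth) {
        case 15:
                *watermark = 8;                 /* leaves the DMA headroom at high rates */
                break;
        case 8:
        default:
                *watermark = fifo_depth - 2;    /* old behaviour preserved */
                break;
        }
        *maxburst = *watermark;                 /* burst size mirrors the watermark */
}

int main(void)
{
        unsigned int wm, burst;

        pick_watermark(15, &wm, &burst);
        printf("depth 15 -> watermark %u, maxburst %u\n", wm, burst);
        pick_watermark(8, &wm, &burst);
        printf("depth 8  -> watermark %u, maxburst %u\n", wm, burst);
        return 0;
}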
3328 +diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
3329 +index d5873eeae1aa..bd19fad2d91b 100644
3330 +--- a/sound/soc/intel/boards/bytcr_rt5640.c
3331 ++++ b/sound/soc/intel/boards/bytcr_rt5640.c
3332 +@@ -142,7 +142,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
3333 + * for Jack detection and button press
3334 + */
3335 + ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_RCCLK,
3336 +- 0,
3337 ++ 48000 * 512,
3338 + SND_SOC_CLOCK_IN);
3339 + if (!ret) {
3340 + if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk)
3341 +diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
3342 +index 8fc3178bc79c..b30bd384c8d3 100644
3343 +--- a/sound/soc/intel/skylake/skl-sst.c
3344 ++++ b/sound/soc/intel/skylake/skl-sst.c
3345 +@@ -515,6 +515,9 @@ EXPORT_SYMBOL_GPL(skl_sst_init_fw);
3346 +
3347 + void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
3348 + {
3349 ++
3350 ++ if (ctx->dsp->fw)
3351 ++ release_firmware(ctx->dsp->fw);
3352 + skl_clear_module_table(ctx->dsp);
3353 + skl_freeup_uuid_list(ctx);
3354 + skl_ipc_free(&ctx->ipc);
3355 +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
3356 +index d56a16a0f6fa..21c3ef01c438 100644
3357 +--- a/sound/soc/soc-pcm.c
3358 ++++ b/sound/soc/soc-pcm.c
3359 +@@ -2184,9 +2184,11 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
3360 + break;
3361 + case SNDRV_PCM_TRIGGER_STOP:
3362 + case SNDRV_PCM_TRIGGER_SUSPEND:
3363 +- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
3364 + fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
3365 + break;
3366 ++ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
3367 ++ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
3368 ++ break;
3369 + }
3370 +
3371 + out:
3372 +diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
3373 +index c5251aaad844..b8044c6034b3 100644
3374 +--- a/sound/usb/endpoint.c
3375 ++++ b/sound/usb/endpoint.c
3376 +@@ -384,6 +384,9 @@ static void snd_complete_urb(struct urb *urb)
3377 + if (unlikely(atomic_read(&ep->chip->shutdown)))
3378 + goto exit_clear;
3379 +
3380 ++ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
3381 ++ goto exit_clear;
3382 ++
3383 + if (usb_pipeout(ep->pipe)) {
3384 + retire_outbound_urb(ep, ctx);
3385 + /* can be stopped during retire callback */
3386 +diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
3387 +index f1ce60065258..ec30c2fcbac0 100644
3388 +--- a/tools/lib/traceevent/plugin_sched_switch.c
3389 ++++ b/tools/lib/traceevent/plugin_sched_switch.c
3390 +@@ -111,7 +111,7 @@ static int sched_switch_handler(struct trace_seq *s,
3391 + trace_seq_printf(s, "%lld ", val);
3392 +
3393 + if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
3394 +- trace_seq_printf(s, "[%lld] ", val);
3395 ++ trace_seq_printf(s, "[%d] ", (int) val);
3396 +
3397 + if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0)
3398 + write_state(s, val);
3399 +@@ -129,7 +129,7 @@ static int sched_switch_handler(struct trace_seq *s,
3400 + trace_seq_printf(s, "%lld", val);
3401 +
3402 + if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
3403 +- trace_seq_printf(s, " [%lld]", val);
3404 ++ trace_seq_printf(s, " [%d]", (int) val);
3405 +
3406 + return 0;
3407 + }
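The plugin_sched_switch fix above prints prev_prio/next_prio as an int rather than with %lld. The priority is a signed 32-bit field in the event record, but the accessor hands it back widened into an unsigned long long, so a negative priority value shows up as a huge number unless it is narrowed back. A standalone demonstration of that widening, independent of the traceevent library.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int32_t prio_in_record = -1;                       /* as stored in the 32-bit field */
        unsigned long long val = (uint32_t)prio_in_record; /* as handed to the plugin */

        printf("[%lld]\n", (long long)val);  /* old output: [4294967295] */
        printf("[%d]\n", (int)val);          /* fixed output: [-1] */
        return 0;
}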
3408 +diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
3409 +index 982d6439bb07..ef52d1e3d431 100644
3410 +--- a/tools/perf/Makefile.perf
3411 ++++ b/tools/perf/Makefile.perf
3412 +@@ -729,9 +729,9 @@ install-tests: all install-gtk
3413 + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
3414 + $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
3415 +
3416 +-install-bin: install-tools install-tests
3417 ++install-bin: install-tools install-tests install-traceevent-plugins
3418 +
3419 +-install: install-bin try-install-man install-traceevent-plugins
3420 ++install: install-bin try-install-man
3421 +
3422 + install-python_ext:
3423 + $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
3424 +diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
3425 +index 7ea13f44178d..6c50d9f8e210 100644
3426 +--- a/tools/perf/util/probe-event.c
3427 ++++ b/tools/perf/util/probe-event.c
3428 +@@ -268,21 +268,6 @@ static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
3429 + }
3430 +
3431 + /*
3432 +- * NOTE:
3433 +- * '.gnu.linkonce.this_module' section of kernel module elf directly
3434 +- * maps to 'struct module' from linux/module.h. This section contains
3435 +- * actual module name which will be used by kernel after loading it.
3436 +- * But, we cannot use 'struct module' here since linux/module.h is not
3437 +- * exposed to user-space. Offset of 'name' has remained same from long
3438 +- * time, so hardcoding it here.
3439 +- */
3440 +-#ifdef __LP64__
3441 +-#define MOD_NAME_OFFSET 24
3442 +-#else
3443 +-#define MOD_NAME_OFFSET 12
3444 +-#endif
3445 +-
3446 +-/*
3447 + * @module can be module name of module file path. In case of path,
3448 + * inspect elf and find out what is actual module name.
3449 + * Caller has to free mod_name after using it.
3450 +@@ -296,6 +281,7 @@ static char *find_module_name(const char *module)
3451 + Elf_Data *data;
3452 + Elf_Scn *sec;
3453 + char *mod_name = NULL;
3454 ++ int name_offset;
3455 +
3456 + fd = open(module, O_RDONLY);
3457 + if (fd < 0)
3458 +@@ -317,7 +303,21 @@ static char *find_module_name(const char *module)
3459 + if (!data || !data->d_buf)
3460 + goto ret_err;
3461 +
3462 +- mod_name = strdup((char *)data->d_buf + MOD_NAME_OFFSET);
3463 ++ /*
3464 ++ * NOTE:
3465 ++ * '.gnu.linkonce.this_module' section of kernel module elf directly
3466 ++ * maps to 'struct module' from linux/module.h. This section contains
3467 ++ * actual module name which will be used by kernel after loading it.
3468 ++ * But, we cannot use 'struct module' here since linux/module.h is not
3469 ++ * exposed to user-space. Offset of 'name' has remained same from long
3470 ++ * time, so hardcoding it here.
3471 ++ */
3472 ++ if (ehdr.e_ident[EI_CLASS] == ELFCLASS32)
3473 ++ name_offset = 12;
3474 ++ else /* expect ELFCLASS64 by default */
3475 ++ name_offset = 24;
3476 ++
3477 ++ mod_name = strdup((char *)data->d_buf + name_offset);
3478 +
3479 + ret_err:
3480 + elf_end(elf);
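The probe-event change above stops deriving the '.gnu.linkonce.this_module' name offset from how perf itself was compiled (__LP64__) and instead checks the ELF class of the module being inspected, so e.g. a 64-bit perf reading a 32-bit module still lands on the right offset. Minimal standalone version of that selection, reusing the offsets hardcoded in the patch.

#include <elf.h>
#include <stdio.h>

/* 12 for ELFCLASS32 modules, 24 otherwise -- same values as the patch */
static int mod_name_offset(const unsigned char e_ident[EI_NIDENT])
{
        return e_ident[EI_CLASS] == ELFCLASS32 ? 12 : 24;
}

int main(void)
{
        unsigned char ident32[EI_NIDENT] = { [EI_CLASS] = ELFCLASS32 };
        unsigned char ident64[EI_NIDENT] = { [EI_CLASS] = ELFCLASS64 };

        printf("32-bit module: name at offset %d\n", mod_name_offset(ident32));
        printf("64-bit module: name at offset %d\n", mod_name_offset(ident64));
        return 0;
}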
3481 +diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
3482 +index 99400b0e8f2a..adbc6c02c3aa 100644
3483 +--- a/tools/perf/util/symbol-elf.c
3484 ++++ b/tools/perf/util/symbol-elf.c
3485 +@@ -537,6 +537,12 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
3486 + break;
3487 + } else {
3488 + int n = namesz + descsz;
3489 ++
3490 ++ if (n > (int)sizeof(bf)) {
3491 ++ n = sizeof(bf);
3492 ++ pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
3493 ++ __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
3494 ++ }
3495 + if (read(fd, bf, n) != n)
3496 + break;
3497 + }
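The symbol-elf.c addition above clamps the note payload read to the size of the on-stack scratch buffer instead of trusting namesz + descsz from the file, and reports the truncation in a debug message. A generic restatement of that clamp with made-up sizes; not the perf code itself.

#include <stdio.h>

#define BUF_SIZE 4096   /* stands in for sizeof(bf) in the real code */

static int clamp_note_read(unsigned int namesz, unsigned int descsz)
{
        int n = (int)(namesz + descsz);

        if (n > BUF_SIZE) {
                fprintf(stderr, "truncating note read: namesz=%u descsz=%u\n",
                        namesz, descsz);
                n = BUF_SIZE;
        }
        return n;       /* number of bytes that will actually be read */
}

int main(void)
{
        printf("%d\n", clamp_note_read(4, 20));       /* 24: fits in the buffer */
        printf("%d\n", clamp_note_read(4, 1u << 20)); /* clamped to 4096 */
        return 0;
}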