From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 15 Feb 2017 16:02:37
Message-Id: 1487174535.941599b763657a0ff60ee868b581e6a63035a7f6.alicef@gentoo
commit:     941599b763657a0ff60ee868b581e6a63035a7f6
Author:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Feb 15 16:02:15 2017 +0000
Commit:     Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Feb 15 16:02:15 2017 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=941599b7

Linux patch 4.9.10

 0000_README             |    4 +
 1009_linux-4.9.10.patch | 2157 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2161 insertions(+)

14 diff --git a/0000_README b/0000_README
15 index 450839f..9b2ecc5 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -79,6 +79,10 @@ Patch: 1008_linux-4.9.9.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.9
21
22 +Patch: 1009_linux-4.9.10.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.10
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1009_linux-4.9.10.patch b/1009_linux-4.9.10.patch
31 new file mode 100644
32 index 0000000..72028fa
33 --- /dev/null
34 +++ b/1009_linux-4.9.10.patch
35 @@ -0,0 +1,2157 @@
36 +diff --git a/Makefile b/Makefile
37 +index c0c41c9fac0c..d2fe757a979d 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 9
43 +-SUBLEVEL = 9
44 ++SUBLEVEL = 10
45 + EXTRAVERSION =
46 + NAME = Roaring Lionus
47 +
48 +diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
49 +index 91ebe382147f..5f69c3bd59bb 100644
50 +--- a/arch/arc/kernel/unaligned.c
51 ++++ b/arch/arc/kernel/unaligned.c
52 +@@ -243,7 +243,7 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
53 +
54 + /* clear any remanants of delay slot */
55 + if (delay_mode(regs)) {
56 +- regs->ret = regs->bta ~1U;
57 ++ regs->ret = regs->bta & ~1U;
58 + regs->status32 &= ~STATUS_DE_MASK;
59 + } else {
60 + regs->ret += state.instr_len;
61 +diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
62 +index 1ade1951e620..7aa120fbdc71 100644
63 +--- a/arch/arm/boot/dts/imx6dl.dtsi
64 ++++ b/arch/arm/boot/dts/imx6dl.dtsi
65 +@@ -137,7 +137,7 @@
66 + &gpio4 {
67 + gpio-ranges = <&iomuxc 5 136 1>, <&iomuxc 6 145 1>, <&iomuxc 7 150 1>,
68 + <&iomuxc 8 146 1>, <&iomuxc 9 151 1>, <&iomuxc 10 147 1>,
69 +- <&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
70 ++ <&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
71 + <&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16 39 7>,
72 + <&iomuxc 23 56 1>, <&iomuxc 24 61 7>, <&iomuxc 31 46 1>;
73 + };
74 +diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
75 +index ce131ed5939d..ae738a6319f6 100644
76 +--- a/arch/arm/kernel/ptrace.c
77 ++++ b/arch/arm/kernel/ptrace.c
78 +@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,
79 + const void *kbuf, const void __user *ubuf)
80 + {
81 + int ret;
82 +- struct pt_regs newregs;
83 ++ struct pt_regs newregs = *task_pt_regs(target);
84 +
85 + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
86 + &newregs,
87 +diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
88 +index 3a2e678b8d30..0122ad1a6027 100644
89 +--- a/arch/arm/mm/fault.c
90 ++++ b/arch/arm/mm/fault.c
91 +@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,
92 +
93 + void __init early_abt_enable(void)
94 + {
95 +- fsr_info[22].fn = early_abort_handler;
96 ++ fsr_info[FSR_FS_AEA].fn = early_abort_handler;
97 + local_abt_enable();
98 +- fsr_info[22].fn = do_bad;
99 ++ fsr_info[FSR_FS_AEA].fn = do_bad;
100 + }
101 +
102 + #ifndef CONFIG_ARM_LPAE
103 +diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
104 +index 67532f242271..afc1f84e763b 100644
105 +--- a/arch/arm/mm/fault.h
106 ++++ b/arch/arm/mm/fault.h
107 +@@ -11,11 +11,15 @@
108 + #define FSR_FS5_0 (0x3f)
109 +
110 + #ifdef CONFIG_ARM_LPAE
111 ++#define FSR_FS_AEA 17
112 ++
113 + static inline int fsr_fs(unsigned int fsr)
114 + {
115 + return fsr & FSR_FS5_0;
116 + }
117 + #else
118 ++#define FSR_FS_AEA 22
119 ++
120 + static inline int fsr_fs(unsigned int fsr)
121 + {
122 + return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
123 +diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
124 +index 9e1499f98def..13f5fad21066 100644
125 +--- a/arch/powerpc/include/asm/reg.h
126 ++++ b/arch/powerpc/include/asm/reg.h
127 +@@ -641,9 +641,10 @@
128 + #define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
129 + #define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
130 + #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
131 +-#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 */
132 ++#define SRR1_WAKEMASK_P8 0x003c0000 /* reason for wakeup on POWER8 and 9 */
133 + #define SRR1_WAKESYSERR 0x00300000 /* System error */
134 + #define SRR1_WAKEEE 0x00200000 /* External interrupt */
135 ++#define SRR1_WAKEHVI 0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
136 + #define SRR1_WAKEMT 0x00280000 /* mtctrl */
137 + #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */
138 + #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */
139 +diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
140 +index f0b238516e9b..e0b9e576905a 100644
141 +--- a/arch/powerpc/include/asm/xics.h
142 ++++ b/arch/powerpc/include/asm/xics.h
143 +@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }
144 +
145 + #ifdef CONFIG_PPC_POWERNV
146 + extern int icp_opal_init(void);
147 ++extern void icp_opal_flush_interrupt(void);
148 + #else
149 + static inline int icp_opal_init(void) { return -ENODEV; }
150 + #endif
151 +diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
152 +index 3493cf4e0452..71697ff70879 100644
153 +--- a/arch/powerpc/mm/tlb-radix.c
154 ++++ b/arch/powerpc/mm/tlb-radix.c
155 +@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
156 + for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
157 + __tlbiel_pid(pid, set, ric);
158 + }
159 +- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
160 +- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
161 +- return;
162 ++ asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
163 + }
164 +
165 + static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
166 +@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
167 + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
168 + : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
169 + asm volatile("ptesync": : :"memory");
170 +- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
171 +- asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
172 + }
173 +
174 + static inline void _tlbie_va(unsigned long va, unsigned long pid,
175 +diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
176 +index c789258ae1e1..eec0e8d0454d 100644
177 +--- a/arch/powerpc/platforms/powernv/smp.c
178 ++++ b/arch/powerpc/platforms/powernv/smp.c
179 +@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)
180 + wmask = SRR1_WAKEMASK_P8;
181 +
182 + idle_states = pnv_get_supported_cpuidle_states();
183 ++
184 + /* We don't want to take decrementer interrupts while we are offline,
185 +- * so clear LPCR:PECE1. We keep PECE2 enabled.
186 ++ * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
187 ++ * enabled as to let IPIs in.
188 + */
189 + mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
190 +
191 +@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)
192 + * contains 0.
193 + */
194 + if (((srr1 & wmask) == SRR1_WAKEEE) ||
195 ++ ((srr1 & wmask) == SRR1_WAKEHVI) ||
196 + (local_paca->irq_happened & PACA_IRQ_EE)) {
197 +- icp_native_flush_interrupt();
198 ++ if (cpu_has_feature(CPU_FTR_ARCH_300))
199 ++ icp_opal_flush_interrupt();
200 ++ else
201 ++ icp_native_flush_interrupt();
202 + } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
203 + unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
204 + asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
205 +@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)
206 + if (srr1 && !generic_check_cpu_restart(cpu))
207 + DBG("CPU%d Unexpected exit while offline !\n", cpu);
208 + }
209 ++
210 ++ /* Re-enable decrementer interrupts */
211 + mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
212 + DBG("CPU%d coming online...\n", cpu);
213 + }
214 +diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
215 +index 60c57657c772..c96c0cb95d87 100644
216 +--- a/arch/powerpc/sysdev/xics/icp-opal.c
217 ++++ b/arch/powerpc/sysdev/xics/icp-opal.c
218 +@@ -132,6 +132,35 @@ static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
219 + return smp_ipi_demux();
220 + }
221 +
222 ++/*
223 ++ * Called when an interrupt is received on an off-line CPU to
224 ++ * clear the interrupt, so that the CPU can go back to nap mode.
225 ++ */
226 ++void icp_opal_flush_interrupt(void)
227 ++{
228 ++ unsigned int xirr;
229 ++ unsigned int vec;
230 ++
231 ++ do {
232 ++ xirr = icp_opal_get_xirr();
233 ++ vec = xirr & 0x00ffffff;
234 ++ if (vec == XICS_IRQ_SPURIOUS)
235 ++ break;
236 ++ if (vec == XICS_IPI) {
237 ++ /* Clear pending IPI */
238 ++ int cpu = smp_processor_id();
239 ++ kvmppc_set_host_ipi(cpu, 0);
240 ++ opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
241 ++ } else {
242 ++ pr_err("XICS: hw interrupt 0x%x to offline cpu, "
243 ++ "disabling\n", vec);
244 ++ xics_mask_unknown_vec(vec);
245 ++ }
246 ++
247 ++ /* EOI the interrupt */
248 ++ } while (opal_int_eoi(xirr) > 0);
249 ++}
250 ++
251 + #endif /* CONFIG_SMP */
252 +
253 + static const struct icp_ops icp_opal_ops = {
254 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
255 +index 984a7bf17f6a..83db0eae9979 100644
256 +--- a/arch/x86/include/asm/processor.h
257 ++++ b/arch/x86/include/asm/processor.h
258 +@@ -104,6 +104,7 @@ struct cpuinfo_x86 {
259 + __u8 x86_phys_bits;
260 + /* CPUID returned core id bits: */
261 + __u8 x86_coreid_bits;
262 ++ __u8 cu_id;
263 + /* Max extended CPUID function supported: */
264 + __u32 extended_cpuid_level;
265 + /* Maximum supported CPUID level, -1=no CPUID: */
266 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
267 +index 7249f1500bcb..d1e25564b3c1 100644
268 +--- a/arch/x86/kernel/apic/io_apic.c
269 ++++ b/arch/x86/kernel/apic/io_apic.c
270 +@@ -1876,7 +1876,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
271 + .irq_ack = irq_chip_ack_parent,
272 + .irq_eoi = ioapic_ack_level,
273 + .irq_set_affinity = ioapic_set_affinity,
274 +- .irq_retrigger = irq_chip_retrigger_hierarchy,
275 + .flags = IRQCHIP_SKIP_SET_WAKE,
276 + };
277 +
278 +@@ -1888,7 +1887,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
279 + .irq_ack = irq_chip_ack_parent,
280 + .irq_eoi = ioapic_ir_ack_level,
281 + .irq_set_affinity = ioapic_set_affinity,
282 +- .irq_retrigger = irq_chip_retrigger_hierarchy,
283 + .flags = IRQCHIP_SKIP_SET_WAKE,
284 + };
285 +
286 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
287 +index 1d3167269a67..2b4cf04239b6 100644
288 +--- a/arch/x86/kernel/cpu/amd.c
289 ++++ b/arch/x86/kernel/cpu/amd.c
290 +@@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
291 +
292 + /* get information required for multi-node processors */
293 + if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
294 ++ u32 eax, ebx, ecx, edx;
295 +
296 +- node_id = cpuid_ecx(0x8000001e) & 7;
297 ++ cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
298 ++
299 ++ node_id = ecx & 0xff;
300 ++ smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
301 ++
302 ++ if (c->x86 == 0x15)
303 ++ c->cu_id = ebx & 0xff;
304 ++
305 ++ if (c->x86 >= 0x17) {
306 ++ c->cpu_core_id = ebx & 0xff;
307 ++
308 ++ if (smp_num_siblings > 1)
309 ++ c->x86_max_cores /= smp_num_siblings;
310 ++ }
311 +
312 + /*
313 + * We may have multiple LLCs if L3 caches exist, so check if we
314 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
315 +index 023c7bfa24df..4eece91ada37 100644
316 +--- a/arch/x86/kernel/cpu/common.c
317 ++++ b/arch/x86/kernel/cpu/common.c
318 +@@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
319 + c->x86_model_id[0] = '\0'; /* Unset */
320 + c->x86_max_cores = 1;
321 + c->x86_coreid_bits = 0;
322 ++ c->cu_id = 0xff;
323 + #ifdef CONFIG_X86_64
324 + c->x86_clflush_size = 64;
325 + c->x86_phys_bits = 36;
326 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
327 +index e9bbe02950ad..36171bcd91f8 100644
328 +--- a/arch/x86/kernel/smpboot.c
329 ++++ b/arch/x86/kernel/smpboot.c
330 +@@ -423,9 +423,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
331 + int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
332 +
333 + if (c->phys_proc_id == o->phys_proc_id &&
334 +- per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
335 +- c->cpu_core_id == o->cpu_core_id)
336 +- return topology_sane(c, o, "smt");
337 ++ per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
338 ++ if (c->cpu_core_id == o->cpu_core_id)
339 ++ return topology_sane(c, o, "smt");
340 ++
341 ++ if ((c->cu_id != 0xff) &&
342 ++ (o->cu_id != 0xff) &&
343 ++ (c->cu_id == o->cu_id))
344 ++ return topology_sane(c, o, "smt");
345 ++ }
346 +
347 + } else if (c->phys_proc_id == o->phys_proc_id &&
348 + c->cpu_core_id == o->cpu_core_id) {
349 +diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
350 +index ea9c49adaa1f..8aa6bea1cd6c 100644
351 +--- a/arch/x86/mm/dump_pagetables.c
352 ++++ b/arch/x86/mm/dump_pagetables.c
353 +@@ -15,6 +15,7 @@
354 + #include <linux/debugfs.h>
355 + #include <linux/mm.h>
356 + #include <linux/init.h>
357 ++#include <linux/sched.h>
358 + #include <linux/seq_file.h>
359 +
360 + #include <asm/pgtable.h>
361 +@@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
362 + } else
363 + note_page(m, &st, __pgprot(0), 1);
364 +
365 ++ cond_resched();
366 + start++;
367 + }
368 +
369 +diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
370 +index e9c0993b131d..e8817e2f0597 100644
371 +--- a/crypto/algif_aead.c
372 ++++ b/crypto/algif_aead.c
373 +@@ -671,9 +671,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
374 + unlock:
375 + list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
376 + af_alg_free_sg(&rsgl->sgl);
377 ++ list_del(&rsgl->list);
378 + if (rsgl != &ctx->first_rsgl)
379 + sock_kfree_s(sk, rsgl, sizeof(*rsgl));
380 +- list_del(&rsgl->list);
381 + }
382 + INIT_LIST_HEAD(&ctx->list);
383 + aead_wmem_wakeup(sk);
384 +diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
385 +index 312c4b4dc363..6eb6733a7a5c 100644
386 +--- a/drivers/acpi/nfit/core.c
387 ++++ b/drivers/acpi/nfit/core.c
388 +@@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
389 + struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
390 + struct device *dev = acpi_desc->dev;
391 + struct acpi_nfit_flush_work flush;
392 ++ int rc;
393 +
394 + /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
395 + device_lock(dev);
396 +@@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
397 + INIT_WORK_ONSTACK(&flush.work, flush_probe);
398 + COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
399 + queue_work(nfit_wq, &flush.work);
400 +- return wait_for_completion_interruptible(&flush.cmp);
401 ++
402 ++ rc = wait_for_completion_interruptible(&flush.cmp);
403 ++ cancel_work_sync(&flush.work);
404 ++ return rc;
405 + }
406 +
407 + static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
408 +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
409 +index 4737520ec823..80fa656da5ab 100644
410 +--- a/drivers/cpufreq/intel_pstate.c
411 ++++ b/drivers/cpufreq/intel_pstate.c
412 +@@ -820,6 +820,25 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
413 + wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
414 + }
415 +
416 ++#define MSR_IA32_POWER_CTL_BIT_EE 19
417 ++
418 ++/* Disable energy efficiency optimization */
419 ++static void intel_pstate_disable_ee(int cpu)
420 ++{
421 ++ u64 power_ctl;
422 ++ int ret;
423 ++
424 ++ ret = rdmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, &power_ctl);
425 ++ if (ret)
426 ++ return;
427 ++
428 ++ if (!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE))) {
429 ++ pr_info("Disabling energy efficiency optimization\n");
430 ++ power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
431 ++ wrmsrl_on_cpu(cpu, MSR_IA32_POWER_CTL, power_ctl);
432 ++ }
433 ++}
434 ++
435 + static int atom_get_min_pstate(void)
436 + {
437 + u64 value;
438 +@@ -1420,6 +1439,11 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
439 + {}
440 + };
441 +
442 ++static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
443 ++ ICPU(INTEL_FAM6_KABYLAKE_DESKTOP, core_params),
444 ++ {}
445 ++};
446 ++
447 + static int intel_pstate_init_cpu(unsigned int cpunum)
448 + {
449 + struct cpudata *cpu;
450 +@@ -1435,6 +1459,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
451 + cpu->cpu = cpunum;
452 +
453 + if (hwp_active) {
454 ++ const struct x86_cpu_id *id;
455 ++
456 ++ id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
457 ++ if (id)
458 ++ intel_pstate_disable_ee(cpunum);
459 ++
460 + intel_pstate_hwp_enable(cpu);
461 + pid_params.sample_rate_ms = 50;
462 + pid_params.sample_rate_ns = 50 * NSEC_PER_MSEC;
463 +diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
464 +index faf3cb3ddce2..a388bf2d67f4 100644
465 +--- a/drivers/crypto/ccp/ccp-dev-v5.c
466 ++++ b/drivers/crypto/ccp/ccp-dev-v5.c
467 +@@ -955,7 +955,7 @@ static irqreturn_t ccp5_irq_handler(int irq, void *data)
468 + static void ccp5_config(struct ccp_device *ccp)
469 + {
470 + /* Public side */
471 +- iowrite32(0x00001249, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
472 ++ iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
473 + }
474 +
475 + static void ccp5other_config(struct ccp_device *ccp)
476 +diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
477 +index da5f4a678083..340aef14d616 100644
478 +--- a/drivers/crypto/ccp/ccp-dev.h
479 ++++ b/drivers/crypto/ccp/ccp-dev.h
480 +@@ -238,6 +238,7 @@ struct ccp_dma_chan {
481 + struct ccp_device *ccp;
482 +
483 + spinlock_t lock;
484 ++ struct list_head created;
485 + struct list_head pending;
486 + struct list_head active;
487 + struct list_head complete;
488 +diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
489 +index 6553912804f7..e5d9278f4019 100644
490 +--- a/drivers/crypto/ccp/ccp-dmaengine.c
491 ++++ b/drivers/crypto/ccp/ccp-dmaengine.c
492 +@@ -63,6 +63,7 @@ static void ccp_free_chan_resources(struct dma_chan *dma_chan)
493 + ccp_free_desc_resources(chan->ccp, &chan->complete);
494 + ccp_free_desc_resources(chan->ccp, &chan->active);
495 + ccp_free_desc_resources(chan->ccp, &chan->pending);
496 ++ ccp_free_desc_resources(chan->ccp, &chan->created);
497 +
498 + spin_unlock_irqrestore(&chan->lock, flags);
499 + }
500 +@@ -273,6 +274,7 @@ static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
501 + spin_lock_irqsave(&chan->lock, flags);
502 +
503 + cookie = dma_cookie_assign(tx_desc);
504 ++ list_del(&desc->entry);
505 + list_add_tail(&desc->entry, &chan->pending);
506 +
507 + spin_unlock_irqrestore(&chan->lock, flags);
508 +@@ -426,7 +428,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
509 +
510 + spin_lock_irqsave(&chan->lock, sflags);
511 +
512 +- list_add_tail(&desc->entry, &chan->pending);
513 ++ list_add_tail(&desc->entry, &chan->created);
514 +
515 + spin_unlock_irqrestore(&chan->lock, sflags);
516 +
517 +@@ -610,6 +612,7 @@ static int ccp_terminate_all(struct dma_chan *dma_chan)
518 + /*TODO: Purge the complete list? */
519 + ccp_free_desc_resources(chan->ccp, &chan->active);
520 + ccp_free_desc_resources(chan->ccp, &chan->pending);
521 ++ ccp_free_desc_resources(chan->ccp, &chan->created);
522 +
523 + spin_unlock_irqrestore(&chan->lock, flags);
524 +
525 +@@ -679,6 +682,7 @@ int ccp_dmaengine_register(struct ccp_device *ccp)
526 + chan->ccp = ccp;
527 +
528 + spin_lock_init(&chan->lock);
529 ++ INIT_LIST_HEAD(&chan->created);
530 + INIT_LIST_HEAD(&chan->pending);
531 + INIT_LIST_HEAD(&chan->active);
532 + INIT_LIST_HEAD(&chan->complete);
533 +diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
534 +index fb5f9bbfa09c..6aece3f25b08 100644
535 +--- a/drivers/crypto/chelsio/chcr_core.c
536 ++++ b/drivers/crypto/chelsio/chcr_core.c
537 +@@ -51,6 +51,7 @@ static struct cxgb4_uld_info chcr_uld_info = {
538 + int assign_chcr_device(struct chcr_dev **dev)
539 + {
540 + struct uld_ctx *u_ctx;
541 ++ int ret = -ENXIO;
542 +
543 + /*
544 + * Which device to use if multiple devices are available TODO
545 +@@ -58,15 +59,14 @@ int assign_chcr_device(struct chcr_dev **dev)
546 + * must go to the same device to maintain the ordering.
547 + */
548 + mutex_lock(&dev_mutex); /* TODO ? */
549 +- u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
550 +- if (!u_ctx) {
551 +- mutex_unlock(&dev_mutex);
552 +- return -ENXIO;
553 ++ list_for_each_entry(u_ctx, &uld_ctx_list, entry)
554 ++ if (u_ctx && u_ctx->dev) {
555 ++ *dev = u_ctx->dev;
556 ++ ret = 0;
557 ++ break;
558 + }
559 +-
560 +- *dev = u_ctx->dev;
561 + mutex_unlock(&dev_mutex);
562 +- return 0;
563 ++ return ret;
564 + }
565 +
566 + static int chcr_dev_add(struct uld_ctx *u_ctx)
567 +@@ -203,10 +203,8 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
568 +
569 + static int __init chcr_crypto_init(void)
570 + {
571 +- if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info)) {
572 ++ if (cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info))
573 + pr_err("ULD register fail: No chcr crypto support in cxgb4");
574 +- return -1;
575 +- }
576 +
577 + return 0;
578 + }
579 +diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
580 +index bc5cbc193aae..5b2d78a5b5aa 100644
581 +--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
582 ++++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
583 +@@ -233,7 +233,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
584 + &hw_data->accel_capabilities_mask);
585 +
586 + /* Find and map all the device's BARS */
587 +- i = 0;
588 ++ i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
589 + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
590 + for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
591 + ADF_PCI_MAX_BARS * 2) {
592 +diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
593 +index e8822536530b..33f0a6251e38 100644
594 +--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
595 ++++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
596 +@@ -69,6 +69,7 @@
597 + #define ADF_ERRSOU5 (0x3A000 + 0xD8)
598 + #define ADF_DEVICE_FUSECTL_OFFSET 0x40
599 + #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
600 ++#define ADF_DEVICE_FUSECTL_MASK 0x80000000
601 + #define ADF_PCI_MAX_BARS 3
602 + #define ADF_DEVICE_NAME_LENGTH 32
603 + #define ADF_ETR_MAX_RINGS_PER_BANK 16
604 +diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
605 +index 1e480f140663..8c4fd255a601 100644
606 +--- a/drivers/crypto/qat/qat_common/qat_hal.c
607 ++++ b/drivers/crypto/qat/qat_common/qat_hal.c
608 +@@ -456,7 +456,7 @@ static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
609 + unsigned int csr_val;
610 + int times = 30;
611 +
612 +- if (handle->pci_dev->device == ADF_C3XXX_PCI_DEVICE_ID)
613 ++ if (handle->pci_dev->device != ADF_DH895XCC_PCI_DEVICE_ID)
614 + return 0;
615 +
616 + csr_val = ADF_CSR_RD(csr_addr, 0);
617 +@@ -716,7 +716,7 @@ int qat_hal_init(struct adf_accel_dev *accel_dev)
618 + (void __iomem *)((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
619 + LOCAL_TO_XFER_REG_OFFSET);
620 + handle->pci_dev = pci_info->pci_dev;
621 +- if (handle->pci_dev->device != ADF_C3XXX_PCI_DEVICE_ID) {
622 ++ if (handle->pci_dev->device == ADF_DH895XCC_PCI_DEVICE_ID) {
623 + sram_bar =
624 + &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
625 + handle->hal_sram_addr_v = sram_bar->virt_addr;
626 +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
627 +index e6862a744210..4e19bde4bbff 100644
628 +--- a/drivers/gpu/drm/drm_atomic.c
629 ++++ b/drivers/gpu/drm/drm_atomic.c
630 +@@ -1759,16 +1759,16 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
631 +
632 + if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
633 + /*
634 +- * TEST_ONLY and PAGE_FLIP_EVENT are mutually exclusive,
635 +- * if they weren't, this code should be called on success
636 +- * for TEST_ONLY too.
637 ++ * Free the allocated event. drm_atomic_helper_setup_commit
638 ++ * can allocate an event too, so only free it if it's ours
639 ++ * to prevent a double free in drm_atomic_state_clear.
640 + */
641 +-
642 + for_each_crtc_in_state(state, crtc, crtc_state, i) {
643 +- if (!crtc_state->event)
644 +- continue;
645 +-
646 +- drm_event_cancel_free(dev, &crtc_state->event->base);
647 ++ struct drm_pending_vblank_event *event = crtc_state->event;
648 ++ if (event && (event->base.fence || event->base.file_priv)) {
649 ++ drm_event_cancel_free(dev, &event->base);
650 ++ crtc_state->event = NULL;
651 ++ }
652 + }
653 + }
654 +
655 +diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
656 +index a218c2e395e7..0c400f852a76 100644
657 +--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
658 ++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
659 +@@ -1215,14 +1215,14 @@ validate_exec_list(struct drm_device *dev,
660 + if (exec[i].offset !=
661 + gen8_canonical_addr(exec[i].offset & PAGE_MASK))
662 + return -EINVAL;
663 +-
664 +- /* From drm_mm perspective address space is continuous,
665 +- * so from this point we're always using non-canonical
666 +- * form internally.
667 +- */
668 +- exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
669 + }
670 +
671 ++ /* From drm_mm perspective address space is continuous,
672 ++ * so from this point we're always using non-canonical
673 ++ * form internally.
674 ++ */
675 ++ exec[i].offset = gen8_noncanonical_addr(exec[i].offset);
676 ++
677 + if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
678 + return -EINVAL;
679 +
680 +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
681 +index 8079e5b380cb..b9be8a6141d8 100644
682 +--- a/drivers/gpu/drm/i915/intel_display.c
683 ++++ b/drivers/gpu/drm/i915/intel_display.c
684 +@@ -4280,10 +4280,10 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
685 + drm_crtc_vblank_put(&intel_crtc->base);
686 +
687 + wake_up_all(&dev_priv->pending_flip_queue);
688 +- queue_work(dev_priv->wq, &work->unpin_work);
689 +-
690 + trace_i915_flip_complete(intel_crtc->plane,
691 + work->pending_flip_obj);
692 ++
693 ++ queue_work(dev_priv->wq, &work->unpin_work);
694 + }
695 +
696 + static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
697 +diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
698 +index 1c59ca50c430..cae27c55dd99 100644
699 +--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
700 ++++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
701 +@@ -1723,7 +1723,8 @@ bxt_get_dpll(struct intel_crtc *crtc,
702 + return NULL;
703 +
704 + if ((encoder->type == INTEL_OUTPUT_DP ||
705 +- encoder->type == INTEL_OUTPUT_EDP) &&
706 ++ encoder->type == INTEL_OUTPUT_EDP ||
707 ++ encoder->type == INTEL_OUTPUT_DP_MST) &&
708 + !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
709 + return NULL;
710 +
711 +diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
712 +index 16f91c8490fe..5fb4c6d9209b 100644
713 +--- a/drivers/hv/channel.c
714 ++++ b/drivers/hv/channel.c
715 +@@ -39,7 +39,7 @@
716 + * vmbus_setevent- Trigger an event notification on the specified
717 + * channel.
718 + */
719 +-static void vmbus_setevent(struct vmbus_channel *channel)
720 ++void vmbus_setevent(struct vmbus_channel *channel)
721 + {
722 + struct hv_monitor_page *monitorpage;
723 +
724 +@@ -65,6 +65,7 @@ static void vmbus_setevent(struct vmbus_channel *channel)
725 + vmbus_set_event(channel);
726 + }
727 + }
728 ++EXPORT_SYMBOL_GPL(vmbus_setevent);
729 +
730 + /*
731 + * vmbus_open - Open the specified channel.
732 +@@ -635,8 +636,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
733 + u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
734 + struct kvec bufferlist[3];
735 + u64 aligned_data = 0;
736 +- int ret;
737 +- bool signal = false;
738 + bool lock = channel->acquire_ring_lock;
739 + int num_vecs = ((bufferlen != 0) ? 3 : 1);
740 +
741 +@@ -656,33 +655,9 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
742 + bufferlist[2].iov_base = &aligned_data;
743 + bufferlist[2].iov_len = (packetlen_aligned - packetlen);
744 +
745 +- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, num_vecs,
746 +- &signal, lock, channel->signal_policy);
747 +-
748 +- /*
749 +- * Signalling the host is conditional on many factors:
750 +- * 1. The ring state changed from being empty to non-empty.
751 +- * This is tracked by the variable "signal".
752 +- * 2. The variable kick_q tracks if more data will be placed
753 +- * on the ring. We will not signal if more data is
754 +- * to be placed.
755 +- *
756 +- * Based on the channel signal state, we will decide
757 +- * which signaling policy will be applied.
758 +- *
759 +- * If we cannot write to the ring-buffer; signal the host
760 +- * even if we may not have written anything. This is a rare
761 +- * enough condition that it should not matter.
762 +- * NOTE: in this case, the hvsock channel is an exception, because
763 +- * it looks the host side's hvsock implementation has a throttling
764 +- * mechanism which can hurt the performance otherwise.
765 +- */
766 +-
767 +- if (((ret == 0) && kick_q && signal) ||
768 +- (ret && !is_hvsock_channel(channel)))
769 +- vmbus_setevent(channel);
770 ++ return hv_ringbuffer_write(channel, bufferlist, num_vecs,
771 ++ lock, kick_q);
772 +
773 +- return ret;
774 + }
775 + EXPORT_SYMBOL(vmbus_sendpacket_ctl);
776 +
777 +@@ -723,7 +698,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
778 + u32 flags,
779 + bool kick_q)
780 + {
781 +- int ret;
782 + int i;
783 + struct vmbus_channel_packet_page_buffer desc;
784 + u32 descsize;
785 +@@ -731,7 +705,6 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
786 + u32 packetlen_aligned;
787 + struct kvec bufferlist[3];
788 + u64 aligned_data = 0;
789 +- bool signal = false;
790 + bool lock = channel->acquire_ring_lock;
791 +
792 + if (pagecount > MAX_PAGE_BUFFER_COUNT)
793 +@@ -769,29 +742,8 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
794 + bufferlist[2].iov_base = &aligned_data;
795 + bufferlist[2].iov_len = (packetlen_aligned - packetlen);
796 +
797 +- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
798 +- &signal, lock, channel->signal_policy);
799 +-
800 +- /*
801 +- * Signalling the host is conditional on many factors:
802 +- * 1. The ring state changed from being empty to non-empty.
803 +- * This is tracked by the variable "signal".
804 +- * 2. The variable kick_q tracks if more data will be placed
805 +- * on the ring. We will not signal if more data is
806 +- * to be placed.
807 +- *
808 +- * Based on the channel signal state, we will decide
809 +- * which signaling policy will be applied.
810 +- *
811 +- * If we cannot write to the ring-buffer; signal the host
812 +- * even if we may not have written anything. This is a rare
813 +- * enough condition that it should not matter.
814 +- */
815 +-
816 +- if (((ret == 0) && kick_q && signal) || (ret))
817 +- vmbus_setevent(channel);
818 +-
819 +- return ret;
820 ++ return hv_ringbuffer_write(channel, bufferlist, 3,
821 ++ lock, kick_q);
822 + }
823 + EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
824 +
825 +@@ -822,12 +774,10 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
826 + u32 desc_size,
827 + void *buffer, u32 bufferlen, u64 requestid)
828 + {
829 +- int ret;
830 + u32 packetlen;
831 + u32 packetlen_aligned;
832 + struct kvec bufferlist[3];
833 + u64 aligned_data = 0;
834 +- bool signal = false;
835 + bool lock = channel->acquire_ring_lock;
836 +
837 + packetlen = desc_size + bufferlen;
838 +@@ -848,13 +798,8 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
839 + bufferlist[2].iov_base = &aligned_data;
840 + bufferlist[2].iov_len = (packetlen_aligned - packetlen);
841 +
842 +- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
843 +- &signal, lock, channel->signal_policy);
844 +-
845 +- if (ret == 0 && signal)
846 +- vmbus_setevent(channel);
847 +-
848 +- return ret;
849 ++ return hv_ringbuffer_write(channel, bufferlist, 3,
850 ++ lock, true);
851 + }
852 + EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
853 +
854 +@@ -866,14 +811,12 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
855 + struct hv_multipage_buffer *multi_pagebuffer,
856 + void *buffer, u32 bufferlen, u64 requestid)
857 + {
858 +- int ret;
859 + struct vmbus_channel_packet_multipage_buffer desc;
860 + u32 descsize;
861 + u32 packetlen;
862 + u32 packetlen_aligned;
863 + struct kvec bufferlist[3];
864 + u64 aligned_data = 0;
865 +- bool signal = false;
866 + bool lock = channel->acquire_ring_lock;
867 + u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
868 + multi_pagebuffer->len);
869 +@@ -913,13 +856,8 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
870 + bufferlist[2].iov_base = &aligned_data;
871 + bufferlist[2].iov_len = (packetlen_aligned - packetlen);
872 +
873 +- ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3,
874 +- &signal, lock, channel->signal_policy);
875 +-
876 +- if (ret == 0 && signal)
877 +- vmbus_setevent(channel);
878 +-
879 +- return ret;
880 ++ return hv_ringbuffer_write(channel, bufferlist, 3,
881 ++ lock, true);
882 + }
883 + EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
884 +
885 +@@ -941,16 +879,9 @@ __vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
886 + u32 bufferlen, u32 *buffer_actual_len, u64 *requestid,
887 + bool raw)
888 + {
889 +- int ret;
890 +- bool signal = false;
891 ++ return hv_ringbuffer_read(channel, buffer, bufferlen,
892 ++ buffer_actual_len, requestid, raw);
893 +
894 +- ret = hv_ringbuffer_read(&channel->inbound, buffer, bufferlen,
895 +- buffer_actual_len, requestid, &signal, raw);
896 +-
897 +- if (signal)
898 +- vmbus_setevent(channel);
899 +-
900 +- return ret;
901 + }
902 +
903 + int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
904 +diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
905 +index 1bc1d4795243..caf341842464 100644
906 +--- a/drivers/hv/channel_mgmt.c
907 ++++ b/drivers/hv/channel_mgmt.c
908 +@@ -449,8 +449,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel)
909 + }
910 +
911 + dev_type = hv_get_dev_type(newchannel);
912 +- if (dev_type == HV_NIC)
913 +- set_channel_signal_state(newchannel, HV_SIGNAL_POLICY_EXPLICIT);
914 +
915 + init_vp_index(newchannel, dev_type);
916 +
917 +diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
918 +index a5b4442433c8..2b13f2a0a71e 100644
919 +--- a/drivers/hv/hyperv_vmbus.h
920 ++++ b/drivers/hv/hyperv_vmbus.h
921 +@@ -527,14 +527,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
922 +
923 + void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
924 +
925 +-int hv_ringbuffer_write(struct hv_ring_buffer_info *ring_info,
926 ++int hv_ringbuffer_write(struct vmbus_channel *channel,
927 + struct kvec *kv_list,
928 +- u32 kv_count, bool *signal, bool lock,
929 +- enum hv_signal_policy policy);
930 ++ u32 kv_count, bool lock,
931 ++ bool kick_q);
932 +
933 +-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
934 ++int hv_ringbuffer_read(struct vmbus_channel *channel,
935 + void *buffer, u32 buflen, u32 *buffer_actual_len,
936 +- u64 *requestid, bool *signal, bool raw);
937 ++ u64 *requestid, bool raw);
938 +
939 + void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
940 + struct hv_ring_buffer_debug_info *debug_info);
941 +diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
942 +index 08043da1a61c..308dbda700eb 100644
943 +--- a/drivers/hv/ring_buffer.c
944 ++++ b/drivers/hv/ring_buffer.c
945 +@@ -66,21 +66,25 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
946 + * once the ring buffer is empty, it will clear the
947 + * interrupt_mask and re-check to see if new data has
948 + * arrived.
949 ++ *
950 ++ * KYS: Oct. 30, 2016:
951 ++ * It looks like Windows hosts have logic to deal with DOS attacks that
952 ++ * can be triggered if it receives interrupts when it is not expecting
953 ++ * the interrupt. The host expects interrupts only when the ring
954 ++ * transitions from empty to non-empty (or full to non full on the guest
955 ++ * to host ring).
956 ++ * So, base the signaling decision solely on the ring state until the
957 ++ * host logic is fixed.
958 + */
959 +
960 +-static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
961 +- enum hv_signal_policy policy)
962 ++static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
963 ++ bool kick_q)
964 + {
965 ++ struct hv_ring_buffer_info *rbi = &channel->outbound;
966 ++
967 + virt_mb();
968 + if (READ_ONCE(rbi->ring_buffer->interrupt_mask))
969 +- return false;
970 +-
971 +- /*
972 +- * When the client wants to control signaling,
973 +- * we only honour the host interrupt mask.
974 +- */
975 +- if (policy == HV_SIGNAL_POLICY_EXPLICIT)
976 +- return true;
977 ++ return;
978 +
979 + /* check interrupt_mask before read_index */
980 + virt_rmb();
981 +@@ -89,9 +93,9 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi,
982 + * ring transitions from being empty to non-empty.
983 + */
984 + if (old_write == READ_ONCE(rbi->ring_buffer->read_index))
985 +- return true;
986 ++ vmbus_setevent(channel);
987 +
988 +- return false;
989 ++ return;
990 + }
991 +
992 + /* Get the next write location for the specified ring buffer. */
993 +@@ -280,9 +284,9 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
994 + }
995 +
996 + /* Write to the ring buffer. */
997 +-int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
998 +- struct kvec *kv_list, u32 kv_count, bool *signal, bool lock,
999 +- enum hv_signal_policy policy)
1000 ++int hv_ringbuffer_write(struct vmbus_channel *channel,
1001 ++ struct kvec *kv_list, u32 kv_count, bool lock,
1002 ++ bool kick_q)
1003 + {
1004 + int i = 0;
1005 + u32 bytes_avail_towrite;
1006 +@@ -292,6 +296,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
1007 + u32 old_write;
1008 + u64 prev_indices = 0;
1009 + unsigned long flags = 0;
1010 ++ struct hv_ring_buffer_info *outring_info = &channel->outbound;
1011 +
1012 + for (i = 0; i < kv_count; i++)
1013 + totalbytes_towrite += kv_list[i].iov_len;
1014 +@@ -344,13 +349,13 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info,
1015 + if (lock)
1016 + spin_unlock_irqrestore(&outring_info->ring_lock, flags);
1017 +
1018 +- *signal = hv_need_to_signal(old_write, outring_info, policy);
1019 ++ hv_signal_on_write(old_write, channel, kick_q);
1020 + return 0;
1021 + }
1022 +
1023 +-int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
1024 ++int hv_ringbuffer_read(struct vmbus_channel *channel,
1025 + void *buffer, u32 buflen, u32 *buffer_actual_len,
1026 +- u64 *requestid, bool *signal, bool raw)
1027 ++ u64 *requestid, bool raw)
1028 + {
1029 + u32 bytes_avail_toread;
1030 + u32 next_read_location = 0;
1031 +@@ -359,6 +364,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
1032 + u32 offset;
1033 + u32 packetlen;
1034 + int ret = 0;
1035 ++ struct hv_ring_buffer_info *inring_info = &channel->inbound;
1036 +
1037 + if (buflen <= 0)
1038 + return -EINVAL;
1039 +@@ -377,6 +383,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
1040 + return ret;
1041 + }
1042 +
1043 ++ init_cached_read_index(channel);
1044 + next_read_location = hv_get_next_read_location(inring_info);
1045 + next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
1046 + sizeof(desc),
1047 +@@ -416,7 +423,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
1048 + /* Update the read index */
1049 + hv_set_next_read_location(inring_info, next_read_location);
1050 +
1051 +- *signal = hv_need_to_signal_on_read(inring_info);
1052 ++ hv_signal_on_read(channel);
1053 +
1054 + return ret;
1055 + }
1056 +diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
1057 +index 1869152f1d23..9b732c5f89e1 100644
1058 +--- a/drivers/infiniband/sw/rxe/rxe_mr.c
1059 ++++ b/drivers/infiniband/sw/rxe/rxe_mr.c
1060 +@@ -59,9 +59,11 @@ int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
1061 +
1062 + case RXE_MEM_TYPE_MR:
1063 + case RXE_MEM_TYPE_FMR:
1064 +- return ((iova < mem->iova) ||
1065 +- ((iova + length) > (mem->iova + mem->length))) ?
1066 +- -EFAULT : 0;
1067 ++ if (iova < mem->iova ||
1068 ++ length > mem->length ||
1069 ++ iova > mem->iova + mem->length - length)
1070 ++ return -EFAULT;
1071 ++ return 0;
1072 +
1073 + default:
1074 + return -EFAULT;
1075 +diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
1076 +index dd3d88adc003..ccf624763565 100644
1077 +--- a/drivers/infiniband/sw/rxe/rxe_resp.c
1078 ++++ b/drivers/infiniband/sw/rxe/rxe_resp.c
1079 +@@ -472,7 +472,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
1080 + goto err2;
1081 + }
1082 +
1083 +- resid = mtu;
1084 ++ qp->resp.resid = mtu;
1085 + } else {
1086 + if (pktlen != resid) {
1087 + state = RESPST_ERR_LENGTH;
1088 +diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
1089 +index 92595b98e7ed..022be0e22eba 100644
1090 +--- a/drivers/input/misc/uinput.c
1091 ++++ b/drivers/input/misc/uinput.c
1092 +@@ -263,13 +263,21 @@ static int uinput_create_device(struct uinput_device *udev)
1093 + return -EINVAL;
1094 + }
1095 +
1096 +- if (test_bit(ABS_MT_SLOT, dev->absbit)) {
1097 +- nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
1098 +- error = input_mt_init_slots(dev, nslot, 0);
1099 +- if (error)
1100 ++ if (test_bit(EV_ABS, dev->evbit)) {
1101 ++ input_alloc_absinfo(dev);
1102 ++ if (!dev->absinfo) {
1103 ++ error = -EINVAL;
1104 + goto fail1;
1105 +- } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1106 +- input_set_events_per_packet(dev, 60);
1107 ++ }
1108 ++
1109 ++ if (test_bit(ABS_MT_SLOT, dev->absbit)) {
1110 ++ nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1;
1111 ++ error = input_mt_init_slots(dev, nslot, 0);
1112 ++ if (error)
1113 ++ goto fail1;
1114 ++ } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1115 ++ input_set_events_per_packet(dev, 60);
1116 ++ }
1117 + }
1118 +
1119 + if (test_bit(EV_FF, dev->evbit) && !udev->ff_effects_max) {
1120 +diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
1121 +index 31a89c8832c0..2c965424d383 100644
1122 +--- a/drivers/md/dm-rq.c
1123 ++++ b/drivers/md/dm-rq.c
1124 +@@ -804,6 +804,10 @@ static void dm_old_request_fn(struct request_queue *q)
1125 + int srcu_idx;
1126 + struct dm_table *map = dm_get_live_table(md, &srcu_idx);
1127 +
1128 ++ if (unlikely(!map)) {
1129 ++ dm_put_live_table(md, srcu_idx);
1130 ++ return;
1131 ++ }
1132 + ti = dm_table_find_target(map, pos);
1133 + dm_put_live_table(md, srcu_idx);
1134 + }
1135 +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1136 +index 878950a42e6c..2cf8b1d82d6a 100644
1137 +--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1138 ++++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
1139 +@@ -1007,9 +1007,7 @@
1140 +
1141 + static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
1142 + {
1143 +- u8 __iomem *reg_addr = ACCESS_ONCE(base);
1144 +-
1145 +- writel(value, reg_addr + reg);
1146 ++ writel(value, base + reg);
1147 + }
1148 +
1149 + #define dsaf_write_dev(a, reg, value) \
1150 +@@ -1017,9 +1015,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
1151 +
1152 + static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
1153 + {
1154 +- u8 __iomem *reg_addr = ACCESS_ONCE(base);
1155 +-
1156 +- return readl(reg_addr + reg);
1157 ++ return readl(base + reg);
1158 + }
1159 +
1160 + static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
1161 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1162 +index 27ff401cec20..51c6a57ca873 100644
1163 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1164 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
1165 +@@ -991,6 +991,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
1166 + {
1167 + struct mlx5e_priv *priv = netdev_priv(dev);
1168 + int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1169 ++ bool hash_changed = false;
1170 + void *in;
1171 +
1172 + if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
1173 +@@ -1012,14 +1013,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
1174 + mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
1175 + }
1176 +
1177 +- if (key)
1178 ++ if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
1179 ++ hfunc != priv->params.rss_hfunc) {
1180 ++ priv->params.rss_hfunc = hfunc;
1181 ++ hash_changed = true;
1182 ++ }
1183 ++
1184 ++ if (key) {
1185 + memcpy(priv->params.toeplitz_hash_key, key,
1186 + sizeof(priv->params.toeplitz_hash_key));
1187 ++ hash_changed = hash_changed ||
1188 ++ priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
1189 ++ }
1190 +
1191 +- if (hfunc != ETH_RSS_HASH_NO_CHANGE)
1192 +- priv->params.rss_hfunc = hfunc;
1193 +-
1194 +- mlx5e_modify_tirs_hash(priv, in, inlen);
1195 ++ if (hash_changed)
1196 ++ mlx5e_modify_tirs_hash(priv, in, inlen);
1197 +
1198 + mutex_unlock(&priv->state_lock);
1199 +
1200 +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
1201 +index 720b5fa9e625..c2ac39a940f7 100644
1202 +--- a/drivers/net/hyperv/netvsc.c
1203 ++++ b/drivers/net/hyperv/netvsc.c
1204 +@@ -1288,6 +1288,9 @@ void netvsc_channel_cb(void *context)
1205 + ndev = hv_get_drvdata(device);
1206 + buffer = get_per_channel_state(channel);
1207 +
1208 ++ /* commit_rd_index() -> hv_signal_on_read() needs this. */
1209 ++ init_cached_read_index(channel);
1210 ++
1211 + do {
1212 + desc = get_next_pkt_raw(channel);
1213 + if (desc != NULL) {
1214 +@@ -1340,6 +1343,9 @@ void netvsc_channel_cb(void *context)
1215 +
1216 + bufferlen = bytes_recvd;
1217 + }
1218 ++
1219 ++ init_cached_read_index(channel);
1220 ++
1221 + } while (1);
1222 +
1223 + if (bufferlen > NETVSC_PACKET_SIZE)
1224 +diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
1225 +index 8b6e37ce3f66..20bfb373dcd6 100644
1226 +--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
1227 ++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
1228 +@@ -96,7 +96,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
1229 + struct rtl_priv *rtlpriv = rtl_priv(hw);
1230 + struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1231 + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1232 +- char *fw_name = "rtlwifi/rtl8192cfwU.bin";
1233 ++ char *fw_name;
1234 +
1235 + rtl8192ce_bt_reg_init(hw);
1236 +
1237 +@@ -168,8 +168,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
1238 + }
1239 +
1240 + /* request fw */
1241 +- if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
1242 ++ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
1243 ++ !IS_92C_SERIAL(rtlhal->version))
1244 ++ fw_name = "rtlwifi/rtl8192cfwU.bin";
1245 ++ else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
1246 + fw_name = "rtlwifi/rtl8192cfwU_B.bin";
1247 ++ else
1248 ++ fw_name = "rtlwifi/rtl8192cfw.bin";
1249 +
1250 + rtlpriv->max_fw_size = 0x4000;
1251 + pr_info("Using firmware %s\n", fw_name);
1252 +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1253 +index bf2744e1e3db..0cdcb2169083 100644
1254 +--- a/drivers/net/xen-netfront.c
1255 ++++ b/drivers/net/xen-netfront.c
1256 +@@ -1397,6 +1397,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1257 + for (i = 0; i < num_queues && info->queues; ++i) {
1258 + struct netfront_queue *queue = &info->queues[i];
1259 +
1260 ++ del_timer_sync(&queue->rx_refill_timer);
1261 ++
1262 + if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1263 + unbind_from_irqhandler(queue->tx_irq, queue);
1264 + if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1265 +@@ -1751,7 +1753,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
1266 +
1267 + if (netif_running(info->netdev))
1268 + napi_disable(&queue->napi);
1269 +- del_timer_sync(&queue->rx_refill_timer);
1270 + netif_napi_del(&queue->napi);
1271 + }
1272 +
1273 +diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
1274 +index 1480734c2d6e..aefca644219b 100644
1275 +--- a/drivers/nvdimm/namespace_devs.c
1276 ++++ b/drivers/nvdimm/namespace_devs.c
1277 +@@ -962,8 +962,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1278 + struct nvdimm_drvdata *ndd;
1279 + struct nd_label_id label_id;
1280 + u32 flags = 0, remainder;
1281 ++ int rc, i, id = -1;
1282 + u8 *uuid = NULL;
1283 +- int rc, i;
1284 +
1285 + if (dev->driver || ndns->claim)
1286 + return -EBUSY;
1287 +@@ -972,11 +972,13 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1288 + struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1289 +
1290 + uuid = nspm->uuid;
1291 ++ id = nspm->id;
1292 + } else if (is_namespace_blk(dev)) {
1293 + struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1294 +
1295 + uuid = nsblk->uuid;
1296 + flags = NSLABEL_FLAG_LOCAL;
1297 ++ id = nsblk->id;
1298 + }
1299 +
1300 + /*
1301 +@@ -1039,10 +1041,11 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
1302 +
1303 + /*
1304 + * Try to delete the namespace if we deleted all of its
1305 +- * allocation, this is not the seed device for the region, and
1306 +- * it is not actively claimed by a btt instance.
1307 ++ * allocation, this is not the seed or 0th device for the
1308 ++ * region, and it is not actively claimed by a btt, pfn, or dax
1309 ++ * instance.
1310 + */
1311 +- if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
1312 ++ if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
1313 + nd_device_unregister(dev, ND_ASYNC);
1314 +
1315 + return rc;
1316 +diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
1317 +index a2ac9e641aa9..6c033c9a2f06 100644
1318 +--- a/drivers/nvdimm/pfn_devs.c
1319 ++++ b/drivers/nvdimm/pfn_devs.c
1320 +@@ -627,15 +627,12 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
1321 + size = resource_size(&nsio->res);
1322 + npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
1323 + if (nd_pfn->mode == PFN_MODE_PMEM) {
1324 +- unsigned long memmap_size;
1325 +-
1326 + /*
1327 + * vmemmap_populate_hugepages() allocates the memmap array in
1328 + * HPAGE_SIZE chunks.
1329 + */
1330 +- memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
1331 +- offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
1332 +- nd_pfn->align) - start;
1333 ++ offset = ALIGN(start + SZ_8K + 64 * npfns + dax_label_reserve,
1334 ++ max(nd_pfn->align, HPAGE_SIZE)) - start;
1335 + } else if (nd_pfn->mode == PFN_MODE_RAM)
1336 + offset = ALIGN(start + SZ_8K + dax_label_reserve,
1337 + nd_pfn->align) - start;
1338 +diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
1339 +index 75f820ca17b7..27ff38f839fc 100644
1340 +--- a/drivers/s390/scsi/zfcp_fsf.c
1341 ++++ b/drivers/s390/scsi/zfcp_fsf.c
1342 +@@ -1583,7 +1583,7 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
1343 + int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1344 + {
1345 + struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1346 +- struct zfcp_fsf_req *req = NULL;
1347 ++ struct zfcp_fsf_req *req;
1348 + int retval = -EIO;
1349 +
1350 + spin_lock_irq(&qdio->req_q_lock);
1351 +@@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1352 + zfcp_fsf_req_free(req);
1353 + out:
1354 + spin_unlock_irq(&qdio->req_q_lock);
1355 +- if (req && !IS_ERR(req))
1356 ++ if (!retval)
1357 + zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
1358 + return retval;
1359 + }
1360 +@@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
1361 + int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1362 + {
1363 + struct zfcp_qdio *qdio = wka_port->adapter->qdio;
1364 +- struct zfcp_fsf_req *req = NULL;
1365 ++ struct zfcp_fsf_req *req;
1366 + int retval = -EIO;
1367 +
1368 + spin_lock_irq(&qdio->req_q_lock);
1369 +@@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1370 + zfcp_fsf_req_free(req);
1371 + out:
1372 + spin_unlock_irq(&qdio->req_q_lock);
1373 +- if (req && !IS_ERR(req))
1374 ++ if (!retval)
1375 + zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
1376 + return retval;
1377 + }
1378 +diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
1379 +index 341ea327ae79..792d3e7e35e2 100644
1380 +--- a/drivers/scsi/aacraid/comminit.c
1381 ++++ b/drivers/scsi/aacraid/comminit.c
1382 +@@ -50,9 +50,13 @@ struct aac_common aac_config = {
1383 +
1384 + static inline int aac_is_msix_mode(struct aac_dev *dev)
1385 + {
1386 +- u32 status;
1387 ++ u32 status = 0;
1388 +
1389 +- status = src_readl(dev, MUnit.OMR);
1390 ++ if (dev->pdev->device == PMC_DEVICE_S6 ||
1391 ++ dev->pdev->device == PMC_DEVICE_S7 ||
1392 ++ dev->pdev->device == PMC_DEVICE_S8) {
1393 ++ status = src_readl(dev, MUnit.OMR);
1394 ++ }
1395 + return (status & AAC_INT_MODE_MSIX);
1396 + }
1397 +
1398 +diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1399 +index e3b911c895b4..91dfd58b175d 100644
1400 +--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1401 ++++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
1402 +@@ -3929,6 +3929,7 @@ static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
1403 + static const struct target_core_fabric_ops ibmvscsis_ops = {
1404 + .module = THIS_MODULE,
1405 + .name = "ibmvscsis",
1406 ++ .max_data_sg_nents = MAX_TXU / PAGE_SIZE,
1407 + .get_fabric_name = ibmvscsis_get_fabric_name,
1408 + .tpg_get_wwn = ibmvscsis_get_fabric_wwn,
1409 + .tpg_get_tag = ibmvscsis_get_tag,
1410 +diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1411 +index f84a6087cebd..8a7941b8189f 100644
1412 +--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1413 ++++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
1414 +@@ -51,6 +51,7 @@
1415 + #include <linux/workqueue.h>
1416 + #include <linux/delay.h>
1417 + #include <linux/pci.h>
1418 ++#include <linux/pci-aspm.h>
1419 + #include <linux/interrupt.h>
1420 + #include <linux/aer.h>
1421 + #include <linux/raid_class.h>
1422 +@@ -8706,6 +8707,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1423 +
1424 + switch (hba_mpi_version) {
1425 + case MPI2_VERSION:
1426 ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
1427 ++ PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
1428 + /* Use mpt2sas driver host template for SAS 2.0 HBA's */
1429 + shost = scsi_host_alloc(&mpt2sas_driver_template,
1430 + sizeof(struct MPT3SAS_ADAPTER));
1431 +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1432 +index 078d797cb492..bea819e5336d 100644
1433 +--- a/drivers/scsi/qla2xxx/qla_os.c
1434 ++++ b/drivers/scsi/qla2xxx/qla_os.c
1435 +@@ -1459,7 +1459,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
1436 + /* Don't abort commands in adapter during EEH
1437 + * recovery as it's not accessible/responding.
1438 + */
1439 +- if (!ha->flags.eeh_busy) {
1440 ++ if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
1441 + /* Get a reference to the sp and drop the lock.
1442 + * The reference ensures this sp->done() call
1443 + * - and not the call in qla2xxx_eh_abort() -
1444 +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
1445 +index 6b423485c5d6..ea9617c7b403 100644
1446 +--- a/drivers/target/target_core_device.c
1447 ++++ b/drivers/target/target_core_device.c
1448 +@@ -351,7 +351,15 @@ int core_enable_device_list_for_node(
1449 + kfree(new);
1450 + return -EINVAL;
1451 + }
1452 +- BUG_ON(orig->se_lun_acl != NULL);
1453 ++ if (orig->se_lun_acl != NULL) {
1454 ++ pr_warn_ratelimited("Detected existing explicit"
1455 ++ " se_lun_acl->se_lun_group reference for %s"
1456 ++ " mapped_lun: %llu, failing\n",
1457 ++ nacl->initiatorname, mapped_lun);
1458 ++ mutex_unlock(&nacl->lun_entry_mutex);
1459 ++ kfree(new);
1460 ++ return -EINVAL;
1461 ++ }
1462 +
1463 + rcu_assign_pointer(new->se_lun, lun);
1464 + rcu_assign_pointer(new->se_lun_acl, lun_acl);
1465 +diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
1466 +index 04f616b3ba0a..aabd6602da6c 100644
1467 +--- a/drivers/target/target_core_sbc.c
1468 ++++ b/drivers/target/target_core_sbc.c
1469 +@@ -450,6 +450,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
1470 + int *post_ret)
1471 + {
1472 + struct se_device *dev = cmd->se_dev;
1473 ++ sense_reason_t ret = TCM_NO_SENSE;
1474 +
1475 + /*
1476 + * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
1477 +@@ -457,9 +458,12 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
1478 + * sent to the backend driver.
1479 + */
1480 + spin_lock_irq(&cmd->t_state_lock);
1481 +- if ((cmd->transport_state & CMD_T_SENT) && !cmd->scsi_status) {
1482 ++ if (cmd->transport_state & CMD_T_SENT) {
1483 + cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
1484 + *post_ret = 1;
1485 ++
1486 ++ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
1487 ++ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1488 + }
1489 + spin_unlock_irq(&cmd->t_state_lock);
1490 +
1491 +@@ -469,7 +473,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
1492 + */
1493 + up(&dev->caw_sem);
1494 +
1495 +- return TCM_NO_SENSE;
1496 ++ return ret;
1497 + }
1498 +
1499 + static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success,
1500 +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
1501 +index 7dfefd66df93..767d1eb6e035 100644
1502 +--- a/drivers/target/target_core_transport.c
1503 ++++ b/drivers/target/target_core_transport.c
1504 +@@ -457,8 +457,20 @@ static void target_complete_nacl(struct kref *kref)
1505 + {
1506 + struct se_node_acl *nacl = container_of(kref,
1507 + struct se_node_acl, acl_kref);
1508 ++ struct se_portal_group *se_tpg = nacl->se_tpg;
1509 +
1510 +- complete(&nacl->acl_free_comp);
1511 ++ if (!nacl->dynamic_stop) {
1512 ++ complete(&nacl->acl_free_comp);
1513 ++ return;
1514 ++ }
1515 ++
1516 ++ mutex_lock(&se_tpg->acl_node_mutex);
1517 ++ list_del(&nacl->acl_list);
1518 ++ mutex_unlock(&se_tpg->acl_node_mutex);
1519 ++
1520 ++ core_tpg_wait_for_nacl_pr_ref(nacl);
1521 ++ core_free_device_list_for_node(nacl, se_tpg);
1522 ++ kfree(nacl);
1523 + }
1524 +
1525 + void target_put_nacl(struct se_node_acl *nacl)
1526 +@@ -499,12 +511,39 @@ EXPORT_SYMBOL(transport_deregister_session_configfs);
1527 + void transport_free_session(struct se_session *se_sess)
1528 + {
1529 + struct se_node_acl *se_nacl = se_sess->se_node_acl;
1530 ++
1531 + /*
1532 + * Drop the se_node_acl->nacl_kref obtained from within
1533 + * core_tpg_get_initiator_node_acl().
1534 + */
1535 + if (se_nacl) {
1536 ++ struct se_portal_group *se_tpg = se_nacl->se_tpg;
1537 ++ const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
1538 ++ unsigned long flags;
1539 ++
1540 + se_sess->se_node_acl = NULL;
1541 ++
1542 ++ /*
1543 ++ * Also determine if we need to drop the extra ->cmd_kref if
1544 ++ * it had been previously dynamically generated, and
1545 ++ * the endpoint is not caching dynamic ACLs.
1546 ++ */
1547 ++ mutex_lock(&se_tpg->acl_node_mutex);
1548 ++ if (se_nacl->dynamic_node_acl &&
1549 ++ !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
1550 ++ spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
1551 ++ if (list_empty(&se_nacl->acl_sess_list))
1552 ++ se_nacl->dynamic_stop = true;
1553 ++ spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
1554 ++
1555 ++ if (se_nacl->dynamic_stop)
1556 ++ list_del(&se_nacl->acl_list);
1557 ++ }
1558 ++ mutex_unlock(&se_tpg->acl_node_mutex);
1559 ++
1560 ++ if (se_nacl->dynamic_stop)
1561 ++ target_put_nacl(se_nacl);
1562 ++
1563 + target_put_nacl(se_nacl);
1564 + }
1565 + if (se_sess->sess_cmd_map) {
1566 +@@ -518,16 +557,12 @@ EXPORT_SYMBOL(transport_free_session);
1567 + void transport_deregister_session(struct se_session *se_sess)
1568 + {
1569 + struct se_portal_group *se_tpg = se_sess->se_tpg;
1570 +- const struct target_core_fabric_ops *se_tfo;
1571 +- struct se_node_acl *se_nacl;
1572 + unsigned long flags;
1573 +- bool drop_nacl = false;
1574 +
1575 + if (!se_tpg) {
1576 + transport_free_session(se_sess);
1577 + return;
1578 + }
1579 +- se_tfo = se_tpg->se_tpg_tfo;
1580 +
1581 + spin_lock_irqsave(&se_tpg->session_lock, flags);
1582 + list_del(&se_sess->sess_list);
1583 +@@ -535,33 +570,15 @@ void transport_deregister_session(struct se_session *se_sess)
1584 + se_sess->fabric_sess_ptr = NULL;
1585 + spin_unlock_irqrestore(&se_tpg->session_lock, flags);
1586 +
1587 +- /*
1588 +- * Determine if we need to do extra work for this initiator node's
1589 +- * struct se_node_acl if it had been previously dynamically generated.
1590 +- */
1591 +- se_nacl = se_sess->se_node_acl;
1592 +-
1593 +- mutex_lock(&se_tpg->acl_node_mutex);
1594 +- if (se_nacl && se_nacl->dynamic_node_acl) {
1595 +- if (!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
1596 +- list_del(&se_nacl->acl_list);
1597 +- drop_nacl = true;
1598 +- }
1599 +- }
1600 +- mutex_unlock(&se_tpg->acl_node_mutex);
1601 +-
1602 +- if (drop_nacl) {
1603 +- core_tpg_wait_for_nacl_pr_ref(se_nacl);
1604 +- core_free_device_list_for_node(se_nacl, se_tpg);
1605 +- se_sess->se_node_acl = NULL;
1606 +- kfree(se_nacl);
1607 +- }
1608 + pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
1609 + se_tpg->se_tpg_tfo->get_fabric_name());
1610 + /*
1611 + * If last kref is dropping now for an explicit NodeACL, awake sleeping
1612 + * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
1613 + * removal context from within transport_free_session() code.
1614 ++ *
1615 ++ * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
1616 ++ * to release all remaining generate_node_acl=1 created ACL resources.
1617 + */
1618 +
1619 + transport_free_session(se_sess);
1620 +@@ -3086,7 +3103,6 @@ static void target_tmr_work(struct work_struct *work)
1621 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1622 + goto check_stop;
1623 + }
1624 +- cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
1625 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1626 +
1627 + cmd->se_tfo->queue_tm_rsp(cmd);
1628 +@@ -3099,11 +3115,25 @@ int transport_generic_handle_tmr(
1629 + struct se_cmd *cmd)
1630 + {
1631 + unsigned long flags;
1632 ++ bool aborted = false;
1633 +
1634 + spin_lock_irqsave(&cmd->t_state_lock, flags);
1635 +- cmd->transport_state |= CMD_T_ACTIVE;
1636 ++ if (cmd->transport_state & CMD_T_ABORTED) {
1637 ++ aborted = true;
1638 ++ } else {
1639 ++ cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
1640 ++ cmd->transport_state |= CMD_T_ACTIVE;
1641 ++ }
1642 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
1643 +
1644 ++ if (aborted) {
1645 ++ pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d"
1646 ++ "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function,
1647 ++ cmd->se_tmr_req->ref_task_tag, cmd->tag);
1648 ++ transport_cmd_check_stop_to_fabric(cmd);
1649 ++ return 0;
1650 ++ }
1651 ++
1652 + INIT_WORK(&cmd->work, target_tmr_work);
1653 + queue_work(cmd->se_dev->tmr_wq, &cmd->work);
1654 + return 0;
1655 +diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
1656 +index 094a1440eacb..18848ba8d2ba 100644
1657 +--- a/drivers/target/target_core_xcopy.c
1658 ++++ b/drivers/target/target_core_xcopy.c
1659 +@@ -836,7 +836,7 @@ static void target_xcopy_do_work(struct work_struct *work)
1660 + " CHECK_CONDITION -> sending response\n", rc);
1661 + ec_cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
1662 + }
1663 +- target_complete_cmd(ec_cmd, SAM_STAT_CHECK_CONDITION);
1664 ++ target_complete_cmd(ec_cmd, ec_cmd->scsi_status);
1665 + }
1666 +
1667 + sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
1668 +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1669 +index 7acbd2cf6192..1782804f6c26 100644
1670 +--- a/fs/btrfs/ioctl.c
1671 ++++ b/fs/btrfs/ioctl.c
1672 +@@ -5648,6 +5648,10 @@ long btrfs_ioctl(struct file *file, unsigned int
1673 + #ifdef CONFIG_COMPAT
1674 + long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1675 + {
1676 ++ /*
1677 ++ * These all access 32-bit values anyway so no further
1678 ++ * handling is necessary.
1679 ++ */
1680 + switch (cmd) {
1681 + case FS_IOC32_GETFLAGS:
1682 + cmd = FS_IOC_GETFLAGS;
1683 +@@ -5658,8 +5662,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1684 + case FS_IOC32_GETVERSION:
1685 + cmd = FS_IOC_GETVERSION;
1686 + break;
1687 +- default:
1688 +- return -ENOIOCTLCMD;
1689 + }
1690 +
1691 + return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
1692 +diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
1693 +index da7fbf1cdd56..fa3b155ce7e1 100644
1694 +--- a/include/linux/cpumask.h
1695 ++++ b/include/linux/cpumask.h
1696 +@@ -560,7 +560,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
1697 + static inline int cpumask_parse_user(const char __user *buf, int len,
1698 + struct cpumask *dstp)
1699 + {
1700 +- return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpu_ids);
1701 ++ return bitmap_parse_user(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
1702 + }
1703 +
1704 + /**
1705 +@@ -575,7 +575,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
1706 + struct cpumask *dstp)
1707 + {
1708 + return bitmap_parselist_user(buf, len, cpumask_bits(dstp),
1709 +- nr_cpu_ids);
1710 ++ nr_cpumask_bits);
1711 + }
1712 +
1713 + /**
1714 +@@ -590,7 +590,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
1715 + char *nl = strchr(buf, '\n');
1716 + unsigned int len = nl ? (unsigned int)(nl - buf) : strlen(buf);
1717 +
1718 +- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpu_ids);
1719 ++ return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
1720 + }
1721 +
1722 + /**
1723 +@@ -602,7 +602,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
1724 + */
1725 + static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
1726 + {
1727 +- return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpu_ids);
1728 ++ return bitmap_parselist(buf, cpumask_bits(dstp), nr_cpumask_bits);
1729 + }
1730 +
1731 + /**
1732 +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
1733 +index cd184bdca58f..c92a083bcf16 100644
1734 +--- a/include/linux/hyperv.h
1735 ++++ b/include/linux/hyperv.h
1736 +@@ -128,6 +128,7 @@ struct hv_ring_buffer_info {
1737 + u32 ring_data_startoffset;
1738 + u32 priv_write_index;
1739 + u32 priv_read_index;
1740 ++ u32 cached_read_index;
1741 + };
1742 +
1743 + /*
1744 +@@ -180,6 +181,19 @@ static inline u32 hv_get_bytes_to_write(struct hv_ring_buffer_info *rbi)
1745 + return write;
1746 + }
1747 +
1748 ++static inline u32 hv_get_cached_bytes_to_write(
1749 ++ const struct hv_ring_buffer_info *rbi)
1750 ++{
1751 ++ u32 read_loc, write_loc, dsize, write;
1752 ++
1753 ++ dsize = rbi->ring_datasize;
1754 ++ read_loc = rbi->cached_read_index;
1755 ++ write_loc = rbi->ring_buffer->write_index;
1756 ++
1757 ++ write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
1758 ++ read_loc - write_loc;
1759 ++ return write;
1760 ++}
1761 + /*
1762 + * VMBUS version is 32 bit entity broken up into
1763 + * two 16 bit quantities: major_number. minor_number.
1764 +@@ -1447,6 +1461,7 @@ void hv_event_tasklet_enable(struct vmbus_channel *channel);
1765 +
1766 + void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid);
1767 +
1768 ++void vmbus_setevent(struct vmbus_channel *channel);
1769 + /*
1770 + * Negotiated version with the Host.
1771 + */
1772 +@@ -1479,10 +1494,11 @@ hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info)
1773 + * there is room for the producer to send the pending packet.
1774 + */
1775 +
1776 +-static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
1777 ++static inline void hv_signal_on_read(struct vmbus_channel *channel)
1778 + {
1779 +- u32 cur_write_sz;
1780 ++ u32 cur_write_sz, cached_write_sz;
1781 + u32 pending_sz;
1782 ++ struct hv_ring_buffer_info *rbi = &channel->inbound;
1783 +
1784 + /*
1785 + * Issue a full memory barrier before making the signaling decision.
1786 +@@ -1500,14 +1516,26 @@ static inline bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
1787 + pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
1788 + /* If the other end is not blocked on write don't bother. */
1789 + if (pending_sz == 0)
1790 +- return false;
1791 ++ return;
1792 +
1793 + cur_write_sz = hv_get_bytes_to_write(rbi);
1794 +
1795 +- if (cur_write_sz >= pending_sz)
1796 +- return true;
1797 ++ if (cur_write_sz < pending_sz)
1798 ++ return;
1799 ++
1800 ++ cached_write_sz = hv_get_cached_bytes_to_write(rbi);
1801 ++ if (cached_write_sz < pending_sz)
1802 ++ vmbus_setevent(channel);
1803 ++
1804 ++ return;
1805 ++}
1806 ++
1807 ++static inline void
1808 ++init_cached_read_index(struct vmbus_channel *channel)
1809 ++{
1810 ++ struct hv_ring_buffer_info *rbi = &channel->inbound;
1811 +
1812 +- return false;
1813 ++ rbi->cached_read_index = rbi->ring_buffer->read_index;
1814 + }
1815 +
1816 + /*
1817 +@@ -1571,6 +1599,8 @@ static inline void put_pkt_raw(struct vmbus_channel *channel,
1818 + * This call commits the read index and potentially signals the host.
1819 + * Here is the pattern for using the "in-place" consumption APIs:
1820 + *
1821 ++ * init_cached_read_index();
1822 ++ *
1823 + * while (get_next_pkt_raw() {
1824 + * process the packet "in-place";
1825 + * put_pkt_raw();
1826 +@@ -1589,8 +1619,7 @@ static inline void commit_rd_index(struct vmbus_channel *channel)
1827 + virt_rmb();
1828 + ring_info->ring_buffer->read_index = ring_info->priv_read_index;
1829 +
1830 +- if (hv_need_to_signal_on_read(ring_info))
1831 +- vmbus_set_event(channel);
1832 ++ hv_signal_on_read(channel);
1833 + }
1834 +
1835 +
1836 +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
1837 +index c2119008990a..48bc1ac1da43 100644
1838 +--- a/include/target/target_core_base.h
1839 ++++ b/include/target/target_core_base.h
1840 +@@ -538,6 +538,7 @@ struct se_node_acl {
1841 + char initiatorname[TRANSPORT_IQN_LEN];
1842 + /* Used to signal demo mode created ACL, disabled by default */
1843 + bool dynamic_node_acl;
1844 ++ bool dynamic_stop;
1845 + u32 queue_depth;
1846 + u32 acl_index;
1847 + enum target_prot_type saved_prot_type;
1848 +diff --git a/kernel/events/core.c b/kernel/events/core.c
1849 +index b1cfd7416db0..4b3323151a2f 100644
1850 +--- a/kernel/events/core.c
1851 ++++ b/kernel/events/core.c
1852 +@@ -3461,14 +3461,15 @@ struct perf_read_data {
1853 + int ret;
1854 + };
1855 +
1856 +-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
1857 ++static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
1858 + {
1859 +- int event_cpu = event->oncpu;
1860 + u16 local_pkg, event_pkg;
1861 +
1862 + if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
1863 +- event_pkg = topology_physical_package_id(event_cpu);
1864 +- local_pkg = topology_physical_package_id(local_cpu);
1865 ++ int local_cpu = smp_processor_id();
1866 ++
1867 ++ event_pkg = topology_physical_package_id(event_cpu);
1868 ++ local_pkg = topology_physical_package_id(local_cpu);
1869 +
1870 + if (event_pkg == local_pkg)
1871 + return local_cpu;
1872 +@@ -3598,7 +3599,7 @@ u64 perf_event_read_local(struct perf_event *event)
1873 +
1874 + static int perf_event_read(struct perf_event *event, bool group)
1875 + {
1876 +- int ret = 0, cpu_to_read, local_cpu;
1877 ++ int event_cpu, ret = 0;
1878 +
1879 + /*
1880 + * If event is enabled and currently active on a CPU, update the
1881 +@@ -3611,21 +3612,25 @@ static int perf_event_read(struct perf_event *event, bool group)
1882 + .ret = 0,
1883 + };
1884 +
1885 +- local_cpu = get_cpu();
1886 +- cpu_to_read = find_cpu_to_read(event, local_cpu);
1887 +- put_cpu();
1888 ++ event_cpu = READ_ONCE(event->oncpu);
1889 ++ if ((unsigned)event_cpu >= nr_cpu_ids)
1890 ++ return 0;
1891 ++
1892 ++ preempt_disable();
1893 ++ event_cpu = __perf_event_read_cpu(event, event_cpu);
1894 +
1895 + /*
1896 + * Purposely ignore the smp_call_function_single() return
1897 + * value.
1898 + *
1899 +- * If event->oncpu isn't a valid CPU it means the event got
1900 ++ * If event_cpu isn't a valid CPU it means the event got
1901 + * scheduled out and that will have updated the event count.
1902 + *
1903 + * Therefore, either way, we'll have an up-to-date event count
1904 + * after this.
1905 + */
1906 +- (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
1907 ++ (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
1908 ++ preempt_enable();
1909 + ret = data.ret;
1910 + } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1911 + struct perf_event_context *ctx = event->ctx;
1912 +diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
1913 +index b6e4c16377c7..9c15a9124e83 100644
1914 +--- a/kernel/stacktrace.c
1915 ++++ b/kernel/stacktrace.c
1916 +@@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
1917 + if (WARN_ON(!trace->entries))
1918 + return;
1919 +
1920 +- for (i = 0; i < trace->nr_entries; i++) {
1921 +- printk("%*c", 1 + spaces, ' ');
1922 +- print_ip_sym(trace->entries[i]);
1923 +- }
1924 ++ for (i = 0; i < trace->nr_entries; i++)
1925 ++ printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
1926 + }
1927 + EXPORT_SYMBOL_GPL(print_stack_trace);
1928 +
1929 +@@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size,
1930 + struct stack_trace *trace, int spaces)
1931 + {
1932 + int i;
1933 +- unsigned long ip;
1934 + int generated;
1935 + int total = 0;
1936 +
1937 +@@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size,
1938 + return 0;
1939 +
1940 + for (i = 0; i < trace->nr_entries; i++) {
1941 +- ip = trace->entries[i];
1942 +- generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
1943 +- 1 + spaces, ' ', (void *) ip, (void *) ip);
1944 ++ generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
1945 ++ (void *)trace->entries[i]);
1946 +
1947 + total += generated;
1948 +
1949 +diff --git a/mm/slub.c b/mm/slub.c
1950 +index 2b3e740609e9..7aa0e97af928 100644
1951 +--- a/mm/slub.c
1952 ++++ b/mm/slub.c
1953 +@@ -1419,6 +1419,10 @@ static int init_cache_random_seq(struct kmem_cache *s)
1954 + int err;
1955 + unsigned long i, count = oo_objects(s->oo);
1956 +
1957 ++ /* Bailout if already initialised */
1958 ++ if (s->random_seq)
1959 ++ return 0;
1960 ++
1961 + err = cache_random_seq_create(s, count, GFP_KERNEL);
1962 + if (err) {
1963 + pr_err("SLUB: Unable to initialize free list for %s\n",
1964 +diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
1965 +index 42120d965263..50e1b7f78bd4 100644
1966 +--- a/net/mac80211/mesh.c
1967 ++++ b/net/mac80211/mesh.c
1968 +@@ -339,7 +339,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
1969 + /* fast-forward to vendor IEs */
1970 + offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
1971 +
1972 +- if (offset) {
1973 ++ if (offset < ifmsh->ie_len) {
1974 + len = ifmsh->ie_len - offset;
1975 + data = ifmsh->ie + offset;
1976 + if (skb_tailroom(skb) < len)
1977 +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
1978 +index 1b3c18c2c1ec..cd7a419faa21 100644
1979 +--- a/net/wireless/nl80211.c
1980 ++++ b/net/wireless/nl80211.c
1981 +@@ -5874,6 +5874,7 @@ do { \
1982 + break;
1983 + }
1984 + cfg->ht_opmode = ht_opmode;
1985 ++ mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
1986 + }
1987 + FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
1988 + 1, 65535, mask,
1989 +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
1990 +index 09fd6108e421..c2da45ae5b2a 100644
1991 +--- a/security/selinux/hooks.c
1992 ++++ b/security/selinux/hooks.c
1993 +@@ -5858,7 +5858,7 @@ static int selinux_setprocattr(struct task_struct *p,
1994 + return error;
1995 +
1996 + /* Obtain a SID for the context, if one was specified. */
1997 +- if (size && str[1] && str[1] != '\n') {
1998 ++ if (size && str[0] && str[0] != '\n') {
1999 + if (str[size-1] == '\n') {
2000 + str[size-1] = 0;
2001 + size--;
2002 +diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
2003 +index c850345c43b5..dfa5156f3585 100644
2004 +--- a/sound/core/seq/seq_memory.c
2005 ++++ b/sound/core/seq/seq_memory.c
2006 +@@ -419,7 +419,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
2007 + {
2008 + unsigned long flags;
2009 + struct snd_seq_event_cell *ptr;
2010 +- int max_count = 5 * HZ;
2011 +
2012 + if (snd_BUG_ON(!pool))
2013 + return -EINVAL;
2014 +@@ -432,14 +431,8 @@ int snd_seq_pool_done(struct snd_seq_pool *pool)
2015 + if (waitqueue_active(&pool->output_sleep))
2016 + wake_up(&pool->output_sleep);
2017 +
2018 +- while (atomic_read(&pool->counter) > 0) {
2019 +- if (max_count == 0) {
2020 +- pr_warn("ALSA: snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
2021 +- break;
2022 +- }
2023 ++ while (atomic_read(&pool->counter) > 0)
2024 + schedule_timeout_uninterruptible(1);
2025 +- max_count--;
2026 +- }
2027 +
2028 + /* release all resources */
2029 + spin_lock_irqsave(&pool->lock, flags);
2030 +diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
2031 +index 0bec02e89d51..450c5187eecb 100644
2032 +--- a/sound/core/seq/seq_queue.c
2033 ++++ b/sound/core/seq/seq_queue.c
2034 +@@ -181,6 +181,8 @@ void __exit snd_seq_queues_delete(void)
2035 + }
2036 + }
2037 +
2038 ++static void queue_use(struct snd_seq_queue *queue, int client, int use);
2039 ++
2040 + /* allocate a new queue -
2041 + * return queue index value or negative value for error
2042 + */
2043 +@@ -192,11 +194,11 @@ int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
2044 + if (q == NULL)
2045 + return -ENOMEM;
2046 + q->info_flags = info_flags;
2047 ++ queue_use(q, client, 1);
2048 + if (queue_list_add(q) < 0) {
2049 + queue_delete(q);
2050 + return -ENOMEM;
2051 + }
2052 +- snd_seq_queue_use(q->queue, client, 1); /* use this queue */
2053 + return q->queue;
2054 + }
2055 +
2056 +@@ -502,19 +504,9 @@ int snd_seq_queue_timer_set_tempo(int queueid, int client,
2057 + return result;
2058 + }
2059 +
2060 +-
2061 +-/* use or unuse this queue -
2062 +- * if it is the first client, starts the timer.
2063 +- * if it is not longer used by any clients, stop the timer.
2064 +- */
2065 +-int snd_seq_queue_use(int queueid, int client, int use)
2066 ++/* use or unuse this queue */
2067 ++static void queue_use(struct snd_seq_queue *queue, int client, int use)
2068 + {
2069 +- struct snd_seq_queue *queue;
2070 +-
2071 +- queue = queueptr(queueid);
2072 +- if (queue == NULL)
2073 +- return -EINVAL;
2074 +- mutex_lock(&queue->timer_mutex);
2075 + if (use) {
2076 + if (!test_and_set_bit(client, queue->clients_bitmap))
2077 + queue->clients++;
2078 +@@ -529,6 +521,21 @@ int snd_seq_queue_use(int queueid, int client, int use)
2079 + } else {
2080 + snd_seq_timer_close(queue);
2081 + }
2082 ++}
2083 ++
2084 ++/* use or unuse this queue -
2085 ++ * if it is the first client, starts the timer.
2086 ++ * if it is not longer used by any clients, stop the timer.
2087 ++ */
2088 ++int snd_seq_queue_use(int queueid, int client, int use)
2089 ++{
2090 ++ struct snd_seq_queue *queue;
2091 ++
2092 ++ queue = queueptr(queueid);
2093 ++ if (queue == NULL)
2094 ++ return -EINVAL;
2095 ++ mutex_lock(&queue->timer_mutex);
2096 ++ queue_use(queue, client, use);
2097 + mutex_unlock(&queue->timer_mutex);
2098 + queuefree(queue);
2099 + return 0;
2100 +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
2101 +index 56e5204ac9c1..4bf48336b0fc 100644
2102 +--- a/sound/pci/hda/patch_hdmi.c
2103 ++++ b/sound/pci/hda/patch_hdmi.c
2104 +@@ -3638,6 +3638,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP", patch_nvhdmi),
2105 + HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP", patch_nvhdmi),
2106 + HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP", patch_nvhdmi),
2107 + HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP", patch_nvhdmi),
2108 ++HDA_CODEC_ENTRY(0x10de0080, "GPU 80 HDMI/DP", patch_nvhdmi),
2109 + HDA_CODEC_ENTRY(0x10de0082, "GPU 82 HDMI/DP", patch_nvhdmi),
2110 + HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP", patch_nvhdmi),
2111 + HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI", patch_nvhdmi_2ch),
2112 +diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
2113 +index 90009c0b3a92..ab3c280a23d1 100644
2114 +--- a/sound/usb/line6/driver.c
2115 ++++ b/sound/usb/line6/driver.c
2116 +@@ -754,8 +754,9 @@ int line6_probe(struct usb_interface *interface,
2117 + goto error;
2118 + }
2119 +
2120 ++ line6_get_interval(line6);
2121 ++
2122 + if (properties->capabilities & LINE6_CAP_CONTROL) {
2123 +- line6_get_interval(line6);
2124 + ret = line6_init_cap_control(line6);
2125 + if (ret < 0)
2126 + goto error;
2127 +diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
2128 +index 9ff0db4e2d0c..933aeec46f4a 100644
2129 +--- a/tools/perf/builtin-diff.c
2130 ++++ b/tools/perf/builtin-diff.c
2131 +@@ -1199,7 +1199,7 @@ static int ui_init(void)
2132 + BUG_ON(1);
2133 + }
2134 +
2135 +- perf_hpp__register_sort_field(fmt);
2136 ++ perf_hpp__prepend_sort_field(fmt);
2137 + return 0;
2138 + }
2139 +
2140 +diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
2141 +index 37388397b5bc..18cfcdc90356 100644
2142 +--- a/tools/perf/ui/hist.c
2143 ++++ b/tools/perf/ui/hist.c
2144 +@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
2145 + list_add_tail(&format->sort_list, &list->sorts);
2146 + }
2147 +
2148 ++void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
2149 ++ struct perf_hpp_fmt *format)
2150 ++{
2151 ++ list_add(&format->sort_list, &list->sorts);
2152 ++}
2153 ++
2154 + void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
2155 + {
2156 + list_del(&format->list);
2157 +@@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
2158 + perf_hpp_list__for_each_sort_list(list, fmt) {
2159 + struct perf_hpp_fmt *pos;
2160 +
2161 ++ /* skip sort-only fields ("sort_compute" in perf diff) */
2162 ++ if (!fmt->entry && !fmt->color)
2163 ++ continue;
2164 ++
2165 + perf_hpp_list__for_each_format(list, pos) {
2166 + if (fmt_equal(fmt, pos))
2167 + goto next;
2168 +diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
2169 +index 9928fed8bc59..a440a04a29ff 100644
2170 +--- a/tools/perf/util/hist.h
2171 ++++ b/tools/perf/util/hist.h
2172 +@@ -282,6 +282,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list,
2173 + struct perf_hpp_fmt *format);
2174 + void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
2175 + struct perf_hpp_fmt *format);
2176 ++void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
2177 ++ struct perf_hpp_fmt *format);
2178 +
2179 + static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
2180 + {
2181 +@@ -293,6 +295,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
2182 + perf_hpp_list__register_sort_field(&perf_hpp_list, format);
2183 + }
2184 +
2185 ++static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
2186 ++{
2187 ++ perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
2188 ++}
2189 ++
2190 + #define perf_hpp_list__for_each_format(_list, format) \
2191 + list_for_each_entry(format, &(_list)->fields, list)
2192 +