From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Fri, 06 Sep 2019 17:18:48
Message-Id: 1567790308.c90c7bf33f875cc11c14073b96755d657309bacb.mpagano@gentoo
commit:     c90c7bf33f875cc11c14073b96755d657309bacb
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Fri Sep 6 17:18:28 2019 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Fri Sep 6 17:18:28 2019 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c90c7bf3

Linux patch 4.9.191

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1190_linux-4.9.191.patch | 2717 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2721 insertions(+)

16 diff --git a/0000_README b/0000_README
17 index 9555d77..04712fb 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -803,6 +803,10 @@ Patch: 1189_linux-4.9.190.patch
21 From: http://www.kernel.org
22 Desc: Linux 4.9.190
23
24 +Patch: 1190_linux-4.9.191.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 4.9.191
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1190_linux-4.9.191.patch b/1190_linux-4.9.191.patch
33 new file mode 100644
34 index 0000000..0e86d92
35 --- /dev/null
36 +++ b/1190_linux-4.9.191.patch
37 @@ -0,0 +1,2717 @@
38 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
39 +index f4f0a1b9ba29..61b73e42f488 100644
40 +--- a/Documentation/kernel-parameters.txt
41 ++++ b/Documentation/kernel-parameters.txt
42 +@@ -3829,6 +3829,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
43 + Run specified binary instead of /init from the ramdisk,
44 + used for early userspace startup. See initrd.
45 +
46 ++ rdrand= [X86]
47 ++ force - Override the decision by the kernel to hide the
48 ++ advertisement of RDRAND support (this affects
49 ++ certain AMD processors because of buggy BIOS
50 ++ support, specifically around the suspend/resume
51 ++ path).
52 ++
53 + reboot= [KNL]
54 + Format (x86 or x86_64):
55 + [w[arm] | c[old] | h[ard] | s[oft] | g[pio]] \
56 +diff --git a/Makefile b/Makefile
57 +index 4b6cf4641eba..311e861afb15 100644
58 +--- a/Makefile
59 ++++ b/Makefile
60 +@@ -1,6 +1,6 @@
61 + VERSION = 4
62 + PATCHLEVEL = 9
63 +-SUBLEVEL = 190
64 ++SUBLEVEL = 191
65 + EXTRAVERSION =
66 + NAME = Roaring Lionus
67 +
68 +diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
69 +index c5bc344fc745..73039746ae36 100644
70 +--- a/arch/mips/kernel/i8253.c
71 ++++ b/arch/mips/kernel/i8253.c
72 +@@ -31,7 +31,8 @@ void __init setup_pit_timer(void)
73 +
74 + static int __init init_pit_clocksource(void)
75 + {
76 +- if (num_possible_cpus() > 1) /* PIT does not scale! */
77 ++ if (num_possible_cpus() > 1 || /* PIT does not scale! */
78 ++ !clockevent_state_periodic(&i8253_clockevent))
79 + return 0;
80 +
81 + return clocksource_i8253_init();
82 +diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
83 +index 4a8cb8d7cbd5..0232b5a2a2d9 100644
84 +--- a/arch/x86/include/asm/bootparam_utils.h
85 ++++ b/arch/x86/include/asm/bootparam_utils.h
86 +@@ -17,6 +17,20 @@
87 + * Note: efi_info is commonly left uninitialized, but that field has a
88 + * private magic, so it is better to leave it unchanged.
89 + */
90 ++
91 ++#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
92 ++
93 ++#define BOOT_PARAM_PRESERVE(struct_member) \
94 ++ { \
95 ++ .start = offsetof(struct boot_params, struct_member), \
96 ++ .len = sizeof_mbr(struct boot_params, struct_member), \
97 ++ }
98 ++
99 ++struct boot_params_to_save {
100 ++ unsigned int start;
101 ++ unsigned int len;
102 ++};
103 ++
104 + static void sanitize_boot_params(struct boot_params *boot_params)
105 + {
106 + /*
107 +@@ -35,19 +49,39 @@ static void sanitize_boot_params(struct boot_params *boot_params)
108 + */
109 + if (boot_params->sentinel) {
110 + /* fields in boot_params are left uninitialized, clear them */
111 +- memset(&boot_params->ext_ramdisk_image, 0,
112 +- (char *)&boot_params->efi_info -
113 +- (char *)&boot_params->ext_ramdisk_image);
114 +- memset(&boot_params->kbd_status, 0,
115 +- (char *)&boot_params->hdr -
116 +- (char *)&boot_params->kbd_status);
117 +- memset(&boot_params->_pad7[0], 0,
118 +- (char *)&boot_params->edd_mbr_sig_buffer[0] -
119 +- (char *)&boot_params->_pad7[0]);
120 +- memset(&boot_params->_pad8[0], 0,
121 +- (char *)&boot_params->eddbuf[0] -
122 +- (char *)&boot_params->_pad8[0]);
123 +- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
124 ++ static struct boot_params scratch;
125 ++ char *bp_base = (char *)boot_params;
126 ++ char *save_base = (char *)&scratch;
127 ++ int i;
128 ++
129 ++ const struct boot_params_to_save to_save[] = {
130 ++ BOOT_PARAM_PRESERVE(screen_info),
131 ++ BOOT_PARAM_PRESERVE(apm_bios_info),
132 ++ BOOT_PARAM_PRESERVE(tboot_addr),
133 ++ BOOT_PARAM_PRESERVE(ist_info),
134 ++ BOOT_PARAM_PRESERVE(hd0_info),
135 ++ BOOT_PARAM_PRESERVE(hd1_info),
136 ++ BOOT_PARAM_PRESERVE(sys_desc_table),
137 ++ BOOT_PARAM_PRESERVE(olpc_ofw_header),
138 ++ BOOT_PARAM_PRESERVE(efi_info),
139 ++ BOOT_PARAM_PRESERVE(alt_mem_k),
140 ++ BOOT_PARAM_PRESERVE(scratch),
141 ++ BOOT_PARAM_PRESERVE(e820_entries),
142 ++ BOOT_PARAM_PRESERVE(eddbuf_entries),
143 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
144 ++ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
145 ++ BOOT_PARAM_PRESERVE(hdr),
146 ++ BOOT_PARAM_PRESERVE(eddbuf),
147 ++ };
148 ++
149 ++ memset(&scratch, 0, sizeof(scratch));
150 ++
151 ++ for (i = 0; i < ARRAY_SIZE(to_save); i++) {
152 ++ memcpy(save_base + to_save[i].start,
153 ++ bp_base + to_save[i].start, to_save[i].len);
154 ++ }
155 ++
156 ++ memcpy(boot_params, save_base, sizeof(*boot_params));
157 + }
158 + }
159 +
160 +diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
161 +index 38f94d07920d..86166868db8c 100644
162 +--- a/arch/x86/include/asm/msr-index.h
163 ++++ b/arch/x86/include/asm/msr-index.h
164 +@@ -313,6 +313,7 @@
165 + #define MSR_AMD64_PATCH_LEVEL 0x0000008b
166 + #define MSR_AMD64_TSC_RATIO 0xc0000104
167 + #define MSR_AMD64_NB_CFG 0xc001001f
168 ++#define MSR_AMD64_CPUID_FN_1 0xc0011004
169 + #define MSR_AMD64_PATCH_LOADER 0xc0010020
170 + #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
171 + #define MSR_AMD64_OSVW_STATUS 0xc0010141
172 +diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
173 +index 031a58e84e5b..10a48505abb5 100644
174 +--- a/arch/x86/include/asm/nospec-branch.h
175 ++++ b/arch/x86/include/asm/nospec-branch.h
176 +@@ -196,7 +196,7 @@
177 + " lfence;\n" \
178 + " jmp 902b;\n" \
179 + " .align 16\n" \
180 +- "903: addl $4, %%esp;\n" \
181 ++ "903: lea 4(%%esp), %%esp;\n" \
182 + " pushl %[thunk_target];\n" \
183 + " ret;\n" \
184 + " .align 16\n" \
185 +diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
186 +index 2b5d686ea9f3..ea78a8438a8a 100644
187 +--- a/arch/x86/include/asm/ptrace.h
188 ++++ b/arch/x86/include/asm/ptrace.h
189 +@@ -115,9 +115,9 @@ static inline int v8086_mode(struct pt_regs *regs)
190 + #endif
191 + }
192 +
193 +-#ifdef CONFIG_X86_64
194 + static inline bool user_64bit_mode(struct pt_regs *regs)
195 + {
196 ++#ifdef CONFIG_X86_64
197 + #ifndef CONFIG_PARAVIRT
198 + /*
199 + * On non-paravirt systems, this is the only long mode CPL 3
200 +@@ -128,8 +128,12 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
201 + /* Headers are too twisted for this to go in paravirt.h. */
202 + return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
203 + #endif
204 ++#else /* !CONFIG_X86_64 */
205 ++ return false;
206 ++#endif
207 + }
208 +
209 ++#ifdef CONFIG_X86_64
210 + #define current_user_stack_pointer() current_pt_regs()->sp
211 + #define compat_user_stack_pointer() current_pt_regs()->sp
212 + #endif
213 +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
214 +index cc9a6f680225..37666c536741 100644
215 +--- a/arch/x86/kernel/apic/apic.c
216 ++++ b/arch/x86/kernel/apic/apic.c
217 +@@ -629,7 +629,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
218 + static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
219 +
220 + /*
221 +- * Temporary interrupt handler.
222 ++ * Temporary interrupt handler and polled calibration function.
223 + */
224 + static void __init lapic_cal_handler(struct clock_event_device *dev)
225 + {
226 +@@ -713,7 +713,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
227 + static int __init calibrate_APIC_clock(void)
228 + {
229 + struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
230 +- void (*real_handler)(struct clock_event_device *dev);
231 ++ u64 tsc_perj = 0, tsc_start = 0;
232 ++ unsigned long jif_start;
233 + unsigned long deltaj;
234 + long delta, deltatsc;
235 + int pm_referenced = 0;
236 +@@ -742,28 +743,64 @@ static int __init calibrate_APIC_clock(void)
237 + apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
238 + "calibrating APIC timer ...\n");
239 +
240 ++ /*
241 ++ * There are platforms w/o global clockevent devices. Instead of
242 ++ * making the calibration conditional on that, use a polling based
243 ++ * approach everywhere.
244 ++ */
245 + local_irq_disable();
246 +
247 +- /* Replace the global interrupt handler */
248 +- real_handler = global_clock_event->event_handler;
249 +- global_clock_event->event_handler = lapic_cal_handler;
250 +-
251 + /*
252 + * Setup the APIC counter to maximum. There is no way the lapic
253 + * can underflow in the 100ms detection time frame
254 + */
255 + __setup_APIC_LVTT(0xffffffff, 0, 0);
256 +
257 +- /* Let the interrupts run */
258 ++ /*
259 ++ * Methods to terminate the calibration loop:
260 ++ * 1) Global clockevent if available (jiffies)
261 ++ * 2) TSC if available and frequency is known
262 ++ */
263 ++ jif_start = READ_ONCE(jiffies);
264 ++
265 ++ if (tsc_khz) {
266 ++ tsc_start = rdtsc();
267 ++ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
268 ++ }
269 ++
270 ++ /*
271 ++ * Enable interrupts so the tick can fire, if a global
272 ++ * clockevent device is available
273 ++ */
274 + local_irq_enable();
275 +
276 +- while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
277 +- cpu_relax();
278 ++ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
279 ++ /* Wait for a tick to elapse */
280 ++ while (1) {
281 ++ if (tsc_khz) {
282 ++ u64 tsc_now = rdtsc();
283 ++ if ((tsc_now - tsc_start) >= tsc_perj) {
284 ++ tsc_start += tsc_perj;
285 ++ break;
286 ++ }
287 ++ } else {
288 ++ unsigned long jif_now = READ_ONCE(jiffies);
289 +
290 +- local_irq_disable();
291 ++ if (time_after(jif_now, jif_start)) {
292 ++ jif_start = jif_now;
293 ++ break;
294 ++ }
295 ++ }
296 ++ cpu_relax();
297 ++ }
298 +
299 +- /* Restore the real event handler */
300 +- global_clock_event->event_handler = real_handler;
301 ++ /* Invoke the calibration routine */
302 ++ local_irq_disable();
303 ++ lapic_cal_handler(NULL);
304 ++ local_irq_enable();
305 ++ }
306 ++
307 ++ local_irq_disable();
308 +
309 + /* Build delta t1-t2 as apic timer counts down */
310 + delta = lapic_cal_t1 - lapic_cal_t2;
311 +@@ -814,10 +851,11 @@ static int __init calibrate_APIC_clock(void)
312 + levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
313 +
314 + /*
315 +- * PM timer calibration failed or not turned on
316 +- * so lets try APIC timer based calibration
317 ++ * PM timer calibration failed or not turned on so lets try APIC
318 ++ * timer based calibration, if a global clockevent device is
319 ++ * available.
320 + */
321 +- if (!pm_referenced) {
322 ++ if (!pm_referenced && global_clock_event) {
323 + apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
324 +
325 + /*
326 +@@ -1029,6 +1067,10 @@ void clear_local_APIC(void)
327 + apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
328 + v = apic_read(APIC_LVT1);
329 + apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
330 ++ if (!x2apic_enabled()) {
331 ++ v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
332 ++ apic_write(APIC_LDR, v);
333 ++ }
334 + if (maxlvt >= 4) {
335 + v = apic_read(APIC_LVTPC);
336 + apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
337 +diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
338 +index 56012010332c..76fe153ccc6d 100644
339 +--- a/arch/x86/kernel/apic/bigsmp_32.c
340 ++++ b/arch/x86/kernel/apic/bigsmp_32.c
341 +@@ -37,32 +37,12 @@ static int bigsmp_early_logical_apicid(int cpu)
342 + return early_per_cpu(x86_cpu_to_apicid, cpu);
343 + }
344 +
345 +-static inline unsigned long calculate_ldr(int cpu)
346 +-{
347 +- unsigned long val, id;
348 +-
349 +- val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
350 +- id = per_cpu(x86_bios_cpu_apicid, cpu);
351 +- val |= SET_APIC_LOGICAL_ID(id);
352 +-
353 +- return val;
354 +-}
355 +-
356 + /*
357 +- * Set up the logical destination ID.
358 +- *
359 +- * Intel recommends to set DFR, LDR and TPR before enabling
360 +- * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
361 +- * document number 292116). So here it goes...
362 ++ * bigsmp enables physical destination mode
363 ++ * and doesn't use LDR and DFR
364 + */
365 + static void bigsmp_init_apic_ldr(void)
366 + {
367 +- unsigned long val;
368 +- int cpu = smp_processor_id();
369 +-
370 +- apic_write(APIC_DFR, APIC_DFR_FLAT);
371 +- val = calculate_ldr(cpu);
372 +- apic_write(APIC_LDR, val);
373 + }
374 +
375 + static void bigsmp_setup_apic_routing(void)
376 +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
377 +index 52a65f14db06..9428b54fff66 100644
378 +--- a/arch/x86/kernel/cpu/amd.c
379 ++++ b/arch/x86/kernel/cpu/amd.c
380 +@@ -746,6 +746,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
381 + msr_set_bit(MSR_AMD64_DE_CFG, 31);
382 + }
383 +
384 ++static bool rdrand_force;
385 ++
386 ++static int __init rdrand_cmdline(char *str)
387 ++{
388 ++ if (!str)
389 ++ return -EINVAL;
390 ++
391 ++ if (!strcmp(str, "force"))
392 ++ rdrand_force = true;
393 ++ else
394 ++ return -EINVAL;
395 ++
396 ++ return 0;
397 ++}
398 ++early_param("rdrand", rdrand_cmdline);
399 ++
400 ++static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
401 ++{
402 ++ /*
403 ++ * Saving of the MSR used to hide the RDRAND support during
404 ++ * suspend/resume is done by arch/x86/power/cpu.c, which is
405 ++ * dependent on CONFIG_PM_SLEEP.
406 ++ */
407 ++ if (!IS_ENABLED(CONFIG_PM_SLEEP))
408 ++ return;
409 ++
410 ++ /*
411 ++ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
412 ++ * RDRAND support using the CPUID function directly.
413 ++ */
414 ++ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
415 ++ return;
416 ++
417 ++ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
418 ++
419 ++ /*
420 ++ * Verify that the CPUID change has occurred in case the kernel is
421 ++ * running virtualized and the hypervisor doesn't support the MSR.
422 ++ */
423 ++ if (cpuid_ecx(1) & BIT(30)) {
424 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
425 ++ return;
426 ++ }
427 ++
428 ++ clear_cpu_cap(c, X86_FEATURE_RDRAND);
429 ++ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
430 ++}
431 ++
432 ++static void init_amd_jg(struct cpuinfo_x86 *c)
433 ++{
434 ++ /*
435 ++ * Some BIOS implementations do not restore proper RDRAND support
436 ++ * across suspend and resume. Check on whether to hide the RDRAND
437 ++ * instruction support via CPUID.
438 ++ */
439 ++ clear_rdrand_cpuid_bit(c);
440 ++}
441 ++
442 + static void init_amd_bd(struct cpuinfo_x86 *c)
443 + {
444 + u64 value;
445 +@@ -760,6 +818,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
446 + wrmsrl_safe(MSR_F15H_IC_CFG, value);
447 + }
448 + }
449 ++
450 ++ /*
451 ++ * Some BIOS implementations do not restore proper RDRAND support
452 ++ * across suspend and resume. Check on whether to hide the RDRAND
453 ++ * instruction support via CPUID.
454 ++ */
455 ++ clear_rdrand_cpuid_bit(c);
456 + }
457 +
458 + static void init_amd_zn(struct cpuinfo_x86 *c)
459 +@@ -804,6 +869,7 @@ static void init_amd(struct cpuinfo_x86 *c)
460 + case 0x10: init_amd_gh(c); break;
461 + case 0x12: init_amd_ln(c); break;
462 + case 0x15: init_amd_bd(c); break;
463 ++ case 0x16: init_amd_jg(c); break;
464 + case 0x17: init_amd_zn(c); break;
465 + }
466 +
467 +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
468 +index 8d20fb09722c..7f377f8792aa 100644
469 +--- a/arch/x86/kernel/ptrace.c
470 ++++ b/arch/x86/kernel/ptrace.c
471 +@@ -651,11 +651,10 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
472 + {
473 + struct thread_struct *thread = &tsk->thread;
474 + unsigned long val = 0;
475 +- int index = n;
476 +
477 + if (n < HBP_NUM) {
478 ++ int index = array_index_nospec(n, HBP_NUM);
479 + struct perf_event *bp = thread->ptrace_bps[index];
480 +- index = array_index_nospec(index, HBP_NUM);
481 +
482 + if (bp)
483 + val = bp->hw.info.address;
484 +diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
485 +index e78a6b1db74b..e35466afe989 100644
486 +--- a/arch/x86/kernel/uprobes.c
487 ++++ b/arch/x86/kernel/uprobes.c
488 +@@ -514,9 +514,12 @@ struct uprobe_xol_ops {
489 + void (*abort)(struct arch_uprobe *, struct pt_regs *);
490 + };
491 +
492 +-static inline int sizeof_long(void)
493 ++static inline int sizeof_long(struct pt_regs *regs)
494 + {
495 +- return in_ia32_syscall() ? 4 : 8;
496 ++ /*
497 ++ * Check registers for mode as in_xxx_syscall() does not apply here.
498 ++ */
499 ++ return user_64bit_mode(regs) ? 8 : 4;
500 + }
501 +
502 + static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
503 +@@ -527,9 +530,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
504 +
505 + static int push_ret_address(struct pt_regs *regs, unsigned long ip)
506 + {
507 +- unsigned long new_sp = regs->sp - sizeof_long();
508 ++ unsigned long new_sp = regs->sp - sizeof_long(regs);
509 +
510 +- if (copy_to_user((void __user *)new_sp, &ip, sizeof_long()))
511 ++ if (copy_to_user((void __user *)new_sp, &ip, sizeof_long(regs)))
512 + return -EFAULT;
513 +
514 + regs->sp = new_sp;
515 +@@ -562,7 +565,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
516 + long correction = utask->vaddr - utask->xol_vaddr;
517 + regs->ip += correction;
518 + } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
519 +- regs->sp += sizeof_long(); /* Pop incorrect return address */
520 ++ regs->sp += sizeof_long(regs); /* Pop incorrect return address */
521 + if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
522 + return -ERESTART;
523 + }
524 +@@ -671,7 +674,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
525 + * "call" insn was executed out-of-line. Just restore ->sp and restart.
526 + * We could also restore ->ip and try to call branch_emulate_op() again.
527 + */
528 +- regs->sp += sizeof_long();
529 ++ regs->sp += sizeof_long(regs);
530 + return -ERESTART;
531 + }
532 +
533 +@@ -962,7 +965,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
534 + unsigned long
535 + arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
536 + {
537 +- int rasize = sizeof_long(), nleft;
538 ++ int rasize = sizeof_long(regs), nleft;
539 + unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
540 +
541 + if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
542 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
543 +index 8b06700d1676..bbecbf2b1f5e 100644
544 +--- a/arch/x86/kvm/x86.c
545 ++++ b/arch/x86/kvm/x86.c
546 +@@ -5823,12 +5823,13 @@ restart:
547 + unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
548 + toggle_interruptibility(vcpu, ctxt->interruptibility);
549 + vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
550 +- kvm_rip_write(vcpu, ctxt->eip);
551 +- if (r == EMULATE_DONE && ctxt->tf)
552 +- kvm_vcpu_do_singlestep(vcpu, &r);
553 + if (!ctxt->have_exception ||
554 +- exception_type(ctxt->exception.vector) == EXCPT_TRAP)
555 ++ exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
556 ++ kvm_rip_write(vcpu, ctxt->eip);
557 ++ if (r == EMULATE_DONE && ctxt->tf)
558 ++ kvm_vcpu_do_singlestep(vcpu, &r);
559 + __kvm_set_rflags(vcpu, ctxt->eflags);
560 ++ }
561 +
562 + /*
563 + * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
564 +diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
565 +index 2dd1fe13a37b..19f707992db2 100644
566 +--- a/arch/x86/lib/cpu.c
567 ++++ b/arch/x86/lib/cpu.c
568 +@@ -1,5 +1,6 @@
569 + #include <linux/types.h>
570 + #include <linux/export.h>
571 ++#include <asm/cpu.h>
572 +
573 + unsigned int x86_family(unsigned int sig)
574 + {
575 +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
576 +index 29dc59baf0c2..c8f947a4aaf2 100644
577 +--- a/arch/x86/power/cpu.c
578 ++++ b/arch/x86/power/cpu.c
579 +@@ -13,6 +13,7 @@
580 + #include <linux/smp.h>
581 + #include <linux/perf_event.h>
582 + #include <linux/tboot.h>
583 ++#include <linux/dmi.h>
584 +
585 + #include <asm/pgtable.h>
586 + #include <asm/proto.h>
587 +@@ -24,7 +25,7 @@
588 + #include <asm/debugreg.h>
589 + #include <asm/cpu.h>
590 + #include <asm/mmu_context.h>
591 +-#include <linux/dmi.h>
592 ++#include <asm/cpu_device_id.h>
593 +
594 + #ifdef CONFIG_X86_32
595 + __visible unsigned long saved_context_ebx;
596 +@@ -391,15 +392,14 @@ static int __init bsp_pm_check_init(void)
597 +
598 + core_initcall(bsp_pm_check_init);
599 +
600 +-static int msr_init_context(const u32 *msr_id, const int total_num)
601 ++static int msr_build_context(const u32 *msr_id, const int num)
602 + {
603 +- int i = 0;
604 ++ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
605 + struct saved_msr *msr_array;
606 ++ int total_num;
607 ++ int i, j;
608 +
609 +- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
610 +- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
611 +- return -EINVAL;
612 +- }
613 ++ total_num = saved_msrs->num + num;
614 +
615 + msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
616 + if (!msr_array) {
617 +@@ -407,19 +407,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
618 + return -ENOMEM;
619 + }
620 +
621 +- for (i = 0; i < total_num; i++) {
622 +- msr_array[i].info.msr_no = msr_id[i];
623 ++ if (saved_msrs->array) {
624 ++ /*
625 ++ * Multiple callbacks can invoke this function, so copy any
626 ++ * MSR save requests from previous invocations.
627 ++ */
628 ++ memcpy(msr_array, saved_msrs->array,
629 ++ sizeof(struct saved_msr) * saved_msrs->num);
630 ++
631 ++ kfree(saved_msrs->array);
632 ++ }
633 ++
634 ++ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
635 ++ msr_array[i].info.msr_no = msr_id[j];
636 + msr_array[i].valid = false;
637 + msr_array[i].info.reg.q = 0;
638 + }
639 +- saved_context.saved_msrs.num = total_num;
640 +- saved_context.saved_msrs.array = msr_array;
641 ++ saved_msrs->num = total_num;
642 ++ saved_msrs->array = msr_array;
643 +
644 + return 0;
645 + }
646 +
647 + /*
648 +- * The following section is a quirk framework for problematic BIOSen:
649 ++ * The following sections are a quirk framework for problematic BIOSen:
650 + * Sometimes MSRs are modified by the BIOSen after suspended to
651 + * RAM, this might cause unexpected behavior after wakeup.
652 + * Thus we save/restore these specified MSRs across suspend/resume
653 +@@ -434,7 +445,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
654 + u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
655 +
656 + pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
657 +- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
658 ++ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
659 + }
660 +
661 + static struct dmi_system_id msr_save_dmi_table[] = {
662 +@@ -449,9 +460,58 @@ static struct dmi_system_id msr_save_dmi_table[] = {
663 + {}
664 + };
665 +
666 ++static int msr_save_cpuid_features(const struct x86_cpu_id *c)
667 ++{
668 ++ u32 cpuid_msr_id[] = {
669 ++ MSR_AMD64_CPUID_FN_1,
670 ++ };
671 ++
672 ++ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
673 ++ c->family);
674 ++
675 ++ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
676 ++}
677 ++
678 ++static const struct x86_cpu_id msr_save_cpu_table[] = {
679 ++ {
680 ++ .vendor = X86_VENDOR_AMD,
681 ++ .family = 0x15,
682 ++ .model = X86_MODEL_ANY,
683 ++ .feature = X86_FEATURE_ANY,
684 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
685 ++ },
686 ++ {
687 ++ .vendor = X86_VENDOR_AMD,
688 ++ .family = 0x16,
689 ++ .model = X86_MODEL_ANY,
690 ++ .feature = X86_FEATURE_ANY,
691 ++ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
692 ++ },
693 ++ {}
694 ++};
695 ++
696 ++typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
697 ++static int pm_cpu_check(const struct x86_cpu_id *c)
698 ++{
699 ++ const struct x86_cpu_id *m;
700 ++ int ret = 0;
701 ++
702 ++ m = x86_match_cpu(msr_save_cpu_table);
703 ++ if (m) {
704 ++ pm_cpu_match_t fn;
705 ++
706 ++ fn = (pm_cpu_match_t)m->driver_data;
707 ++ ret = fn(m);
708 ++ }
709 ++
710 ++ return ret;
711 ++}
712 ++
713 + static int pm_check_save_msr(void)
714 + {
715 + dmi_check_system(msr_save_dmi_table);
716 ++ pm_cpu_check(msr_save_cpu_table);
717 ++
718 + return 0;
719 + }
720 +
721 +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
722 +index 8d22acdf90f0..0e2bc5b9a78c 100644
723 +--- a/drivers/ata/libata-sff.c
724 ++++ b/drivers/ata/libata-sff.c
725 +@@ -703,6 +703,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
726 + unsigned int offset;
727 + unsigned char *buf;
728 +
729 ++ if (!qc->cursg) {
730 ++ qc->curbytes = qc->nbytes;
731 ++ return;
732 ++ }
733 + if (qc->curbytes == qc->nbytes - qc->sect_size)
734 + ap->hsm_task_state = HSM_ST_LAST;
735 +
736 +@@ -742,6 +746,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
737 +
738 + if (qc->cursg_ofs == qc->cursg->length) {
739 + qc->cursg = sg_next(qc->cursg);
740 ++ if (!qc->cursg)
741 ++ ap->hsm_task_state = HSM_ST_LAST;
742 + qc->cursg_ofs = 0;
743 + }
744 + }
745 +diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
746 +index 5dfe6e8af140..ad736d7de838 100644
747 +--- a/drivers/block/xen-blkback/xenbus.c
748 ++++ b/drivers/block/xen-blkback/xenbus.c
749 +@@ -967,6 +967,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
750 + }
751 + blkif->nr_ring_pages = nr_grefs;
752 +
753 ++ err = -ENOMEM;
754 + for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
755 + req = kzalloc(sizeof(*req), GFP_KERNEL);
756 + if (!req)
757 +@@ -989,7 +990,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
758 + err = xen_blkif_map(ring, ring_ref, nr_grefs, evtchn);
759 + if (err) {
760 + xenbus_dev_fatal(dev, err, "mapping ring-ref port %u", evtchn);
761 +- return err;
762 ++ goto fail;
763 + }
764 +
765 + return 0;
766 +@@ -1009,8 +1010,7 @@ fail:
767 + }
768 + kfree(req);
769 + }
770 +- return -ENOMEM;
771 +-
772 ++ return err;
773 + }
774 +
775 + static int connect_ring(struct backend_info *be)
776 +diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
777 +index 8684d11b29bb..68b41daab3a8 100644
778 +--- a/drivers/dma/ste_dma40.c
779 ++++ b/drivers/dma/ste_dma40.c
780 +@@ -142,7 +142,7 @@ enum d40_events {
781 + * when the DMA hw is powered off.
782 + * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
783 + */
784 +-static u32 d40_backup_regs[] = {
785 ++static __maybe_unused u32 d40_backup_regs[] = {
786 + D40_DREG_LCPA,
787 + D40_DREG_LCLA,
788 + D40_DREG_PRMSE,
789 +@@ -211,7 +211,7 @@ static u32 d40_backup_regs_v4b[] = {
790 +
791 + #define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)
792 +
793 +-static u32 d40_backup_regs_chan[] = {
794 ++static __maybe_unused u32 d40_backup_regs_chan[] = {
795 + D40_CHAN_REG_SSCFG,
796 + D40_CHAN_REG_SSELT,
797 + D40_CHAN_REG_SSPTR,
798 +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
799 +index d3675819f561..3b0d77b2fdc5 100644
800 +--- a/drivers/gpio/gpiolib.c
801 ++++ b/drivers/gpio/gpiolib.c
802 +@@ -953,9 +953,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
803 + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
804 + lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
805 + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
806 +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
807 ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
808 ++ GPIOLINE_FLAG_IS_OUT);
809 + if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
810 +- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
811 ++ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
812 ++ GPIOLINE_FLAG_IS_OUT);
813 +
814 + if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
815 + return -EFAULT;
816 +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
817 +index e57a0bad7a62..77df50dd6d30 100644
818 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
819 ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
820 +@@ -300,8 +300,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
821 + break;
822 + }
823 +
824 +- if (retries == RETRIES)
825 ++ if (retries == RETRIES) {
826 ++ kfree(reply);
827 + return -EINVAL;
828 ++ }
829 +
830 + *msg_len = reply_len;
831 + *msg = reply;
832 +diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
833 +index 9428ea7cdf8a..c52bd163abb3 100644
834 +--- a/drivers/hid/hid-a4tech.c
835 ++++ b/drivers/hid/hid-a4tech.c
836 +@@ -26,12 +26,36 @@
837 + #define A4_2WHEEL_MOUSE_HACK_7 0x01
838 + #define A4_2WHEEL_MOUSE_HACK_B8 0x02
839 +
840 ++#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
841 ++
842 + struct a4tech_sc {
843 + unsigned long quirks;
844 + unsigned int hw_wheel;
845 + __s32 delayed_value;
846 + };
847 +
848 ++static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
849 ++ struct hid_field *field, struct hid_usage *usage,
850 ++ unsigned long **bit, int *max)
851 ++{
852 ++ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
853 ++
854 ++ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
855 ++ usage->hid == A4_WHEEL_ORIENTATION) {
856 ++ /*
857 ++ * We do not want to have this usage mapped to anything as it's
858 ++ * nonstandard and doesn't really behave like an HID report.
859 ++ * It's only selecting the orientation (vertical/horizontal) of
860 ++ * the previous mouse wheel report. The input_events will be
861 ++ * generated once both reports are recorded in a4_event().
862 ++ */
863 ++ return -1;
864 ++ }
865 ++
866 ++ return 0;
867 ++
868 ++}
869 ++
870 + static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
871 + struct hid_field *field, struct hid_usage *usage,
872 + unsigned long **bit, int *max)
873 +@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
874 + struct a4tech_sc *a4 = hid_get_drvdata(hdev);
875 + struct input_dev *input;
876 +
877 +- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
878 +- !usage->type)
879 ++ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
880 + return 0;
881 +
882 + input = field->hidinput->input;
883 +@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
884 + return 1;
885 + }
886 +
887 +- if (usage->hid == 0x000100b8) {
888 ++ if (usage->hid == A4_WHEEL_ORIENTATION) {
889 + input_event(input, EV_REL, value ? REL_HWHEEL :
890 + REL_WHEEL, a4->delayed_value);
891 + return 1;
892 +@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
893 + static struct hid_driver a4_driver = {
894 + .name = "a4tech",
895 + .id_table = a4_devices,
896 ++ .input_mapping = a4_input_mapping,
897 + .input_mapped = a4_input_mapped,
898 + .event = a4_event,
899 + .probe = a4_probe,
900 +diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
901 +index b83376077d72..cfa0cb22c9b3 100644
902 +--- a/drivers/hid/hid-tmff.c
903 ++++ b/drivers/hid/hid-tmff.c
904 +@@ -34,6 +34,8 @@
905 +
906 + #include "hid-ids.h"
907 +
908 ++#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
909 ++
910 + static const signed short ff_rumble[] = {
911 + FF_RUMBLE,
912 + -1
913 +@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
914 + struct hid_field *ff_field = tmff->ff_field;
915 + int x, y;
916 + int left, right; /* Rumbling */
917 ++ int motor_swap;
918 +
919 + switch (effect->type) {
920 + case FF_CONSTANT:
921 +@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
922 + ff_field->logical_minimum,
923 + ff_field->logical_maximum);
924 +
925 ++ /* 2-in-1 strong motor is left */
926 ++ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
927 ++ motor_swap = left;
928 ++ left = right;
929 ++ right = motor_swap;
930 ++ }
931 ++
932 + dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
933 + ff_field->value[0] = left;
934 + ff_field->value[1] = right;
935 +@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
936 + .driver_data = (unsigned long)ff_rumble },
937 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
938 + .driver_data = (unsigned long)ff_rumble },
939 ++ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
940 ++ .driver_data = (unsigned long)ff_rumble },
941 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
942 + .driver_data = (unsigned long)ff_rumble },
943 + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
944 +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
945 +index 6c3bf8846b52..fbf14a14bdd4 100644
946 +--- a/drivers/hid/wacom_wac.c
947 ++++ b/drivers/hid/wacom_wac.c
948 +@@ -819,7 +819,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
949 + input_report_key(input, BTN_BASE2, (data[11] & 0x02));
950 +
951 + if (data[12] & 0x80)
952 +- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
953 ++ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
954 + else
955 + input_report_abs(input, ABS_WHEEL, 0);
956 +
957 +@@ -949,6 +949,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
958 + y >>= 1;
959 + distance >>= 1;
960 + }
961 ++ if (features->type == INTUOSHT2)
962 ++ distance = features->distance_max - distance;
963 + input_report_abs(input, ABS_X, x);
964 + input_report_abs(input, ABS_Y, y);
965 + input_report_abs(input, ABS_DISTANCE, distance);
966 +diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
967 +index fd0ebec03ae7..beefec9701ed 100644
968 +--- a/drivers/hwtracing/stm/core.c
969 ++++ b/drivers/hwtracing/stm/core.c
970 +@@ -1107,7 +1107,6 @@ int stm_source_register_device(struct device *parent,
971 +
972 + err:
973 + put_device(&src->dev);
974 +- kfree(src);
975 +
976 + return err;
977 + }
978 +diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c
979 +index 96bb4e749012..0218ba6eb26a 100644
980 +--- a/drivers/i2c/busses/i2c-emev2.c
981 ++++ b/drivers/i2c/busses/i2c-emev2.c
982 +@@ -72,6 +72,7 @@ struct em_i2c_device {
983 + struct completion msg_done;
984 + struct clk *sclk;
985 + struct i2c_client *slave;
986 ++ int irq;
987 + };
988 +
989 + static inline void em_clear_set_bit(struct em_i2c_device *priv, u8 clear, u8 set, u8 reg)
990 +@@ -342,6 +343,12 @@ static int em_i2c_unreg_slave(struct i2c_client *slave)
991 +
992 + writeb(0, priv->base + I2C_OFS_SVA0);
993 +
994 ++ /*
995 ++ * Wait for interrupt to finish. New slave irqs cannot happen because we
996 ++ * cleared the slave address and, thus, only extension codes will be
997 ++ * detected which do not use the slave ptr.
998 ++ */
999 ++ synchronize_irq(priv->irq);
1000 + priv->slave = NULL;
1001 +
1002 + return 0;
1003 +@@ -358,7 +365,7 @@ static int em_i2c_probe(struct platform_device *pdev)
1004 + {
1005 + struct em_i2c_device *priv;
1006 + struct resource *r;
1007 +- int irq, ret;
1008 ++ int ret;
1009 +
1010 + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
1011 + if (!priv)
1012 +@@ -391,8 +398,8 @@ static int em_i2c_probe(struct platform_device *pdev)
1013 +
1014 + em_i2c_reset(&priv->adap);
1015 +
1016 +- irq = platform_get_irq(pdev, 0);
1017 +- ret = devm_request_irq(&pdev->dev, irq, em_i2c_irq_handler, 0,
1018 ++ priv->irq = platform_get_irq(pdev, 0);
1019 ++ ret = devm_request_irq(&pdev->dev, priv->irq, em_i2c_irq_handler, 0,
1020 + "em_i2c", priv);
1021 + if (ret)
1022 + goto err_clk;
1023 +@@ -402,7 +409,8 @@ static int em_i2c_probe(struct platform_device *pdev)
1024 + if (ret)
1025 + goto err_clk;
1026 +
1027 +- dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr, irq);
1028 ++ dev_info(&pdev->dev, "Added i2c controller %d, irq %d\n", priv->adap.nr,
1029 ++ priv->irq);
1030 +
1031 + return 0;
1032 +
1033 +diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
1034 +index 8f1c5f24c1df..62785aa76b3f 100644
1035 +--- a/drivers/i2c/busses/i2c-piix4.c
1036 ++++ b/drivers/i2c/busses/i2c-piix4.c
1037 +@@ -96,7 +96,7 @@
1038 + #define SB800_PIIX4_PORT_IDX_MASK 0x06
1039 + #define SB800_PIIX4_PORT_IDX_SHIFT 1
1040 +
1041 +-/* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
1042 ++/* On kerncz and Hudson2, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */
1043 + #define SB800_PIIX4_PORT_IDX_KERNCZ 0x02
1044 + #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18
1045 + #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3
1046 +@@ -355,18 +355,16 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
1047 +
1048 + /* Find which register is used for port selection */
1049 + if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) {
1050 +- switch (PIIX4_dev->device) {
1051 +- case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS:
1052 ++ if (PIIX4_dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS ||
1053 ++ (PIIX4_dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
1054 ++ PIIX4_dev->revision >= 0x1F)) {
1055 + piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ;
1056 + piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ;
1057 + piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ;
1058 +- break;
1059 +- case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS:
1060 +- default:
1061 ++ } else {
1062 + piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT;
1063 + piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK;
1064 + piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT;
1065 +- break;
1066 + }
1067 + } else {
1068 + mutex_lock(&piix4_mutex_sb800);
1069 +diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
1070 +index 1520e7f02c2f..89d191b6a0e0 100644
1071 +--- a/drivers/iommu/dma-iommu.c
1072 ++++ b/drivers/iommu/dma-iommu.c
1073 +@@ -493,7 +493,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
1074 + * - and wouldn't make the resulting output segment too long
1075 + */
1076 + if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
1077 +- (cur_len + s_length <= max_len)) {
1078 ++ (max_len - cur_len >= s_length)) {
1079 + /* ...then concatenate it with the previous one */
1080 + cur_len += s_length;
1081 + } else {
1082 +diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
1083 +index c60c7998af17..726fba452f5f 100644
1084 +--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
1085 ++++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
1086 +@@ -1402,6 +1402,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
1087 + printk(KERN_DEBUG
1088 + "%s: %s: alloc urb for fifo %i failed",
1089 + hw->name, __func__, fifo->fifonum);
1090 ++ continue;
1091 + }
1092 + fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
1093 + fifo->iso[i].indx = i;
1094 +@@ -1700,13 +1701,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
1095 + static int
1096 + setup_hfcsusb(struct hfcsusb *hw)
1097 + {
1098 ++ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
1099 + u_char b;
1100 ++ int ret;
1101 +
1102 + if (debug & DBG_HFC_CALL_TRACE)
1103 + printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
1104 +
1105 ++ if (!dmabuf)
1106 ++ return -ENOMEM;
1107 ++
1108 ++ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
1109 ++
1110 ++ memcpy(&b, dmabuf, sizeof(u_char));
1111 ++ kfree(dmabuf);
1112 ++
1113 + /* check the chip id */
1114 +- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
1115 ++ if (ret != 1) {
1116 + printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
1117 + hw->name, __func__);
1118 + return 1;
1119 +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1120 +index 673ce38735ff..c837defb5e4d 100644
1121 +--- a/drivers/md/dm-bufio.c
1122 ++++ b/drivers/md/dm-bufio.c
1123 +@@ -1585,7 +1585,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1124 + unsigned long freed;
1125 +
1126 + c = container_of(shrink, struct dm_bufio_client, shrinker);
1127 +- if (!dm_bufio_trylock(c))
1128 ++ if (sc->gfp_mask & __GFP_FS)
1129 ++ dm_bufio_lock(c);
1130 ++ else if (!dm_bufio_trylock(c))
1131 + return SHRINK_STOP;
1132 +
1133 + freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1134 +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1135 +index 5ac239d0f787..29deda7aed04 100644
1136 +--- a/drivers/md/dm-table.c
1137 ++++ b/drivers/md/dm-table.c
1138 +@@ -1263,7 +1263,7 @@ void dm_table_event(struct dm_table *t)
1139 + }
1140 + EXPORT_SYMBOL(dm_table_event);
1141 +
1142 +-sector_t dm_table_get_size(struct dm_table *t)
1143 ++inline sector_t dm_table_get_size(struct dm_table *t)
1144 + {
1145 + return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1146 + }
1147 +@@ -1288,6 +1288,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1148 + unsigned int l, n = 0, k = 0;
1149 + sector_t *node;
1150 +
1151 ++ if (unlikely(sector >= dm_table_get_size(t)))
1152 ++ return &t->targets[t->num_targets];
1153 ++
1154 + for (l = 0; l < t->depth; l++) {
1155 + n = get_child(n, k);
1156 + node = get_node(t, l, n);
1157 +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
1158 +index e4ececd3df00..386215245dfe 100644
1159 +--- a/drivers/md/persistent-data/dm-btree.c
1160 ++++ b/drivers/md/persistent-data/dm-btree.c
1161 +@@ -623,39 +623,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
1162 +
1163 + new_parent = shadow_current(s);
1164 +
1165 ++ pn = dm_block_data(new_parent);
1166 ++ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1167 ++ sizeof(__le64) : s->info->value_type.size;
1168 ++
1169 ++ /* create & init the left block */
1170 + r = new_block(s->info, &left);
1171 + if (r < 0)
1172 + return r;
1173 +
1174 ++ ln = dm_block_data(left);
1175 ++ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1176 ++
1177 ++ ln->header.flags = pn->header.flags;
1178 ++ ln->header.nr_entries = cpu_to_le32(nr_left);
1179 ++ ln->header.max_entries = pn->header.max_entries;
1180 ++ ln->header.value_size = pn->header.value_size;
1181 ++ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1182 ++ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1183 ++
1184 ++ /* create & init the right block */
1185 + r = new_block(s->info, &right);
1186 + if (r < 0) {
1187 + unlock_block(s->info, left);
1188 + return r;
1189 + }
1190 +
1191 +- pn = dm_block_data(new_parent);
1192 +- ln = dm_block_data(left);
1193 + rn = dm_block_data(right);
1194 +-
1195 +- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1196 + nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
1197 +
1198 +- ln->header.flags = pn->header.flags;
1199 +- ln->header.nr_entries = cpu_to_le32(nr_left);
1200 +- ln->header.max_entries = pn->header.max_entries;
1201 +- ln->header.value_size = pn->header.value_size;
1202 +-
1203 + rn->header.flags = pn->header.flags;
1204 + rn->header.nr_entries = cpu_to_le32(nr_right);
1205 + rn->header.max_entries = pn->header.max_entries;
1206 + rn->header.value_size = pn->header.value_size;
1207 +-
1208 +- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1209 + memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
1210 +-
1211 +- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1212 +- sizeof(__le64) : s->info->value_type.size;
1213 +- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1214 + memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
1215 + nr_right * size);
1216 +
1217 +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
1218 +index 20557e2c60c6..1d29771af380 100644
1219 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c
1220 ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c
1221 +@@ -248,7 +248,7 @@ static int out(struct sm_metadata *smm)
1222 + }
1223 +
1224 + if (smm->recursion_count == 1)
1225 +- apply_bops(smm);
1226 ++ r = apply_bops(smm);
1227 +
1228 + smm->recursion_count--;
1229 +
1230 +diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
1231 +index b3fa738ae005..f005206d9033 100644
1232 +--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
1233 ++++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
1234 +@@ -318,7 +318,8 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
1235 +
1236 + entry = container_of(resource, struct dbell_entry, resource);
1237 + if (entry->run_delayed) {
1238 +- schedule_work(&entry->work);
1239 ++ if (!schedule_work(&entry->work))
1240 ++ vmci_resource_put(resource);
1241 + } else {
1242 + entry->notify_cb(entry->client_data);
1243 + vmci_resource_put(resource);
1244 +@@ -366,7 +367,8 @@ static void dbell_fire_entries(u32 notify_idx)
1245 + atomic_read(&dbell->active) == 1) {
1246 + if (dbell->run_delayed) {
1247 + vmci_resource_get(&dbell->resource);
1248 +- schedule_work(&dbell->work);
1249 ++ if (!schedule_work(&dbell->work))
1250 ++ vmci_resource_put(&dbell->resource);
1251 + } else {
1252 + dbell->notify_cb(dbell->client_data);
1253 + }
1254 +diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
1255 +index 00ba8807dafe..7f654c714fff 100644
1256 +--- a/drivers/mmc/core/sd.c
1257 ++++ b/drivers/mmc/core/sd.c
1258 +@@ -1259,6 +1259,12 @@ int mmc_attach_sd(struct mmc_host *host)
1259 + goto err;
1260 + }
1261 +
1262 ++ /*
1263 ++ * Some SD cards claims an out of spec VDD voltage range. Let's treat
1264 ++ * these bits as being in-valid and especially also bit7.
1265 ++ */
1266 ++ ocr &= ~0x7FFF;
1267 ++
1268 + rocr = mmc_select_voltage(host, ocr);
1269 +
1270 + /*
1271 +diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
1272 +index 83b84ffec27d..2ff6140ea0b7 100644
1273 +--- a/drivers/mmc/host/sdhci-of-at91.c
1274 ++++ b/drivers/mmc/host/sdhci-of-at91.c
1275 +@@ -317,6 +317,9 @@ static int sdhci_at91_probe(struct platform_device *pdev)
1276 + pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1277 + pm_runtime_use_autosuspend(&pdev->dev);
1278 +
1279 ++ /* HS200 is broken at this moment */
1280 ++ host->quirks2 = SDHCI_QUIRK2_BROKEN_HS200;
1281 ++
1282 + ret = sdhci_add_host(host);
1283 + if (ret)
1284 + goto pm_runtime_disable;
1285 +diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1286 +index d338c319b30e..8820fb1aec5b 100644
1287 +--- a/drivers/net/bonding/bond_main.c
1288 ++++ b/drivers/net/bonding/bond_main.c
1289 +@@ -2131,6 +2131,15 @@ static void bond_miimon_commit(struct bonding *bond)
1290 + bond_for_each_slave(bond, slave, iter) {
1291 + switch (slave->new_link) {
1292 + case BOND_LINK_NOCHANGE:
1293 ++ /* For 802.3ad mode, check current slave speed and
1294 ++ * duplex again in case its port was disabled after
1295 ++ * invalid speed/duplex reporting but recovered before
1296 ++ * link monitoring could make a decision on the actual
1297 ++ * link status
1298 ++ */
1299 ++ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
1300 ++ slave->link == BOND_LINK_UP)
1301 ++ bond_3ad_adapter_speed_duplex_changed(slave);
1302 + continue;
1303 +
1304 + case BOND_LINK_UP:
1305 +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1306 +index 214a48703a4e..ffc5467a1ec2 100644
1307 +--- a/drivers/net/can/dev.c
1308 ++++ b/drivers/net/can/dev.c
1309 +@@ -1095,6 +1095,8 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
1310 + int register_candev(struct net_device *dev)
1311 + {
1312 + dev->rtnl_link_ops = &can_link_ops;
1313 ++ netif_carrier_off(dev);
1314 ++
1315 + return register_netdev(dev);
1316 + }
1317 + EXPORT_SYMBOL_GPL(register_candev);
1318 +diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
1319 +index dd56133cc461..fc9f8b01ecae 100644
1320 +--- a/drivers/net/can/sja1000/peak_pcmcia.c
1321 ++++ b/drivers/net/can/sja1000/peak_pcmcia.c
1322 +@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
1323 + if (!netdev)
1324 + continue;
1325 +
1326 +- strncpy(name, netdev->name, IFNAMSIZ);
1327 ++ strlcpy(name, netdev->name, IFNAMSIZ);
1328 +
1329 + unregister_sja1000dev(netdev);
1330 +
1331 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1332 +index 54c2354053ac..ce0a352a5eaa 100644
1333 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1334 ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1335 +@@ -879,7 +879,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
1336 +
1337 + dev_prev_siblings = dev->prev_siblings;
1338 + dev->state &= ~PCAN_USB_STATE_CONNECTED;
1339 +- strncpy(name, netdev->name, IFNAMSIZ);
1340 ++ strlcpy(name, netdev->name, IFNAMSIZ);
1341 +
1342 + unregister_netdev(netdev);
1343 +
1344 +diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1345 +index ddd1ec8f7bd0..d1a2159e40d6 100644
1346 +--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1347 ++++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1348 +@@ -3263,7 +3263,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1349 + if (!adapter->regs) {
1350 + dev_err(&pdev->dev, "cannot map device registers\n");
1351 + err = -ENOMEM;
1352 +- goto out_free_adapter;
1353 ++ goto out_free_adapter_nofail;
1354 + }
1355 +
1356 + adapter->pdev = pdev;
1357 +@@ -3381,6 +3381,9 @@ out_free_dev:
1358 + if (adapter->port[i])
1359 + free_netdev(adapter->port[i]);
1360 +
1361 ++out_free_adapter_nofail:
1362 ++ kfree_skb(adapter->nofail_skb);
1363 ++
1364 + out_free_adapter:
1365 + kfree(adapter);
1366 +
1367 +diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
1368 +index b5d18d95d7b9..f7882c1fde16 100644
1369 +--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
1370 ++++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
1371 +@@ -157,6 +157,7 @@ struct hip04_priv {
1372 + unsigned int reg_inten;
1373 +
1374 + struct napi_struct napi;
1375 ++ struct device *dev;
1376 + struct net_device *ndev;
1377 +
1378 + struct tx_desc *tx_desc;
1379 +@@ -185,7 +186,7 @@ struct hip04_priv {
1380 +
1381 + static inline unsigned int tx_count(unsigned int head, unsigned int tail)
1382 + {
1383 +- return (head - tail) % (TX_DESC_NUM - 1);
1384 ++ return (head - tail) % TX_DESC_NUM;
1385 + }
1386 +
1387 + static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
1388 +@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
1389 + }
1390 +
1391 + if (priv->tx_phys[tx_tail]) {
1392 +- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
1393 ++ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
1394 + priv->tx_skb[tx_tail]->len,
1395 + DMA_TO_DEVICE);
1396 + priv->tx_phys[tx_tail] = 0;
1397 +@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1398 + return NETDEV_TX_BUSY;
1399 + }
1400 +
1401 +- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
1402 +- if (dma_mapping_error(&ndev->dev, phys)) {
1403 ++ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
1404 ++ if (dma_mapping_error(priv->dev, phys)) {
1405 + dev_kfree_skb(skb);
1406 + return NETDEV_TX_OK;
1407 + }
1408 +@@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
1409 + u16 len;
1410 + u32 err;
1411 +
1412 ++ /* clean up tx descriptors */
1413 ++ tx_remaining = hip04_tx_reclaim(ndev, false);
1414 ++
1415 + while (cnt && !last) {
1416 + buf = priv->rx_buf[priv->rx_head];
1417 + skb = build_skb(buf, priv->rx_buf_size);
1418 +@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
1419 + goto refill;
1420 + }
1421 +
1422 +- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
1423 ++ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
1424 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1425 + priv->rx_phys[priv->rx_head] = 0;
1426 +
1427 +@@ -534,9 +538,9 @@ refill:
1428 + buf = netdev_alloc_frag(priv->rx_buf_size);
1429 + if (!buf)
1430 + goto done;
1431 +- phys = dma_map_single(&ndev->dev, buf,
1432 ++ phys = dma_map_single(priv->dev, buf,
1433 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1434 +- if (dma_mapping_error(&ndev->dev, phys))
1435 ++ if (dma_mapping_error(priv->dev, phys))
1436 + goto done;
1437 + priv->rx_buf[priv->rx_head] = buf;
1438 + priv->rx_phys[priv->rx_head] = phys;
1439 +@@ -557,8 +561,7 @@ refill:
1440 + }
1441 + napi_complete(napi);
1442 + done:
1443 +- /* clean up tx descriptors and start a new timer if necessary */
1444 +- tx_remaining = hip04_tx_reclaim(ndev, false);
1445 ++ /* start a new timer if necessary */
1446 + if (rx < budget && tx_remaining)
1447 + hip04_start_tx_timer(priv);
1448 +
1449 +@@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
1450 + for (i = 0; i < RX_DESC_NUM; i++) {
1451 + dma_addr_t phys;
1452 +
1453 +- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
1454 ++ phys = dma_map_single(priv->dev, priv->rx_buf[i],
1455 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1456 +- if (dma_mapping_error(&ndev->dev, phys))
1457 ++ if (dma_mapping_error(priv->dev, phys))
1458 + return -EIO;
1459 +
1460 + priv->rx_phys[i] = phys;
1461 +@@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
1462 +
1463 + for (i = 0; i < RX_DESC_NUM; i++) {
1464 + if (priv->rx_phys[i]) {
1465 +- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
1466 ++ dma_unmap_single(priv->dev, priv->rx_phys[i],
1467 + RX_BUF_SIZE, DMA_FROM_DEVICE);
1468 + priv->rx_phys[i] = 0;
1469 + }
1470 +@@ -827,6 +830,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
1471 + return -ENOMEM;
1472 +
1473 + priv = netdev_priv(ndev);
1474 ++ priv->dev = d;
1475 + priv->ndev = ndev;
1476 + platform_set_drvdata(pdev, ndev);
1477 +
1478 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1479 +index d51ad140f46d..05953e14a064 100644
1480 +--- a/drivers/net/usb/qmi_wwan.c
1481 ++++ b/drivers/net/usb/qmi_wwan.c
1482 +@@ -892,6 +892,7 @@ static const struct usb_device_id products[] = {
1483 + {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1484 + {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1485 + {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1486 ++ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
1487 + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1488 + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
1489 + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
1490 +diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
1491 +index 56f2112e0cd8..85df2e009310 100644
1492 +--- a/drivers/nfc/st-nci/se.c
1493 ++++ b/drivers/nfc/st-nci/se.c
1494 +@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
1495 +
1496 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
1497 + skb->len - 2, GFP_KERNEL);
1498 ++ if (!transaction)
1499 ++ return -ENOMEM;
1500 +
1501 + transaction->aid_len = skb->data[1];
1502 + memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
1503 +diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
1504 +index 3a98563d4a12..eac608a457f0 100644
1505 +--- a/drivers/nfc/st21nfca/se.c
1506 ++++ b/drivers/nfc/st21nfca/se.c
1507 +@@ -326,6 +326,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
1508 +
1509 + transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
1510 + skb->len - 2, GFP_KERNEL);
1511 ++ if (!transaction)
1512 ++ return -ENOMEM;
1513 +
1514 + transaction->aid_len = skb->data[1];
1515 + memcpy(transaction->aid, &skb->data[2],
1516 +diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
1517 +index 23129d7b2678..c77e36526447 100644
1518 +--- a/drivers/scsi/ufs/unipro.h
1519 ++++ b/drivers/scsi/ufs/unipro.h
1520 +@@ -52,7 +52,7 @@
1521 + #define RX_HS_UNTERMINATED_ENABLE 0x00A6
1522 + #define RX_ENTER_HIBERN8 0x00A7
1523 + #define RX_BYPASS_8B10B_ENABLE 0x00A8
1524 +-#define RX_TERMINATION_FORCE_ENABLE 0x0089
1525 ++#define RX_TERMINATION_FORCE_ENABLE 0x00A9
1526 + #define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
1527 + #define RX_HIBERN8TIME_CAPABILITY 0x0092
1528 + #define RX_REFCLKFREQ 0x00EB
1529 +diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
1530 +index 0f9859478649..2fbc67ca47d4 100644
1531 +--- a/drivers/usb/chipidea/udc.c
1532 ++++ b/drivers/usb/chipidea/udc.c
1533 +@@ -709,12 +709,6 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
1534 + struct ci_hdrc *ci = container_of(gadget, struct ci_hdrc, gadget);
1535 + unsigned long flags;
1536 +
1537 +- spin_lock_irqsave(&ci->lock, flags);
1538 +- ci->gadget.speed = USB_SPEED_UNKNOWN;
1539 +- ci->remote_wakeup = 0;
1540 +- ci->suspended = 0;
1541 +- spin_unlock_irqrestore(&ci->lock, flags);
1542 +-
1543 + /* flush all endpoints */
1544 + gadget_for_each_ep(ep, gadget) {
1545 + usb_ep_fifo_flush(ep);
1546 +@@ -732,6 +726,12 @@ static int _gadget_stop_activity(struct usb_gadget *gadget)
1547 + ci->status = NULL;
1548 + }
1549 +
1550 ++ spin_lock_irqsave(&ci->lock, flags);
1551 ++ ci->gadget.speed = USB_SPEED_UNKNOWN;
1552 ++ ci->remote_wakeup = 0;
1553 ++ ci->suspended = 0;
1554 ++ spin_unlock_irqrestore(&ci->lock, flags);
1555 ++
1556 + return 0;
1557 + }
1558 +
1559 +@@ -1306,6 +1306,10 @@ static int ep_disable(struct usb_ep *ep)
1560 + return -EBUSY;
1561 +
1562 + spin_lock_irqsave(hwep->lock, flags);
1563 ++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1564 ++ spin_unlock_irqrestore(hwep->lock, flags);
1565 ++ return 0;
1566 ++ }
1567 +
1568 + /* only internal SW should disable ctrl endpts */
1569 +
1570 +@@ -1395,6 +1399,10 @@ static int ep_queue(struct usb_ep *ep, struct usb_request *req,
1571 + return -EINVAL;
1572 +
1573 + spin_lock_irqsave(hwep->lock, flags);
1574 ++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1575 ++ spin_unlock_irqrestore(hwep->lock, flags);
1576 ++ return 0;
1577 ++ }
1578 + retval = _ep_queue(ep, req, gfp_flags);
1579 + spin_unlock_irqrestore(hwep->lock, flags);
1580 + return retval;
1581 +@@ -1418,8 +1426,8 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1582 + return -EINVAL;
1583 +
1584 + spin_lock_irqsave(hwep->lock, flags);
1585 +-
1586 +- hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1587 ++ if (hwep->ci->gadget.speed != USB_SPEED_UNKNOWN)
1588 ++ hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1589 +
1590 + list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1591 + dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1592 +@@ -1490,6 +1498,10 @@ static void ep_fifo_flush(struct usb_ep *ep)
1593 + }
1594 +
1595 + spin_lock_irqsave(hwep->lock, flags);
1596 ++ if (hwep->ci->gadget.speed == USB_SPEED_UNKNOWN) {
1597 ++ spin_unlock_irqrestore(hwep->lock, flags);
1598 ++ return;
1599 ++ }
1600 +
1601 + hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1602 +
1603 +@@ -1558,6 +1570,10 @@ static int ci_udc_wakeup(struct usb_gadget *_gadget)
1604 + int ret = 0;
1605 +
1606 + spin_lock_irqsave(&ci->lock, flags);
1607 ++ if (ci->gadget.speed == USB_SPEED_UNKNOWN) {
1608 ++ spin_unlock_irqrestore(&ci->lock, flags);
1609 ++ return 0;
1610 ++ }
1611 + if (!ci->remote_wakeup) {
1612 + ret = -EOPNOTSUPP;
1613 + goto out;
1614 +diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
1615 +index 9f001659807a..217a479165e0 100644
1616 +--- a/drivers/usb/class/cdc-wdm.c
1617 ++++ b/drivers/usb/class/cdc-wdm.c
1618 +@@ -597,10 +597,20 @@ static int wdm_flush(struct file *file, fl_owner_t id)
1619 + {
1620 + struct wdm_device *desc = file->private_data;
1621 +
1622 +- wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags));
1623 ++ wait_event(desc->wait,
1624 ++ /*
1625 ++ * needs both flags. We cannot do with one
1626 ++ * because resetting it would cause a race
1627 ++ * with write() yet we need to signal
1628 ++ * a disconnect
1629 ++ */
1630 ++ !test_bit(WDM_IN_USE, &desc->flags) ||
1631 ++ test_bit(WDM_DISCONNECTING, &desc->flags));
1632 +
1633 + /* cannot dereference desc->intf if WDM_DISCONNECTING */
1634 +- if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags))
1635 ++ if (test_bit(WDM_DISCONNECTING, &desc->flags))
1636 ++ return -ENODEV;
1637 ++ if (desc->werr < 0)
1638 + dev_err(&desc->intf->dev, "Error in flush path: %d\n",
1639 + desc->werr);
1640 +
1641 +@@ -968,8 +978,6 @@ static void wdm_disconnect(struct usb_interface *intf)
1642 + spin_lock_irqsave(&desc->iuspin, flags);
1643 + set_bit(WDM_DISCONNECTING, &desc->flags);
1644 + set_bit(WDM_READ, &desc->flags);
1645 +- /* to terminate pending flushes */
1646 +- clear_bit(WDM_IN_USE, &desc->flags);
1647 + spin_unlock_irqrestore(&desc->iuspin, flags);
1648 + wake_up_all(&desc->wait);
1649 + mutex_lock(&desc->rlock);
1650 +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1651 +index 2c022a08f163..9fa168af847b 100644
1652 +--- a/drivers/usb/gadget/composite.c
1653 ++++ b/drivers/usb/gadget/composite.c
1654 +@@ -2000,6 +2000,7 @@ void composite_disconnect(struct usb_gadget *gadget)
1655 + * disconnect callbacks?
1656 + */
1657 + spin_lock_irqsave(&cdev->lock, flags);
1658 ++ cdev->suspended = 0;
1659 + if (cdev->config)
1660 + reset_config(cdev);
1661 + if (cdev->driver->disconnect)
1662 +diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
1663 +index 66efa9a67687..72853020a542 100644
1664 +--- a/drivers/usb/host/fotg210-hcd.c
1665 ++++ b/drivers/usb/host/fotg210-hcd.c
1666 +@@ -1653,6 +1653,10 @@ static int fotg210_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1667 + /* see what we found out */
1668 + temp = check_reset_complete(fotg210, wIndex, status_reg,
1669 + fotg210_readl(fotg210, status_reg));
1670 ++
1671 ++ /* restart schedule */
1672 ++ fotg210->command |= CMD_RUN;
1673 ++ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
1674 + }
1675 +
1676 + if (!(temp & (PORT_RESUME|PORT_RESET))) {
1677 +diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
1678 +index 1afb76e8b1a5..17f1cf02ce34 100644
1679 +--- a/drivers/usb/host/ohci-hcd.c
1680 ++++ b/drivers/usb/host/ohci-hcd.c
1681 +@@ -417,8 +417,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
1682 + * other cases where the next software may expect clean state from the
1683 + * "firmware". this is bus-neutral, unlike shutdown() methods.
1684 + */
1685 +-static void
1686 +-ohci_shutdown (struct usb_hcd *hcd)
1687 ++static void _ohci_shutdown(struct usb_hcd *hcd)
1688 + {
1689 + struct ohci_hcd *ohci;
1690 +
1691 +@@ -434,6 +433,16 @@ ohci_shutdown (struct usb_hcd *hcd)
1692 + ohci->rh_state = OHCI_RH_HALTED;
1693 + }
1694 +
1695 ++static void ohci_shutdown(struct usb_hcd *hcd)
1696 ++{
1697 ++ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
1698 ++ unsigned long flags;
1699 ++
1700 ++ spin_lock_irqsave(&ohci->lock, flags);
1701 ++ _ohci_shutdown(hcd);
1702 ++ spin_unlock_irqrestore(&ohci->lock, flags);
1703 ++}
1704 ++
1705 + /*-------------------------------------------------------------------------*
1706 + * HC functions
1707 + *-------------------------------------------------------------------------*/
1708 +@@ -752,7 +761,7 @@ static void io_watchdog_func(unsigned long _ohci)
1709 + died:
1710 + usb_hc_died(ohci_to_hcd(ohci));
1711 + ohci_dump(ohci);
1712 +- ohci_shutdown(ohci_to_hcd(ohci));
1713 ++ _ohci_shutdown(ohci_to_hcd(ohci));
1714 + goto done;
1715 + } else {
1716 + /* No write back because the done queue was empty */
1717 +diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
1718 +index 64ee8154f2bb..89ec9f0905ca 100644
1719 +--- a/drivers/usb/host/xhci-rcar.c
1720 ++++ b/drivers/usb/host/xhci-rcar.c
1721 +@@ -84,7 +84,7 @@ static int xhci_rcar_is_gen2(struct device *dev)
1722 + return of_device_is_compatible(node, "renesas,xhci-r8a7790") ||
1723 + of_device_is_compatible(node, "renesas,xhci-r8a7791") ||
1724 + of_device_is_compatible(node, "renesas,xhci-r8a7793") ||
1725 +- of_device_is_compatible(node, "renensas,rcar-gen2-xhci");
1726 ++ of_device_is_compatible(node, "renesas,rcar-gen2-xhci");
1727 + }
1728 +
1729 + static int xhci_rcar_is_gen3(struct device *dev)
1730 +diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
1731 +index fac3447021b2..d955761fce6f 100644
1732 +--- a/drivers/usb/storage/realtek_cr.c
1733 ++++ b/drivers/usb/storage/realtek_cr.c
1734 +@@ -51,7 +51,7 @@ MODULE_VERSION("1.03");
1735 +
1736 + static int auto_delink_en = 1;
1737 + module_param(auto_delink_en, int, S_IRUGO | S_IWUSR);
1738 +-MODULE_PARM_DESC(auto_delink_en, "enable auto delink");
1739 ++MODULE_PARM_DESC(auto_delink_en, "auto delink mode (0=firmware, 1=software [default])");
1740 +
1741 + #ifdef CONFIG_REALTEK_AUTOPM
1742 + static int ss_en = 1;
1743 +@@ -1010,12 +1010,15 @@ static int init_realtek_cr(struct us_data *us)
1744 + goto INIT_FAIL;
1745 + }
1746 +
1747 +- if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
1748 +- CHECK_FW_VER(chip, 0x5901))
1749 +- SET_AUTO_DELINK(chip);
1750 +- if (STATUS_LEN(chip) == 16) {
1751 +- if (SUPPORT_AUTO_DELINK(chip))
1752 ++ if (CHECK_PID(chip, 0x0138) || CHECK_PID(chip, 0x0158) ||
1753 ++ CHECK_PID(chip, 0x0159)) {
1754 ++ if (CHECK_FW_VER(chip, 0x5888) || CHECK_FW_VER(chip, 0x5889) ||
1755 ++ CHECK_FW_VER(chip, 0x5901))
1756 + SET_AUTO_DELINK(chip);
1757 ++ if (STATUS_LEN(chip) == 16) {
1758 ++ if (SUPPORT_AUTO_DELINK(chip))
1759 ++ SET_AUTO_DELINK(chip);
1760 ++ }
1761 + }
1762 + #ifdef CONFIG_REALTEK_AUTOPM
1763 + if (ss_en)
1764 +diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1765 +index c802aabcc58c..3ebf6307217c 100644
1766 +--- a/drivers/usb/storage/unusual_devs.h
1767 ++++ b/drivers/usb/storage/unusual_devs.h
1768 +@@ -2119,7 +2119,7 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1769 + US_FL_IGNORE_RESIDUE ),
1770 +
1771 + /* Reported by Michael Büsch <m@××××.ch> */
1772 +-UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116,
1773 ++UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0117,
1774 + "JMicron",
1775 + "USB to ATA/ATAPI Bridge",
1776 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1777 +diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
1778 +index 4dddd8298a22..3e2e2e6a8328 100644
1779 +--- a/drivers/watchdog/bcm2835_wdt.c
1780 ++++ b/drivers/watchdog/bcm2835_wdt.c
1781 +@@ -240,6 +240,7 @@ module_param(nowayout, bool, 0);
1782 + MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
1783 + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
1784 +
1785 ++MODULE_ALIAS("platform:bcm2835-wdt");
1786 + MODULE_AUTHOR("Lubomir Rintel <lkundrak@××.sk>");
1787 + MODULE_DESCRIPTION("Driver for Broadcom BCM2835 watchdog timer");
1788 + MODULE_LICENSE("GPL");
1789 +diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
1790 +index 1452177c822d..c719389381dc 100644
1791 +--- a/fs/nfs/nfs4_fs.h
1792 ++++ b/fs/nfs/nfs4_fs.h
1793 +@@ -434,7 +434,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
1794 +
1795 + extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
1796 + extern void nfs4_put_state_owner(struct nfs4_state_owner *);
1797 +-extern void nfs4_purge_state_owners(struct nfs_server *);
1798 ++extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
1799 ++extern void nfs4_free_state_owners(struct list_head *head);
1800 + extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
1801 + extern void nfs4_put_open_state(struct nfs4_state *);
1802 + extern void nfs4_close_state(struct nfs4_state *, fmode_t);
1803 +diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
1804 +index 43f42cc30a60..1ec6dd4f3e2e 100644
1805 +--- a/fs/nfs/nfs4client.c
1806 ++++ b/fs/nfs/nfs4client.c
1807 +@@ -781,9 +781,12 @@ found:
1808 +
1809 + static void nfs4_destroy_server(struct nfs_server *server)
1810 + {
1811 ++ LIST_HEAD(freeme);
1812 ++
1813 + nfs_server_return_all_delegations(server);
1814 + unset_pnfs_layoutdriver(server);
1815 +- nfs4_purge_state_owners(server);
1816 ++ nfs4_purge_state_owners(server, &freeme);
1817 ++ nfs4_free_state_owners(&freeme);
1818 + }
1819 +
1820 + /*
1821 +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
1822 +index 6f474b067032..4e63daeef633 100644
1823 +--- a/fs/nfs/nfs4state.c
1824 ++++ b/fs/nfs/nfs4state.c
1825 +@@ -611,24 +611,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
1826 + /**
1827 + * nfs4_purge_state_owners - Release all cached state owners
1828 + * @server: nfs_server with cached state owners to release
1829 ++ * @head: resulting list of state owners
1830 + *
1831 + * Called at umount time. Remaining state owners will be on
1832 + * the LRU with ref count of zero.
1833 ++ * Note that the state owners are not freed, but are added
1834 ++ * to the list @head, which can later be used as an argument
1835 ++ * to nfs4_free_state_owners.
1836 + */
1837 +-void nfs4_purge_state_owners(struct nfs_server *server)
1838 ++void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
1839 + {
1840 + struct nfs_client *clp = server->nfs_client;
1841 + struct nfs4_state_owner *sp, *tmp;
1842 +- LIST_HEAD(doomed);
1843 +
1844 + spin_lock(&clp->cl_lock);
1845 + list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
1846 +- list_move(&sp->so_lru, &doomed);
1847 ++ list_move(&sp->so_lru, head);
1848 + nfs4_remove_state_owner_locked(sp);
1849 + }
1850 + spin_unlock(&clp->cl_lock);
1851 ++}
1852 +
1853 +- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
1854 ++/**
1855 ++ * nfs4_purge_state_owners - Release all cached state owners
1856 ++ * @head: resulting list of state owners
1857 ++ *
1858 ++ * Frees a list of state owners that was generated by
1859 ++ * nfs4_purge_state_owners
1860 ++ */
1861 ++void nfs4_free_state_owners(struct list_head *head)
1862 ++{
1863 ++ struct nfs4_state_owner *sp, *tmp;
1864 ++
1865 ++ list_for_each_entry_safe(sp, tmp, head, so_lru) {
1866 + list_del(&sp->so_lru);
1867 + nfs4_free_state_owner(sp);
1868 + }
1869 +@@ -1764,12 +1779,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
1870 + struct nfs4_state_owner *sp;
1871 + struct nfs_server *server;
1872 + struct rb_node *pos;
1873 ++ LIST_HEAD(freeme);
1874 + int status = 0;
1875 +
1876 + restart:
1877 + rcu_read_lock();
1878 + list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
1879 +- nfs4_purge_state_owners(server);
1880 ++ nfs4_purge_state_owners(server, &freeme);
1881 + spin_lock(&clp->cl_lock);
1882 + for (pos = rb_first(&server->state_owners);
1883 + pos != NULL;
1884 +@@ -1798,6 +1814,7 @@ restart:
1885 + spin_unlock(&clp->cl_lock);
1886 + }
1887 + rcu_read_unlock();
1888 ++ nfs4_free_state_owners(&freeme);
1889 + return 0;
1890 + }
1891 +
1892 +diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
1893 +index 8bf425a103f0..de63d4e2dfba 100644
1894 +--- a/fs/userfaultfd.c
1895 ++++ b/fs/userfaultfd.c
1896 +@@ -464,6 +464,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1897 + /* len == 0 means wake all */
1898 + struct userfaultfd_wake_range range = { .len = 0, };
1899 + unsigned long new_flags;
1900 ++ bool still_valid;
1901 +
1902 + ACCESS_ONCE(ctx->released) = true;
1903 +
1904 +@@ -479,8 +480,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1905 + * taking the mmap_sem for writing.
1906 + */
1907 + down_write(&mm->mmap_sem);
1908 +- if (!mmget_still_valid(mm))
1909 +- goto skip_mm;
1910 ++ still_valid = mmget_still_valid(mm);
1911 + prev = NULL;
1912 + for (vma = mm->mmap; vma; vma = vma->vm_next) {
1913 + cond_resched();
1914 +@@ -491,19 +491,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
1915 + continue;
1916 + }
1917 + new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
1918 +- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
1919 +- new_flags, vma->anon_vma,
1920 +- vma->vm_file, vma->vm_pgoff,
1921 +- vma_policy(vma),
1922 +- NULL_VM_UFFD_CTX);
1923 +- if (prev)
1924 +- vma = prev;
1925 +- else
1926 +- prev = vma;
1927 ++ if (still_valid) {
1928 ++ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
1929 ++ new_flags, vma->anon_vma,
1930 ++ vma->vm_file, vma->vm_pgoff,
1931 ++ vma_policy(vma),
1932 ++ NULL_VM_UFFD_CTX);
1933 ++ if (prev)
1934 ++ vma = prev;
1935 ++ else
1936 ++ prev = vma;
1937 ++ }
1938 + vma->vm_flags = new_flags;
1939 + vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
1940 + }
1941 +-skip_mm:
1942 + up_write(&mm->mmap_sem);
1943 + mmput(mm);
1944 + wakeup:
1945 +diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
1946 +index 33c389934238..7bfddcd32d73 100644
1947 +--- a/fs/xfs/xfs_iops.c
1948 ++++ b/fs/xfs/xfs_iops.c
1949 +@@ -774,6 +774,7 @@ xfs_setattr_nonsize(
1950 +
1951 + out_cancel:
1952 + xfs_trans_cancel(tp);
1953 ++ xfs_iunlock(ip, XFS_ILOCK_EXCL);
1954 + out_dqrele:
1955 + xfs_qm_dqrele(udqp);
1956 + xfs_qm_dqrele(gdqp);
1957 +diff --git a/include/net/tcp.h b/include/net/tcp.h
1958 +index a474213ca015..23814d997e86 100644
1959 +--- a/include/net/tcp.h
1960 ++++ b/include/net/tcp.h
1961 +@@ -1609,6 +1609,10 @@ static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
1962 + {
1963 + struct sk_buff *skb = tcp_send_head(sk);
1964 +
1965 ++ /* empty retransmit queue, for example due to zero window */
1966 ++ if (skb == tcp_write_queue_head(sk))
1967 ++ return NULL;
1968 ++
1969 + return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
1970 + }
1971 +
1972 +diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
1973 +index 5e0ea17d01a6..8847f277a14f 100644
1974 +--- a/kernel/irq/irqdesc.c
1975 ++++ b/kernel/irq/irqdesc.c
1976 +@@ -267,6 +267,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
1977 + }
1978 + }
1979 +
1980 ++static void irq_sysfs_del(struct irq_desc *desc)
1981 ++{
1982 ++ /*
1983 ++ * If irq_sysfs_init() has not yet been invoked (early boot), then
1984 ++ * irq_kobj_base is NULL and the descriptor was never added.
1985 ++ * kobject_del() complains about a object with no parent, so make
1986 ++ * it conditional.
1987 ++ */
1988 ++ if (irq_kobj_base)
1989 ++ kobject_del(&desc->kobj);
1990 ++}
1991 ++
1992 + static int __init irq_sysfs_init(void)
1993 + {
1994 + struct irq_desc *desc;
1995 +@@ -297,6 +309,7 @@ static struct kobj_type irq_kobj_type = {
1996 + };
1997 +
1998 + static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
1999 ++static void irq_sysfs_del(struct irq_desc *desc) {}
2000 +
2001 + #endif /* CONFIG_SYSFS */
2002 +
2003 +@@ -406,7 +419,7 @@ static void free_desc(unsigned int irq)
2004 + * The sysfs entry must be serialized against a concurrent
2005 + * irq_sysfs_init() as well.
2006 + */
2007 +- kobject_del(&desc->kobj);
2008 ++ irq_sysfs_del(desc);
2009 + delete_irq_desc(irq);
2010 +
2011 + /*
2012 +diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2013 +index 7ea8da990b9d..f32f73fa5d3a 100644
2014 +--- a/mm/huge_memory.c
2015 ++++ b/mm/huge_memory.c
2016 +@@ -30,6 +30,7 @@
2017 + #include <linux/userfaultfd_k.h>
2018 + #include <linux/page_idle.h>
2019 + #include <linux/shmem_fs.h>
2020 ++#include <linux/page_owner.h>
2021 +
2022 + #include <asm/tlb.h>
2023 + #include <asm/pgalloc.h>
2024 +@@ -1950,6 +1951,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
2025 + }
2026 +
2027 + ClearPageCompound(head);
2028 ++
2029 ++ split_page_owner(head, HPAGE_PMD_ORDER);
2030 ++
2031 + /* See comment in __split_huge_page_tail() */
2032 + if (PageAnon(head)) {
2033 + page_ref_inc(head);
2034 +diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
2035 +index cf15851a7d2f..5a50ad517f0f 100644
2036 +--- a/mm/zsmalloc.c
2037 ++++ b/mm/zsmalloc.c
2038 +@@ -52,6 +52,7 @@
2039 + #include <linux/zpool.h>
2040 + #include <linux/mount.h>
2041 + #include <linux/migrate.h>
2042 ++#include <linux/wait.h>
2043 + #include <linux/pagemap.h>
2044 +
2045 + #define ZSPAGE_MAGIC 0x58
2046 +@@ -265,6 +266,10 @@ struct zs_pool {
2047 + #ifdef CONFIG_COMPACTION
2048 + struct inode *inode;
2049 + struct work_struct free_work;
2050 ++ /* A wait queue for when migration races with async_free_zspage() */
2051 ++ wait_queue_head_t migration_wait;
2052 ++ atomic_long_t isolated_pages;
2053 ++ bool destroying;
2054 + #endif
2055 + };
2056 +
2057 +@@ -1939,6 +1944,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
2058 + zspage->isolated--;
2059 + }
2060 +
2061 ++static void putback_zspage_deferred(struct zs_pool *pool,
2062 ++ struct size_class *class,
2063 ++ struct zspage *zspage)
2064 ++{
2065 ++ enum fullness_group fg;
2066 ++
2067 ++ fg = putback_zspage(class, zspage);
2068 ++ if (fg == ZS_EMPTY)
2069 ++ schedule_work(&pool->free_work);
2070 ++
2071 ++}
2072 ++
2073 ++static inline void zs_pool_dec_isolated(struct zs_pool *pool)
2074 ++{
2075 ++ VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
2076 ++ atomic_long_dec(&pool->isolated_pages);
2077 ++ /*
2078 ++ * There's no possibility of racing, since wait_for_isolated_drain()
2079 ++ * checks the isolated count under &class->lock after enqueuing
2080 ++ * on migration_wait.
2081 ++ */
2082 ++ if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
2083 ++ wake_up_all(&pool->migration_wait);
2084 ++}
2085 ++
2086 + static void replace_sub_page(struct size_class *class, struct zspage *zspage,
2087 + struct page *newpage, struct page *oldpage)
2088 + {
2089 +@@ -2008,6 +2038,7 @@ bool zs_page_isolate(struct page *page, isolate_mode_t mode)
2090 + */
2091 + if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
2092 + get_zspage_mapping(zspage, &class_idx, &fullness);
2093 ++ atomic_long_inc(&pool->isolated_pages);
2094 + remove_zspage(class, zspage, fullness);
2095 + }
2096 +
2097 +@@ -2096,8 +2127,16 @@ int zs_page_migrate(struct address_space *mapping, struct page *newpage,
2098 + * Page migration is done so let's putback isolated zspage to
2099 + * the list if @page is final isolated subpage in the zspage.
2100 + */
2101 +- if (!is_zspage_isolated(zspage))
2102 +- putback_zspage(class, zspage);
2103 ++ if (!is_zspage_isolated(zspage)) {
2104 ++ /*
2105 ++ * We cannot race with zs_destroy_pool() here because we wait
2106 ++ * for isolation to hit zero before we start destroying.
2107 ++ * Also, we ensure that everyone can see pool->destroying before
2108 ++ * we start waiting.
2109 ++ */
2110 ++ putback_zspage_deferred(pool, class, zspage);
2111 ++ zs_pool_dec_isolated(pool);
2112 ++ }
2113 +
2114 + reset_page(page);
2115 + put_page(page);
2116 +@@ -2144,13 +2183,12 @@ void zs_page_putback(struct page *page)
2117 + spin_lock(&class->lock);
2118 + dec_zspage_isolation(zspage);
2119 + if (!is_zspage_isolated(zspage)) {
2120 +- fg = putback_zspage(class, zspage);
2121 + /*
2122 + * Due to page_lock, we cannot free zspage immediately
2123 + * so let's defer.
2124 + */
2125 +- if (fg == ZS_EMPTY)
2126 +- schedule_work(&pool->free_work);
2127 ++ putback_zspage_deferred(pool, class, zspage);
2128 ++ zs_pool_dec_isolated(pool);
2129 + }
2130 + spin_unlock(&class->lock);
2131 + }
2132 +@@ -2174,8 +2212,36 @@ static int zs_register_migration(struct zs_pool *pool)
2133 + return 0;
2134 + }
2135 +
2136 ++static bool pool_isolated_are_drained(struct zs_pool *pool)
2137 ++{
2138 ++ return atomic_long_read(&pool->isolated_pages) == 0;
2139 ++}
2140 ++
2141 ++/* Function for resolving migration */
2142 ++static void wait_for_isolated_drain(struct zs_pool *pool)
2143 ++{
2144 ++
2145 ++ /*
2146 ++ * We're in the process of destroying the pool, so there are no
2147 ++ * active allocations. zs_page_isolate() fails for completely free
2148 ++ * zspages, so we need only wait for the zs_pool's isolated
2149 ++ * count to hit zero.
2150 ++ */
2151 ++ wait_event(pool->migration_wait,
2152 ++ pool_isolated_are_drained(pool));
2153 ++}
2154 ++
2155 + static void zs_unregister_migration(struct zs_pool *pool)
2156 + {
2157 ++ pool->destroying = true;
2158 ++ /*
2159 ++ * We need a memory barrier here to ensure global visibility of
2160 ++ * pool->destroying. Thus pool->isolated pages will either be 0 in which
2161 ++ * case we don't care, or it will be > 0 and pool->destroying will
2162 ++ * ensure that we wake up once isolation hits 0.
2163 ++ */
2164 ++ smp_mb();
2165 ++ wait_for_isolated_drain(pool); /* This can block */
2166 + flush_work(&pool->free_work);
2167 + iput(pool->inode);
2168 + }
2169 +@@ -2422,6 +2488,10 @@ struct zs_pool *zs_create_pool(const char *name)
2170 + if (!pool->name)
2171 + goto err;
2172 +
2173 ++#ifdef CONFIG_COMPACTION
2174 ++ init_waitqueue_head(&pool->migration_wait);
2175 ++#endif
2176 ++
2177 + if (create_cache(pool))
2178 + goto err;
2179 +
2180 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2181 +index 142ccaae9c7b..4a47918b504f 100644
2182 +--- a/net/bridge/netfilter/ebtables.c
2183 ++++ b/net/bridge/netfilter/ebtables.c
2184 +@@ -2288,8 +2288,10 @@ static int compat_do_replace(struct net *net, void __user *user,
2185 + state.buf_kern_len = size64;
2186 +
2187 + ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2188 +- if (WARN_ON(ret < 0))
2189 ++ if (WARN_ON(ret < 0)) {
2190 ++ vfree(entries_tmp);
2191 + goto out_unlock;
2192 ++ }
2193 +
2194 + vfree(entries_tmp);
2195 + tmp.entries_size = size64;
2196 +diff --git a/net/core/stream.c b/net/core/stream.c
2197 +index 1086c8b280a8..6e41b20bf9f8 100644
2198 +--- a/net/core/stream.c
2199 ++++ b/net/core/stream.c
2200 +@@ -118,7 +118,6 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
2201 + int err = 0;
2202 + long vm_wait = 0;
2203 + long current_timeo = *timeo_p;
2204 +- bool noblock = (*timeo_p ? false : true);
2205 + DEFINE_WAIT(wait);
2206 +
2207 + if (sk_stream_memory_free(sk))
2208 +@@ -131,11 +130,8 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
2209 +
2210 + if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
2211 + goto do_error;
2212 +- if (!*timeo_p) {
2213 +- if (noblock)
2214 +- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2215 +- goto do_nonblock;
2216 +- }
2217 ++ if (!*timeo_p)
2218 ++ goto do_eagain;
2219 + if (signal_pending(current))
2220 + goto do_interrupted;
2221 + sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
2222 +@@ -167,7 +163,13 @@ out:
2223 + do_error:
2224 + err = -EPIPE;
2225 + goto out;
2226 +-do_nonblock:
2227 ++do_eagain:
2228 ++ /* Make sure that whenever EAGAIN is returned, EPOLLOUT event can
2229 ++ * be generated later.
2230 ++ * When TCP receives ACK packets that make room, tcp_check_space()
2231 ++ * only calls tcp_new_space() if SOCK_NOSPACE is set.
2232 ++ */
2233 ++ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
2234 + err = -EAGAIN;
2235 + goto out;
2236 + do_interrupted:
2237 +diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
2238 +index 954315e1661d..3b2c4692d966 100644
2239 +--- a/net/mac80211/cfg.c
2240 ++++ b/net/mac80211/cfg.c
2241 +@@ -1418,6 +1418,11 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
2242 + if (is_multicast_ether_addr(mac))
2243 + return -EINVAL;
2244 +
2245 ++ if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER) &&
2246 ++ sdata->vif.type == NL80211_IFTYPE_STATION &&
2247 ++ !sdata->u.mgd.associated)
2248 ++ return -EINVAL;
2249 ++
2250 + sta = sta_info_alloc(sdata, mac, GFP_KERNEL);
2251 + if (!sta)
2252 + return -ENOMEM;
2253 +@@ -1425,10 +1430,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
2254 + if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
2255 + sta->sta.tdls = true;
2256 +
2257 +- if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
2258 +- !sdata->u.mgd.associated)
2259 +- return -EINVAL;
2260 +-
2261 + err = sta_apply_parameters(local, sta, params);
2262 + if (err) {
2263 + sta_info_free(local, sta);
2264 +diff --git a/net/wireless/reg.c b/net/wireless/reg.c
2265 +index 7c19d0d2549b..d1378340d590 100644
2266 +--- a/net/wireless/reg.c
2267 ++++ b/net/wireless/reg.c
2268 +@@ -2165,7 +2165,7 @@ static void reg_process_pending_hints(void)
2269 +
2270 + /* When last_request->processed becomes true this will be rescheduled */
2271 + if (lr && !lr->processed) {
2272 +- reg_process_hint(lr);
2273 ++ pr_debug("Pending regulatory request, waiting for it to be processed...\n");
2274 + return;
2275 + }
2276 +
2277 +diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
2278 +index 130e22742137..eee4ea17a8f5 100644
2279 +--- a/sound/core/seq/seq_clientmgr.c
2280 ++++ b/sound/core/seq/seq_clientmgr.c
2281 +@@ -1822,8 +1822,7 @@ static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
2282 + if (cptr->type == USER_CLIENT) {
2283 + info->input_pool = cptr->data.user.fifo_pool_size;
2284 + info->input_free = info->input_pool;
2285 +- if (cptr->data.user.fifo)
2286 +- info->input_free = snd_seq_unused_cells(cptr->data.user.fifo->pool);
2287 ++ info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
2288 + } else {
2289 + info->input_pool = 0;
2290 + info->input_free = 0;
2291 +diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
2292 +index 9acbed1ac982..d9f5428ee995 100644
2293 +--- a/sound/core/seq/seq_fifo.c
2294 ++++ b/sound/core/seq/seq_fifo.c
2295 +@@ -278,3 +278,20 @@ int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
2296 +
2297 + return 0;
2298 + }
2299 ++
2300 ++/* get the number of unused cells safely */
2301 ++int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f)
2302 ++{
2303 ++ unsigned long flags;
2304 ++ int cells;
2305 ++
2306 ++ if (!f)
2307 ++ return 0;
2308 ++
2309 ++ snd_use_lock_use(&f->use_lock);
2310 ++ spin_lock_irqsave(&f->lock, flags);
2311 ++ cells = snd_seq_unused_cells(f->pool);
2312 ++ spin_unlock_irqrestore(&f->lock, flags);
2313 ++ snd_use_lock_free(&f->use_lock);
2314 ++ return cells;
2315 ++}
2316 +diff --git a/sound/core/seq/seq_fifo.h b/sound/core/seq/seq_fifo.h
2317 +index 062c446e7867..5d38a0d7f0cd 100644
2318 +--- a/sound/core/seq/seq_fifo.h
2319 ++++ b/sound/core/seq/seq_fifo.h
2320 +@@ -68,5 +68,7 @@ int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file, poll_table
2321 + /* resize pool in fifo */
2322 + int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize);
2323 +
2324 ++/* get the number of unused cells safely */
2325 ++int snd_seq_fifo_unused_cells(struct snd_seq_fifo *f);
2326 +
2327 + #endif
2328 +diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
2329 +index 5a0b17ebfc02..624c209c9498 100644
2330 +--- a/sound/soc/davinci/davinci-mcasp.c
2331 ++++ b/sound/soc/davinci/davinci-mcasp.c
2332 +@@ -1158,6 +1158,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
2333 + return ret;
2334 + }
2335 +
2336 ++static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
2337 ++ struct snd_pcm_hw_rule *rule)
2338 ++{
2339 ++ struct davinci_mcasp_ruledata *rd = rule->private;
2340 ++ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
2341 ++ struct snd_mask nfmt;
2342 ++ int i, slot_width;
2343 ++
2344 ++ snd_mask_none(&nfmt);
2345 ++ slot_width = rd->mcasp->slot_width;
2346 ++
2347 ++ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
2348 ++ if (snd_mask_test(fmt, i)) {
2349 ++ if (snd_pcm_format_width(i) <= slot_width) {
2350 ++ snd_mask_set(&nfmt, i);
2351 ++ }
2352 ++ }
2353 ++ }
2354 ++
2355 ++ return snd_mask_refine(fmt, &nfmt);
2356 ++}
2357 ++
2358 + static const unsigned int davinci_mcasp_dai_rates[] = {
2359 + 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
2360 + 88200, 96000, 176400, 192000,
2361 +@@ -1251,7 +1273,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
2362 + struct davinci_mcasp_ruledata *ruledata =
2363 + &mcasp->ruledata[substream->stream];
2364 + u32 max_channels = 0;
2365 +- int i, dir;
2366 ++ int i, dir, ret;
2367 + int tdm_slots = mcasp->tdm_slots;
2368 +
2369 + /* Do not allow more then one stream per direction */
2370 +@@ -1280,6 +1302,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
2371 + max_channels++;
2372 + }
2373 + ruledata->serializers = max_channels;
2374 ++ ruledata->mcasp = mcasp;
2375 + max_channels *= tdm_slots;
2376 + /*
2377 + * If the already active stream has less channels than the calculated
2378 +@@ -1305,20 +1328,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
2379 + 0, SNDRV_PCM_HW_PARAM_CHANNELS,
2380 + &mcasp->chconstr[substream->stream]);
2381 +
2382 +- if (mcasp->slot_width)
2383 +- snd_pcm_hw_constraint_minmax(substream->runtime,
2384 +- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
2385 +- 8, mcasp->slot_width);
2386 ++ if (mcasp->slot_width) {
2387 ++ /* Only allow formats require <= slot_width bits on the bus */
2388 ++ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
2389 ++ SNDRV_PCM_HW_PARAM_FORMAT,
2390 ++ davinci_mcasp_hw_rule_slot_width,
2391 ++ ruledata,
2392 ++ SNDRV_PCM_HW_PARAM_FORMAT, -1);
2393 ++ if (ret)
2394 ++ return ret;
2395 ++ }
2396 +
2397 + /*
2398 + * If we rely on implicit BCLK divider setting we should
2399 + * set constraints based on what we can provide.
2400 + */
2401 + if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
2402 +- int ret;
2403 +-
2404 +- ruledata->mcasp = mcasp;
2405 +-
2406 + ret = snd_pcm_hw_rule_add(substream->runtime, 0,
2407 + SNDRV_PCM_HW_PARAM_RATE,
2408 + davinci_mcasp_hw_rule_rate,
2409 +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2410 +index ab647f1fe11b..08bfc91c686f 100644
2411 +--- a/sound/soc/soc-dapm.c
2412 ++++ b/sound/soc/soc-dapm.c
2413 +@@ -1104,8 +1104,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
2414 + list_add_tail(&widget->work_list, list);
2415 +
2416 + if (custom_stop_condition && custom_stop_condition(widget, dir)) {
2417 +- widget->endpoints[dir] = 1;
2418 +- return widget->endpoints[dir];
2419 ++ list = NULL;
2420 ++ custom_stop_condition = NULL;
2421 + }
2422 +
2423 + if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
2424 +@@ -1142,8 +1142,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
2425 + *
2426 + * Optionally, can be supplied with a function acting as a stopping condition.
2427 + * This function takes the dapm widget currently being examined and the walk
2428 +- * direction as an arguments, it should return true if the walk should be
2429 +- * stopped and false otherwise.
2430 ++ * direction as an arguments, it should return true if widgets from that point
2431 ++ * in the graph onwards should not be added to the widget list.
2432 + */
2433 + static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
2434 + struct list_head *list,
2435 +diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
2436 +index a9f99a6c3909..74b399372e0b 100644
2437 +--- a/sound/usb/line6/pcm.c
2438 ++++ b/sound/usb/line6/pcm.c
2439 +@@ -552,6 +552,15 @@ int line6_init_pcm(struct usb_line6 *line6,
2440 + line6pcm->volume_monitor = 255;
2441 + line6pcm->line6 = line6;
2442 +
2443 ++ spin_lock_init(&line6pcm->out.lock);
2444 ++ spin_lock_init(&line6pcm->in.lock);
2445 ++ line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
2446 ++
2447 ++ line6->line6pcm = line6pcm;
2448 ++
2449 ++ pcm->private_data = line6pcm;
2450 ++ pcm->private_free = line6_cleanup_pcm;
2451 ++
2452 + line6pcm->max_packet_size_in =
2453 + usb_maxpacket(line6->usbdev,
2454 + usb_rcvisocpipe(line6->usbdev, ep_read), 0);
2455 +@@ -564,15 +573,6 @@ int line6_init_pcm(struct usb_line6 *line6,
2456 + return -EINVAL;
2457 + }
2458 +
2459 +- spin_lock_init(&line6pcm->out.lock);
2460 +- spin_lock_init(&line6pcm->in.lock);
2461 +- line6pcm->impulse_period = LINE6_IMPULSE_DEFAULT_PERIOD;
2462 +-
2463 +- line6->line6pcm = line6pcm;
2464 +-
2465 +- pcm->private_data = line6pcm;
2466 +- pcm->private_free = line6_cleanup_pcm;
2467 +-
2468 + err = line6_create_audio_out_urbs(line6pcm);
2469 + if (err < 0)
2470 + return err;
2471 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2472 +index 248a4bd82397..a02443717625 100644
2473 +--- a/sound/usb/mixer.c
2474 ++++ b/sound/usb/mixer.c
2475 +@@ -82,6 +82,7 @@ struct mixer_build {
2476 + unsigned char *buffer;
2477 + unsigned int buflen;
2478 + DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
2479 ++ DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
2480 + struct usb_audio_term oterm;
2481 + const struct usbmix_name_map *map;
2482 + const struct usbmix_selector_map *selector_map;
2483 +@@ -710,15 +711,24 @@ static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm
2484 + * parse the source unit recursively until it reaches to a terminal
2485 + * or a branched unit.
2486 + */
2487 +-static int check_input_term(struct mixer_build *state, int id,
2488 ++static int __check_input_term(struct mixer_build *state, int id,
2489 + struct usb_audio_term *term)
2490 + {
2491 + int err;
2492 + void *p1;
2493 ++ unsigned char *hdr;
2494 +
2495 + memset(term, 0, sizeof(*term));
2496 +- while ((p1 = find_audio_control_unit(state, id)) != NULL) {
2497 +- unsigned char *hdr = p1;
2498 ++ for (;;) {
2499 ++ /* a loop in the terminal chain? */
2500 ++ if (test_and_set_bit(id, state->termbitmap))
2501 ++ return -EINVAL;
2502 ++
2503 ++ p1 = find_audio_control_unit(state, id);
2504 ++ if (!p1)
2505 ++ break;
2506 ++
2507 ++ hdr = p1;
2508 + term->id = id;
2509 + switch (hdr[2]) {
2510 + case UAC_INPUT_TERMINAL:
2511 +@@ -733,7 +743,7 @@ static int check_input_term(struct mixer_build *state, int id,
2512 +
2513 + /* call recursively to verify that the
2514 + * referenced clock entity is valid */
2515 +- err = check_input_term(state, d->bCSourceID, term);
2516 ++ err = __check_input_term(state, d->bCSourceID, term);
2517 + if (err < 0)
2518 + return err;
2519 +
2520 +@@ -765,7 +775,7 @@ static int check_input_term(struct mixer_build *state, int id,
2521 + case UAC2_CLOCK_SELECTOR: {
2522 + struct uac_selector_unit_descriptor *d = p1;
2523 + /* call recursively to retrieve the channel info */
2524 +- err = check_input_term(state, d->baSourceID[0], term);
2525 ++ err = __check_input_term(state, d->baSourceID[0], term);
2526 + if (err < 0)
2527 + return err;
2528 + term->type = d->bDescriptorSubtype << 16; /* virtual type */
2529 +@@ -812,6 +822,15 @@ static int check_input_term(struct mixer_build *state, int id,
2530 + return -ENODEV;
2531 + }
2532 +
2533 ++
2534 ++static int check_input_term(struct mixer_build *state, int id,
2535 ++ struct usb_audio_term *term)
2536 ++{
2537 ++ memset(term, 0, sizeof(*term));
2538 ++ memset(state->termbitmap, 0, sizeof(state->termbitmap));
2539 ++ return __check_input_term(state, id, term);
2540 ++}
2541 ++
2542 + /*
2543 + * Feature Unit
2544 + */
2545 +@@ -1694,6 +1713,7 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
2546 + int pin, ich, err;
2547 +
2548 + if (desc->bLength < 11 || !(input_pins = desc->bNrInPins) ||
2549 ++ desc->bLength < sizeof(*desc) + desc->bNrInPins ||
2550 + !(num_outs = uac_mixer_unit_bNrChannels(desc))) {
2551 + usb_audio_err(state->chip,
2552 + "invalid MIXER UNIT descriptor %d\n",
2553 +diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
2554 +index 177480066816..fffc7c418459 100644
2555 +--- a/tools/hv/hv_kvp_daemon.c
2556 ++++ b/tools/hv/hv_kvp_daemon.c
2557 +@@ -1379,6 +1379,8 @@ int main(int argc, char *argv[])
2558 + daemonize = 0;
2559 + break;
2560 + case 'h':
2561 ++ print_usage(argv);
2562 ++ exit(0);
2563 + default:
2564 + print_usage(argv);
2565 + exit(EXIT_FAILURE);
2566 +diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
2567 +index e0829809c897..bdc1891e0a9a 100644
2568 +--- a/tools/hv/hv_vss_daemon.c
2569 ++++ b/tools/hv/hv_vss_daemon.c
2570 +@@ -164,6 +164,8 @@ int main(int argc, char *argv[])
2571 + daemonize = 0;
2572 + break;
2573 + case 'h':
2574 ++ print_usage(argv);
2575 ++ exit(0);
2576 + default:
2577 + print_usage(argv);
2578 + exit(EXIT_FAILURE);
2579 +diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
2580 +index e58be7eeced8..7b364f2926d4 100644
2581 +--- a/tools/perf/bench/numa.c
2582 ++++ b/tools/perf/bench/numa.c
2583 +@@ -373,8 +373,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
2584 +
2585 + /* Allocate and initialize all memory on CPU#0: */
2586 + if (init_cpu0) {
2587 +- orig_mask = bind_to_node(0);
2588 +- bind_to_memnode(0);
2589 ++ int node = numa_node_of_cpu(0);
2590 ++
2591 ++ orig_mask = bind_to_node(node);
2592 ++ bind_to_memnode(node);
2593 + }
2594 +
2595 + bytes = bytes0 + HPSIZE;
2596 +diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
2597 +index 41611d7f9873..016d12af6877 100644
2598 +--- a/tools/perf/pmu-events/jevents.c
2599 ++++ b/tools/perf/pmu-events/jevents.c
2600 +@@ -315,6 +315,7 @@ static struct fixed {
2601 + { "inst_retired.any_p", "event=0xc0" },
2602 + { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
2603 + { "cpu_clk_unhalted.thread", "event=0x3c" },
2604 ++ { "cpu_clk_unhalted.core", "event=0x3c" },
2605 + { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
2606 + { NULL, NULL},
2607 + };
2608 +diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
2609 +index 9134a0c3e99d..aa9276bfe3e9 100644
2610 +--- a/tools/perf/tests/parse-events.c
2611 ++++ b/tools/perf/tests/parse-events.c
2612 +@@ -12,32 +12,6 @@
2613 + #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
2614 + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
2615 +
2616 +-#if defined(__s390x__)
2617 +-/* Return true if kvm module is available and loaded. Test this
2618 +- * and retun success when trace point kvm_s390_create_vm
2619 +- * exists. Otherwise this test always fails.
2620 +- */
2621 +-static bool kvm_s390_create_vm_valid(void)
2622 +-{
2623 +- char *eventfile;
2624 +- bool rc = false;
2625 +-
2626 +- eventfile = get_events_file("kvm-s390");
2627 +-
2628 +- if (eventfile) {
2629 +- DIR *mydir = opendir(eventfile);
2630 +-
2631 +- if (mydir) {
2632 +- rc = true;
2633 +- closedir(mydir);
2634 +- }
2635 +- put_events_file(eventfile);
2636 +- }
2637 +-
2638 +- return rc;
2639 +-}
2640 +-#endif
2641 +-
2642 + static int test__checkevent_tracepoint(struct perf_evlist *evlist)
2643 + {
2644 + struct perf_evsel *evsel = perf_evlist__first(evlist);
2645 +@@ -1619,7 +1593,6 @@ static struct evlist_test test__events[] = {
2646 + {
2647 + .name = "kvm-s390:kvm_s390_create_vm",
2648 + .check = test__checkevent_tracepoint,
2649 +- .valid = kvm_s390_create_vm_valid,
2650 + .id = 100,
2651 + },
2652 + #endif
2653 +diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
2654 +new file mode 100644
2655 +index 000000000000..63ed533f73d6
2656 +--- /dev/null
2657 ++++ b/tools/testing/selftests/kvm/config
2658 +@@ -0,0 +1,3 @@
2659 ++CONFIG_KVM=y
2660 ++CONFIG_KVM_INTEL=y
2661 ++CONFIG_KVM_AMD=y
2662 +diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
2663 +index 85814d1bad11..87742c9803a7 100644
2664 +--- a/virt/kvm/arm/vgic/vgic-mmio.c
2665 ++++ b/virt/kvm/arm/vgic/vgic-mmio.c
2666 +@@ -120,6 +120,12 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
2667 + return value;
2668 + }
2669 +
2670 ++static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq)
2671 ++{
2672 ++ return (vgic_irq_is_sgi(irq->intid) &&
2673 ++ vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2);
2674 ++}
2675 ++
2676 + void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
2677 + gpa_t addr, unsigned int len,
2678 + unsigned long val)
2679 +@@ -130,6 +136,12 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu,
2680 + for_each_set_bit(i, &val, len * 8) {
2681 + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
2682 +
2683 ++ /* GICD_ISPENDR0 SGI bits are WI */
2684 ++ if (is_vgic_v2_sgi(vcpu, irq)) {
2685 ++ vgic_put_irq(vcpu->kvm, irq);
2686 ++ continue;
2687 ++ }
2688 ++
2689 + spin_lock(&irq->irq_lock);
2690 + irq->pending = true;
2691 + if (irq->config == VGIC_CONFIG_LEVEL)
2692 +@@ -150,6 +162,12 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu,
2693 + for_each_set_bit(i, &val, len * 8) {
2694 + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
2695 +
2696 ++ /* GICD_ICPENDR0 SGI bits are WI */
2697 ++ if (is_vgic_v2_sgi(vcpu, irq)) {
2698 ++ vgic_put_irq(vcpu->kvm, irq);
2699 ++ continue;
2700 ++ }
2701 ++
2702 + spin_lock(&irq->irq_lock);
2703 +
2704 + if (irq->config == VGIC_CONFIG_LEVEL) {
2705 +diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
2706 +index 1ab58f7b5d74..4c2919cc13ca 100644
2707 +--- a/virt/kvm/arm/vgic/vgic-v2.c
2708 ++++ b/virt/kvm/arm/vgic/vgic-v2.c
2709 +@@ -154,7 +154,10 @@ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
2710 + if (vgic_irq_is_sgi(irq->intid)) {
2711 + u32 src = ffs(irq->source);
2712 +
2713 +- BUG_ON(!src);
2714 ++ if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
2715 ++ irq->intid))
2716 ++ return;
2717 ++
2718 + val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
2719 + irq->source &= ~(1 << (src - 1));
2720 + if (irq->source)
2721 +diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
2722 +index c7924718990e..267b1cf88a7f 100644
2723 +--- a/virt/kvm/arm/vgic/vgic-v3.c
2724 ++++ b/virt/kvm/arm/vgic/vgic-v3.c
2725 +@@ -137,7 +137,10 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
2726 + model == KVM_DEV_TYPE_ARM_VGIC_V2) {
2727 + u32 src = ffs(irq->source);
2728 +
2729 +- BUG_ON(!src);
2730 ++ if (WARN_RATELIMIT(!src, "No SGI source for INTID %d\n",
2731 ++ irq->intid))
2732 ++ return;
2733 ++
2734 + val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
2735 + irq->source &= ~(1 << (src - 1));
2736 + if (irq->source)
2737 +diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
2738 +index 6440b56ec90e..1934dc8a2ce0 100644
2739 +--- a/virt/kvm/arm/vgic/vgic.c
2740 ++++ b/virt/kvm/arm/vgic/vgic.c
2741 +@@ -196,6 +196,13 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b)
2742 + bool penda, pendb;
2743 + int ret;
2744 +
2745 ++ /*
2746 ++ * list_sort may call this function with the same element when
2747 ++ * the list is fairly long.
2748 ++ */
2749 ++ if (unlikely(irqa == irqb))
2750 ++ return 0;
2751 ++
2752 + spin_lock(&irqa->irq_lock);
2753 + spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);
2754 +