Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Tue, 02 Jan 2018 20:12:52
Message-Id: 1514923958.c01c62de25217ce34aee2b44894bf32e6d7d2bb2.mpagano@gentoo
commit:     c01c62de25217ce34aee2b44894bf32e6d7d2bb2
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Tue Jan  2 20:12:38 2018 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Tue Jan  2 20:12:38 2018 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=c01c62de

Linux patch 4.4.109

 0000_README              |    4 +
 1108_linux-4.4.109.patch | 2290 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 2294 insertions(+)

14 diff --git a/0000_README b/0000_README
15 index 832fff6..3be106c 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -475,6 +475,10 @@ Patch: 1107_linux-4.4.108.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.108
21
22 +Patch: 1108_linux-4.4.109.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.109
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1108_linux-4.4.109.patch b/1108_linux-4.4.109.patch
31 new file mode 100644
32 index 0000000..c82ff1c
33 --- /dev/null
34 +++ b/1108_linux-4.4.109.patch
35 @@ -0,0 +1,2290 @@
36 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
37 +index 5d593ecadb90..b4a83a490212 100644
38 +--- a/Documentation/kernel-parameters.txt
39 ++++ b/Documentation/kernel-parameters.txt
40 +@@ -2555,6 +2555,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
41 + nopat [X86] Disable PAT (page attribute table extension of
42 + pagetables) support.
43 +
44 ++ nopcid [X86-64] Disable the PCID cpu feature.
45 ++
46 + norandmaps Don't use address space randomization. Equivalent to
47 + echo 0 > /proc/sys/kernel/randomize_va_space
48 +
49 +diff --git a/Makefile b/Makefile
50 +index 99f9834c4ba6..5d67056e24dd 100644
51 +--- a/Makefile
52 ++++ b/Makefile
53 +@@ -1,6 +1,6 @@
54 + VERSION = 4
55 + PATCHLEVEL = 4
56 +-SUBLEVEL = 108
57 ++SUBLEVEL = 109
58 + EXTRAVERSION =
59 + NAME = Blurry Fish Butt
60 +
61 +@@ -782,6 +782,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
62 + # disable invalid "can't wrap" optimizations for signed / pointers
63 + KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
64 +
65 ++# Make sure -fstack-check isn't enabled (like gentoo apparently did)
66 ++KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
67 ++
68 + # conserve stack if available
69 + KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
70 +
71 +diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
72 +index d1e65ce545b3..b2ab164a8094 100644
73 +--- a/arch/powerpc/perf/core-book3s.c
74 ++++ b/arch/powerpc/perf/core-book3s.c
75 +@@ -401,8 +401,12 @@ static __u64 power_pmu_bhrb_to(u64 addr)
76 + int ret;
77 + __u64 target;
78 +
79 +- if (is_kernel_addr(addr))
80 +- return branch_target((unsigned int *)addr);
81 ++ if (is_kernel_addr(addr)) {
82 ++ if (probe_kernel_read(&instr, (void *)addr, sizeof(instr)))
83 ++ return 0;
84 ++
85 ++ return branch_target(&instr);
86 ++ }
87 +
88 + /* Userspace: need copy instruction here then translate it */
89 + pagefault_disable();
90 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
91 +index 7e40905f6d4c..39d2dc66faa5 100644
92 +--- a/arch/x86/Kconfig
93 ++++ b/arch/x86/Kconfig
94 +@@ -42,7 +42,7 @@ config X86
95 + select ARCH_USE_CMPXCHG_LOCKREF if X86_64
96 + select ARCH_USE_QUEUED_RWLOCKS
97 + select ARCH_USE_QUEUED_SPINLOCKS
98 +- select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
99 ++ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
100 + select ARCH_WANTS_DYNAMIC_TASK_STRUCT
101 + select ARCH_WANT_FRAME_POINTERS
102 + select ARCH_WANT_IPC_PARSE_VERSION if X86_32
103 +diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
104 +index f226df064660..8b17c2ad1048 100644
105 +--- a/arch/x86/include/asm/disabled-features.h
106 ++++ b/arch/x86/include/asm/disabled-features.h
107 +@@ -21,11 +21,13 @@
108 + # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
109 + # define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
110 + # define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
111 ++# define DISABLE_PCID 0
112 + #else
113 + # define DISABLE_VME 0
114 + # define DISABLE_K6_MTRR 0
115 + # define DISABLE_CYRIX_ARR 0
116 + # define DISABLE_CENTAUR_MCR 0
117 ++# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
118 + #endif /* CONFIG_X86_64 */
119 +
120 + /*
121 +@@ -35,7 +37,7 @@
122 + #define DISABLED_MASK1 0
123 + #define DISABLED_MASK2 0
124 + #define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
125 +-#define DISABLED_MASK4 0
126 ++#define DISABLED_MASK4 (DISABLE_PCID)
127 + #define DISABLED_MASK5 0
128 + #define DISABLED_MASK6 0
129 + #define DISABLED_MASK7 0
130 +diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
131 +index 59405a248fc2..9b76cd331990 100644
132 +--- a/arch/x86/include/asm/hardirq.h
133 ++++ b/arch/x86/include/asm/hardirq.h
134 +@@ -22,8 +22,8 @@ typedef struct {
135 + #ifdef CONFIG_SMP
136 + unsigned int irq_resched_count;
137 + unsigned int irq_call_count;
138 +- unsigned int irq_tlb_count;
139 + #endif
140 ++ unsigned int irq_tlb_count;
141 + #ifdef CONFIG_X86_THERMAL_VECTOR
142 + unsigned int irq_thermal_count;
143 + #endif
144 +diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
145 +index 55234d5e7160..7680b76adafc 100644
146 +--- a/arch/x86/include/asm/mmu.h
147 ++++ b/arch/x86/include/asm/mmu.h
148 +@@ -24,12 +24,6 @@ typedef struct {
149 + atomic_t perf_rdpmc_allowed; /* nonzero if rdpmc is allowed */
150 + } mm_context_t;
151 +
152 +-#ifdef CONFIG_SMP
153 + void leave_mm(int cpu);
154 +-#else
155 +-static inline void leave_mm(int cpu)
156 +-{
157 +-}
158 +-#endif
159 +
160 + #endif /* _ASM_X86_MMU_H */
161 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
162 +index 44fc93987869..9bfc5fd77015 100644
163 +--- a/arch/x86/include/asm/mmu_context.h
164 ++++ b/arch/x86/include/asm/mmu_context.h
165 +@@ -98,10 +98,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
166 +
167 + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
168 + {
169 +-#ifdef CONFIG_SMP
170 + if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
171 + this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
172 +-#endif
173 + }
174 +
175 + extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
176 +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
177 +index 4dc534175b5e..9fc5968da820 100644
178 +--- a/arch/x86/include/asm/tlbflush.h
179 ++++ b/arch/x86/include/asm/tlbflush.h
180 +@@ -6,6 +6,7 @@
181 +
182 + #include <asm/processor.h>
183 + #include <asm/special_insns.h>
184 ++#include <asm/smp.h>
185 +
186 + static inline void __invpcid(unsigned long pcid, unsigned long addr,
187 + unsigned long type)
188 +@@ -64,10 +65,8 @@ static inline void invpcid_flush_all_nonglobals(void)
189 + #endif
190 +
191 + struct tlb_state {
192 +-#ifdef CONFIG_SMP
193 + struct mm_struct *active_mm;
194 + int state;
195 +-#endif
196 +
197 + /*
198 + * Access to this CR4 shadow and to H/W CR4 is protected by
199 +@@ -191,6 +190,14 @@ static inline void __flush_tlb_all(void)
200 + __flush_tlb_global();
201 + else
202 + __flush_tlb();
203 ++
204 ++ /*
205 ++ * Note: if we somehow had PCID but not PGE, then this wouldn't work --
206 ++ * we'd end up flushing kernel translations for the current ASID but
207 ++ * we might fail to flush kernel translations for other cached ASIDs.
208 ++ *
209 ++ * To avoid this issue, we force PCID off if PGE is off.
210 ++ */
211 + }
212 +
213 + static inline void __flush_tlb_one(unsigned long addr)
214 +@@ -204,7 +211,6 @@ static inline void __flush_tlb_one(unsigned long addr)
215 + /*
216 + * TLB flushing:
217 + *
218 +- * - flush_tlb() flushes the current mm struct TLBs
219 + * - flush_tlb_all() flushes all processes TLBs
220 + * - flush_tlb_mm(mm) flushes the specified mm context TLB's
221 + * - flush_tlb_page(vma, vmaddr) flushes one page
222 +@@ -216,84 +222,6 @@ static inline void __flush_tlb_one(unsigned long addr)
223 + * and page-granular flushes are available only on i486 and up.
224 + */
225 +
226 +-#ifndef CONFIG_SMP
227 +-
228 +-/* "_up" is for UniProcessor.
229 +- *
230 +- * This is a helper for other header functions. *Not* intended to be called
231 +- * directly. All global TLB flushes need to either call this, or to bump the
232 +- * vm statistics themselves.
233 +- */
234 +-static inline void __flush_tlb_up(void)
235 +-{
236 +- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
237 +- __flush_tlb();
238 +-}
239 +-
240 +-static inline void flush_tlb_all(void)
241 +-{
242 +- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
243 +- __flush_tlb_all();
244 +-}
245 +-
246 +-static inline void flush_tlb(void)
247 +-{
248 +- __flush_tlb_up();
249 +-}
250 +-
251 +-static inline void local_flush_tlb(void)
252 +-{
253 +- __flush_tlb_up();
254 +-}
255 +-
256 +-static inline void flush_tlb_mm(struct mm_struct *mm)
257 +-{
258 +- if (mm == current->active_mm)
259 +- __flush_tlb_up();
260 +-}
261 +-
262 +-static inline void flush_tlb_page(struct vm_area_struct *vma,
263 +- unsigned long addr)
264 +-{
265 +- if (vma->vm_mm == current->active_mm)
266 +- __flush_tlb_one(addr);
267 +-}
268 +-
269 +-static inline void flush_tlb_range(struct vm_area_struct *vma,
270 +- unsigned long start, unsigned long end)
271 +-{
272 +- if (vma->vm_mm == current->active_mm)
273 +- __flush_tlb_up();
274 +-}
275 +-
276 +-static inline void flush_tlb_mm_range(struct mm_struct *mm,
277 +- unsigned long start, unsigned long end, unsigned long vmflag)
278 +-{
279 +- if (mm == current->active_mm)
280 +- __flush_tlb_up();
281 +-}
282 +-
283 +-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
284 +- struct mm_struct *mm,
285 +- unsigned long start,
286 +- unsigned long end)
287 +-{
288 +-}
289 +-
290 +-static inline void reset_lazy_tlbstate(void)
291 +-{
292 +-}
293 +-
294 +-static inline void flush_tlb_kernel_range(unsigned long start,
295 +- unsigned long end)
296 +-{
297 +- flush_tlb_all();
298 +-}
299 +-
300 +-#else /* SMP */
301 +-
302 +-#include <asm/smp.h>
303 +-
304 + #define local_flush_tlb() __flush_tlb()
305 +
306 + #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
307 +@@ -302,13 +230,14 @@ static inline void flush_tlb_kernel_range(unsigned long start,
308 + flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
309 +
310 + extern void flush_tlb_all(void);
311 +-extern void flush_tlb_current_task(void);
312 +-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
313 + extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
314 + unsigned long end, unsigned long vmflag);
315 + extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
316 +
317 +-#define flush_tlb() flush_tlb_current_task()
318 ++static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
319 ++{
320 ++ flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
321 ++}
322 +
323 + void native_flush_tlb_others(const struct cpumask *cpumask,
324 + struct mm_struct *mm,
325 +@@ -323,8 +252,6 @@ static inline void reset_lazy_tlbstate(void)
326 + this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
327 + }
328 +
329 +-#endif /* SMP */
330 +-
331 + #ifndef CONFIG_PARAVIRT
332 + #define flush_tlb_others(mask, mm, start, end) \
333 + native_flush_tlb_others(mask, mm, start, end)
334 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
335 +index bd17db15a2c1..0b6124315441 100644
336 +--- a/arch/x86/kernel/cpu/bugs.c
337 ++++ b/arch/x86/kernel/cpu/bugs.c
338 +@@ -19,6 +19,14 @@
339 +
340 + void __init check_bugs(void)
341 + {
342 ++#ifdef CONFIG_X86_32
343 ++ /*
344 ++ * Regardless of whether PCID is enumerated, the SDM says
345 ++ * that it can't be enabled in 32-bit mode.
346 ++ */
347 ++ setup_clear_cpu_cap(X86_FEATURE_PCID);
348 ++#endif
349 ++
350 + identify_boot_cpu();
351 + #ifndef CONFIG_SMP
352 + pr_info("CPU: ");
353 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
354 +index c84b62956e8d..aa1e7246b06b 100644
355 +--- a/arch/x86/kernel/cpu/common.c
356 ++++ b/arch/x86/kernel/cpu/common.c
357 +@@ -162,6 +162,24 @@ static int __init x86_mpx_setup(char *s)
358 + }
359 + __setup("nompx", x86_mpx_setup);
360 +
361 ++#ifdef CONFIG_X86_64
362 ++static int __init x86_pcid_setup(char *s)
363 ++{
364 ++ /* require an exact match without trailing characters */
365 ++ if (strlen(s))
366 ++ return 0;
367 ++
368 ++ /* do not emit a message if the feature is not present */
369 ++ if (!boot_cpu_has(X86_FEATURE_PCID))
370 ++ return 1;
371 ++
372 ++ setup_clear_cpu_cap(X86_FEATURE_PCID);
373 ++ pr_info("nopcid: PCID feature disabled\n");
374 ++ return 1;
375 ++}
376 ++__setup("nopcid", x86_pcid_setup);
377 ++#endif
378 ++
379 + static int __init x86_noinvpcid_setup(char *s)
380 + {
381 + /* noinvpcid doesn't accept parameters */
382 +@@ -303,6 +321,25 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
383 + }
384 + }
385 +
386 ++static void setup_pcid(struct cpuinfo_x86 *c)
387 ++{
388 ++ if (cpu_has(c, X86_FEATURE_PCID)) {
389 ++ if (cpu_has(c, X86_FEATURE_PGE)) {
390 ++ cr4_set_bits(X86_CR4_PCIDE);
391 ++ } else {
392 ++ /*
393 ++ * flush_tlb_all(), as currently implemented, won't
394 ++ * work if PCID is on but PGE is not. Since that
395 ++ * combination doesn't exist on real hardware, there's
396 ++ * no reason to try to fully support it, but it's
397 ++ * polite to avoid corrupting data if we're on
398 ++ * an improperly configured VM.
399 ++ */
400 ++ clear_cpu_cap(c, X86_FEATURE_PCID);
401 ++ }
402 ++ }
403 ++}
404 ++
405 + /*
406 + * Some CPU features depend on higher CPUID levels, which may not always
407 + * be available due to CPUID level capping or broken virtualization
408 +@@ -934,6 +971,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
409 + setup_smep(c);
410 + setup_smap(c);
411 +
412 ++ /* Set up PCID */
413 ++ setup_pcid(c);
414 ++
415 + /*
416 + * The vendor-specific functions might have changed features.
417 + * Now we do "generic changes."
418 +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
419 +index f660d63f40fe..9a16932c7258 100644
420 +--- a/arch/x86/kernel/reboot.c
421 ++++ b/arch/x86/kernel/reboot.c
422 +@@ -93,6 +93,10 @@ void __noreturn machine_real_restart(unsigned int type)
423 + load_cr3(initial_page_table);
424 + #else
425 + write_cr3(real_mode_header->trampoline_pgd);
426 ++
427 ++ /* Exiting long mode will fail if CR4.PCIDE is set. */
428 ++ if (static_cpu_has(X86_FEATURE_PCID))
429 ++ cr4_clear_bits(X86_CR4_PCIDE);
430 + #endif
431 +
432 + /* Jump to the identity-mapped low memory code */
433 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
434 +index fbabe4fcc7fb..fe89f938e0f0 100644
435 +--- a/arch/x86/kernel/smpboot.c
436 ++++ b/arch/x86/kernel/smpboot.c
437 +@@ -104,25 +104,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
438 + spin_lock_irqsave(&rtc_lock, flags);
439 + CMOS_WRITE(0xa, 0xf);
440 + spin_unlock_irqrestore(&rtc_lock, flags);
441 +- local_flush_tlb();
442 +- pr_debug("1.\n");
443 + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
444 + start_eip >> 4;
445 +- pr_debug("2.\n");
446 + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
447 + start_eip & 0xf;
448 +- pr_debug("3.\n");
449 + }
450 +
451 + static inline void smpboot_restore_warm_reset_vector(void)
452 + {
453 + unsigned long flags;
454 +
455 +- /*
456 +- * Install writable page 0 entry to set BIOS data area.
457 +- */
458 +- local_flush_tlb();
459 +-
460 + /*
461 + * Paranoid: Set warm reset code and vector here back
462 + * to default values.
463 +diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
464 +index 524619351961..510e80da7de4 100644
465 +--- a/arch/x86/kernel/vm86_32.c
466 ++++ b/arch/x86/kernel/vm86_32.c
467 +@@ -187,7 +187,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
468 + pte_unmap_unlock(pte, ptl);
469 + out:
470 + up_write(&mm->mmap_sem);
471 +- flush_tlb();
472 ++ flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
473 + }
474 +
475 +
476 +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
477 +index 684edebb4a0c..00045499f6c2 100644
478 +--- a/arch/x86/kvm/emulate.c
479 ++++ b/arch/x86/kvm/emulate.c
480 +@@ -2383,9 +2383,21 @@ static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, u64 smbase, int n)
481 + }
482 +
483 + static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
484 +- u64 cr0, u64 cr4)
485 ++ u64 cr0, u64 cr3, u64 cr4)
486 + {
487 + int bad;
488 ++ u64 pcid;
489 ++
490 ++ /* In order to later set CR4.PCIDE, CR3[11:0] must be zero. */
491 ++ pcid = 0;
492 ++ if (cr4 & X86_CR4_PCIDE) {
493 ++ pcid = cr3 & 0xfff;
494 ++ cr3 &= ~0xfff;
495 ++ }
496 ++
497 ++ bad = ctxt->ops->set_cr(ctxt, 3, cr3);
498 ++ if (bad)
499 ++ return X86EMUL_UNHANDLEABLE;
500 +
501 + /*
502 + * First enable PAE, long mode needs it before CR0.PG = 1 is set.
503 +@@ -2404,6 +2416,12 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
504 + bad = ctxt->ops->set_cr(ctxt, 4, cr4);
505 + if (bad)
506 + return X86EMUL_UNHANDLEABLE;
507 ++ if (pcid) {
508 ++ bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
509 ++ if (bad)
510 ++ return X86EMUL_UNHANDLEABLE;
511 ++ }
512 ++
513 + }
514 +
515 + return X86EMUL_CONTINUE;
516 +@@ -2414,11 +2432,11 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
517 + struct desc_struct desc;
518 + struct desc_ptr dt;
519 + u16 selector;
520 +- u32 val, cr0, cr4;
521 ++ u32 val, cr0, cr3, cr4;
522 + int i;
523 +
524 + cr0 = GET_SMSTATE(u32, smbase, 0x7ffc);
525 +- ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u32, smbase, 0x7ff8));
526 ++ cr3 = GET_SMSTATE(u32, smbase, 0x7ff8);
527 + ctxt->eflags = GET_SMSTATE(u32, smbase, 0x7ff4) | X86_EFLAGS_FIXED;
528 + ctxt->_eip = GET_SMSTATE(u32, smbase, 0x7ff0);
529 +
530 +@@ -2460,14 +2478,14 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt, u64 smbase)
531 +
532 + ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7ef8));
533 +
534 +- return rsm_enter_protected_mode(ctxt, cr0, cr4);
535 ++ return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
536 + }
537 +
538 + static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
539 + {
540 + struct desc_struct desc;
541 + struct desc_ptr dt;
542 +- u64 val, cr0, cr4;
543 ++ u64 val, cr0, cr3, cr4;
544 + u32 base3;
545 + u16 selector;
546 + int i, r;
547 +@@ -2484,7 +2502,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
548 + ctxt->ops->set_dr(ctxt, 7, (val & DR7_VOLATILE) | DR7_FIXED_1);
549 +
550 + cr0 = GET_SMSTATE(u64, smbase, 0x7f58);
551 +- ctxt->ops->set_cr(ctxt, 3, GET_SMSTATE(u64, smbase, 0x7f50));
552 ++ cr3 = GET_SMSTATE(u64, smbase, 0x7f50);
553 + cr4 = GET_SMSTATE(u64, smbase, 0x7f48);
554 + ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smbase, 0x7f00));
555 + val = GET_SMSTATE(u64, smbase, 0x7ed0);
556 +@@ -2512,7 +2530,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
557 + dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
558 + ctxt->ops->set_gdt(ctxt, &dt);
559 +
560 +- r = rsm_enter_protected_mode(ctxt, cr0, cr4);
561 ++ r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
562 + if (r != X86EMUL_CONTINUE)
563 + return r;
564 +
565 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
566 +index e5f44f33de89..796f1ec67469 100644
567 +--- a/arch/x86/kvm/x86.c
568 ++++ b/arch/x86/kvm/x86.c
569 +@@ -6941,7 +6941,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
570 + #endif
571 +
572 + kvm_rip_write(vcpu, regs->rip);
573 +- kvm_set_rflags(vcpu, regs->rflags);
574 ++ kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED);
575 +
576 + vcpu->arch.exception.pending = false;
577 +
578 +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
579 +index 3aebbd6c6f5f..ed4b372860e4 100644
580 +--- a/arch/x86/mm/init.c
581 ++++ b/arch/x86/mm/init.c
582 +@@ -753,10 +753,8 @@ void __init zone_sizes_init(void)
583 + }
584 +
585 + DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
586 +-#ifdef CONFIG_SMP
587 + .active_mm = &init_mm,
588 + .state = 0,
589 +-#endif
590 + .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
591 + };
592 + EXPORT_SYMBOL_GPL(cpu_tlbstate);
593 +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
594 +index 45ba87466e6a..7a4cdb632508 100644
595 +--- a/arch/x86/mm/tlb.c
596 ++++ b/arch/x86/mm/tlb.c
597 +@@ -15,7 +15,7 @@
598 + #include <linux/debugfs.h>
599 +
600 + /*
601 +- * Smarter SMP flushing macros.
602 ++ * TLB flushing, formerly SMP-only
603 + * c/o Linus Torvalds.
604 + *
605 + * These mean you can really definitely utterly forget about
606 +@@ -28,8 +28,6 @@
607 + * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
608 + */
609 +
610 +-#ifdef CONFIG_SMP
611 +-
612 + struct flush_tlb_info {
613 + struct mm_struct *flush_mm;
614 + unsigned long flush_start;
615 +@@ -59,8 +57,6 @@ void leave_mm(int cpu)
616 + }
617 + EXPORT_SYMBOL_GPL(leave_mm);
618 +
619 +-#endif /* CONFIG_SMP */
620 +-
621 + void switch_mm(struct mm_struct *prev, struct mm_struct *next,
622 + struct task_struct *tsk)
623 + {
624 +@@ -77,10 +73,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
625 + unsigned cpu = smp_processor_id();
626 +
627 + if (likely(prev != next)) {
628 +-#ifdef CONFIG_SMP
629 + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
630 + this_cpu_write(cpu_tlbstate.active_mm, next);
631 +-#endif
632 + cpumask_set_cpu(cpu, mm_cpumask(next));
633 +
634 + /*
635 +@@ -137,9 +131,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
636 + if (unlikely(prev->context.ldt != next->context.ldt))
637 + load_mm_ldt(next);
638 + #endif
639 +- }
640 +-#ifdef CONFIG_SMP
641 +- else {
642 ++ } else {
643 + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
644 + BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
645 +
646 +@@ -166,11 +158,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
647 + load_mm_ldt(next);
648 + }
649 + }
650 +-#endif
651 + }
652 +
653 +-#ifdef CONFIG_SMP
654 +-
655 + /*
656 + * The flush IPI assumes that a thread switch happens in this order:
657 + * [cpu0: the cpu that switches]
658 +@@ -272,23 +261,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
659 + smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
660 + }
661 +
662 +-void flush_tlb_current_task(void)
663 +-{
664 +- struct mm_struct *mm = current->mm;
665 +-
666 +- preempt_disable();
667 +-
668 +- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
669 +-
670 +- /* This is an implicit full barrier that synchronizes with switch_mm. */
671 +- local_flush_tlb();
672 +-
673 +- trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
674 +- if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
675 +- flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
676 +- preempt_enable();
677 +-}
678 +-
679 + /*
680 + * See Documentation/x86/tlb.txt for details. We choose 33
681 + * because it is large enough to cover the vast majority (at
682 +@@ -309,6 +281,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
683 + unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
684 +
685 + preempt_disable();
686 ++
687 ++ if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
688 ++ base_pages_to_flush = (end - start) >> PAGE_SHIFT;
689 ++ if (base_pages_to_flush > tlb_single_page_flush_ceiling)
690 ++ base_pages_to_flush = TLB_FLUSH_ALL;
691 ++
692 + if (current->active_mm != mm) {
693 + /* Synchronize with switch_mm. */
694 + smp_mb();
695 +@@ -325,15 +303,11 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
696 + goto out;
697 + }
698 +
699 +- if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
700 +- base_pages_to_flush = (end - start) >> PAGE_SHIFT;
701 +-
702 + /*
703 + * Both branches below are implicit full barriers (MOV to CR or
704 + * INVLPG) that synchronize with switch_mm.
705 + */
706 +- if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
707 +- base_pages_to_flush = TLB_FLUSH_ALL;
708 ++ if (base_pages_to_flush == TLB_FLUSH_ALL) {
709 + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
710 + local_flush_tlb();
711 + } else {
712 +@@ -354,33 +328,6 @@ out:
713 + preempt_enable();
714 + }
715 +
716 +-void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
717 +-{
718 +- struct mm_struct *mm = vma->vm_mm;
719 +-
720 +- preempt_disable();
721 +-
722 +- if (current->active_mm == mm) {
723 +- if (current->mm) {
724 +- /*
725 +- * Implicit full barrier (INVLPG) that synchronizes
726 +- * with switch_mm.
727 +- */
728 +- __flush_tlb_one(start);
729 +- } else {
730 +- leave_mm(smp_processor_id());
731 +-
732 +- /* Synchronize with switch_mm. */
733 +- smp_mb();
734 +- }
735 +- }
736 +-
737 +- if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
738 +- flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
739 +-
740 +- preempt_enable();
741 +-}
742 +-
743 + static void do_flush_tlb_all(void *info)
744 + {
745 + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
746 +@@ -465,5 +412,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
747 + return 0;
748 + }
749 + late_initcall(create_tlb_single_page_flush_ceiling);
750 +-
751 +-#endif /* CONFIG_SMP */
752 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
753 +index ffa41591bff9..cbef64b508e1 100644
754 +--- a/arch/x86/xen/enlighten.c
755 ++++ b/arch/x86/xen/enlighten.c
756 +@@ -433,6 +433,12 @@ static void __init xen_init_cpuid_mask(void)
757 + ~((1 << X86_FEATURE_MTRR) | /* disable MTRR */
758 + (1 << X86_FEATURE_ACC)); /* thermal monitoring */
759 +
760 ++ /*
761 ++ * Xen PV would need some work to support PCID: CR3 handling as well
762 ++ * as xen_flush_tlb_others() would need updating.
763 ++ */
764 ++ cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_PCID % 32)); /* disable PCID */
765 ++
766 + if (!xen_initial_domain())
767 + cpuid_leaf1_edx_mask &=
768 + ~((1 << X86_FEATURE_ACPI)); /* disable ACPI */
769 +diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
770 +index b4f3930266b1..f620fe09d20a 100644
771 +--- a/crypto/mcryptd.c
772 ++++ b/crypto/mcryptd.c
773 +@@ -80,6 +80,7 @@ static int mcryptd_init_queue(struct mcryptd_queue *queue,
774 + pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
775 + crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
776 + INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
777 ++ spin_lock_init(&cpu_queue->q_lock);
778 + }
779 + return 0;
780 + }
781 +@@ -103,15 +104,16 @@ static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
782 + int cpu, err;
783 + struct mcryptd_cpu_queue *cpu_queue;
784 +
785 +- cpu = get_cpu();
786 +- cpu_queue = this_cpu_ptr(queue->cpu_queue);
787 +- rctx->tag.cpu = cpu;
788 ++ cpu_queue = raw_cpu_ptr(queue->cpu_queue);
789 ++ spin_lock(&cpu_queue->q_lock);
790 ++ cpu = smp_processor_id();
791 ++ rctx->tag.cpu = smp_processor_id();
792 +
793 + err = crypto_enqueue_request(&cpu_queue->queue, request);
794 + pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
795 + cpu, cpu_queue, request);
796 ++ spin_unlock(&cpu_queue->q_lock);
797 + queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
798 +- put_cpu();
799 +
800 + return err;
801 + }
802 +@@ -164,16 +166,11 @@ static void mcryptd_queue_worker(struct work_struct *work)
803 + cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
804 + i = 0;
805 + while (i < MCRYPTD_BATCH || single_task_running()) {
806 +- /*
807 +- * preempt_disable/enable is used to prevent
808 +- * being preempted by mcryptd_enqueue_request()
809 +- */
810 +- local_bh_disable();
811 +- preempt_disable();
812 ++
813 ++ spin_lock_bh(&cpu_queue->q_lock);
814 + backlog = crypto_get_backlog(&cpu_queue->queue);
815 + req = crypto_dequeue_request(&cpu_queue->queue);
816 +- preempt_enable();
817 +- local_bh_enable();
818 ++ spin_unlock_bh(&cpu_queue->q_lock);
819 +
820 + if (!req) {
821 + mcryptd_opportunistic_flush();
822 +@@ -188,7 +185,7 @@ static void mcryptd_queue_worker(struct work_struct *work)
823 + ++i;
824 + }
825 + if (cpu_queue->queue.qlen)
826 +- queue_work(kcrypto_wq, &cpu_queue->work);
827 ++ queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
828 + }
829 +
830 + void mcryptd_flusher(struct work_struct *__work)
831 +diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
832 +index 6682c5daf742..4c9be45ea328 100644
833 +--- a/drivers/acpi/apei/erst.c
834 ++++ b/drivers/acpi/apei/erst.c
835 +@@ -1020,7 +1020,7 @@ skip:
836 + /* The record may be cleared by others, try read next record */
837 + if (len == -ENOENT)
838 + goto skip;
839 +- else if (len < sizeof(*rcd)) {
840 ++ else if (len < 0 || len < sizeof(*rcd)) {
841 + rc = -EIO;
842 + goto out;
843 + }
844 +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
845 +index bc147582bed9..6d62b69c898e 100644
846 +--- a/drivers/infiniband/hw/cxgb4/cq.c
847 ++++ b/drivers/infiniband/hw/cxgb4/cq.c
848 +@@ -579,10 +579,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
849 + ret = -EAGAIN;
850 + goto skip_cqe;
851 + }
852 +- if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
853 ++ if (unlikely(!CQE_STATUS(hw_cqe) &&
854 ++ CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
855 + t4_set_wq_in_error(wq);
856 +- hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
857 +- goto proc_cqe;
858 ++ hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
859 + }
860 + goto proc_cqe;
861 + }
862 +diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
863 +index 6a0f6ec67c6b..ee7847a1ca06 100644
864 +--- a/drivers/mfd/cros_ec_spi.c
865 ++++ b/drivers/mfd/cros_ec_spi.c
866 +@@ -660,6 +660,7 @@ static int cros_ec_spi_probe(struct spi_device *spi)
867 + sizeof(struct ec_response_get_protocol_info);
868 + ec_dev->dout_size = sizeof(struct ec_host_request);
869 +
870 ++ ec_spi->last_transfer_ns = ktime_get_ns();
871 +
872 + err = cros_ec_register(ec_dev);
873 + if (err) {
874 +diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
875 +index 0a1606480023..cc832d309599 100644
876 +--- a/drivers/mfd/twl4030-audio.c
877 ++++ b/drivers/mfd/twl4030-audio.c
878 +@@ -159,13 +159,18 @@ unsigned int twl4030_audio_get_mclk(void)
879 + EXPORT_SYMBOL_GPL(twl4030_audio_get_mclk);
880 +
881 + static bool twl4030_audio_has_codec(struct twl4030_audio_data *pdata,
882 +- struct device_node *node)
883 ++ struct device_node *parent)
884 + {
885 ++ struct device_node *node;
886 ++
887 + if (pdata && pdata->codec)
888 + return true;
889 +
890 +- if (of_find_node_by_name(node, "codec"))
891 ++ node = of_get_child_by_name(parent, "codec");
892 ++ if (node) {
893 ++ of_node_put(node);
894 + return true;
895 ++ }
896 +
897 + return false;
898 + }
899 +diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c
900 +index 08a693cd38cc..72aab60ae846 100644
901 +--- a/drivers/mfd/twl6040.c
902 ++++ b/drivers/mfd/twl6040.c
903 +@@ -97,12 +97,16 @@ static struct reg_sequence twl6040_patch[] = {
904 + };
905 +
906 +
907 +-static bool twl6040_has_vibra(struct device_node *node)
908 ++static bool twl6040_has_vibra(struct device_node *parent)
909 + {
910 +-#ifdef CONFIG_OF
911 +- if (of_find_node_by_name(node, "vibra"))
912 ++ struct device_node *node;
913 ++
914 ++ node = of_get_child_by_name(parent, "vibra");
915 ++ if (node) {
916 ++ of_node_put(node);
917 + return true;
918 +-#endif
919 ++ }
920 ++
921 + return false;
922 + }
923 +
924 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
925 +index 3613469dc5c6..ab53e0cfb4dc 100644
926 +--- a/drivers/net/ethernet/broadcom/tg3.c
927 ++++ b/drivers/net/ethernet/broadcom/tg3.c
928 +@@ -14228,7 +14228,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
929 + /* Reset PHY, otherwise the read DMA engine will be in a mode that
930 + * breaks all requests to 256 bytes.
931 + */
932 +- if (tg3_asic_rev(tp) == ASIC_REV_57766)
933 ++ if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
934 ++ tg3_asic_rev(tp) == ASIC_REV_5717 ||
935 ++ tg3_asic_rev(tp) == ASIC_REV_5719)
936 + reset_phy = true;
937 +
938 + err = tg3_restart_hw(tp, reset_phy);
939 +diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
940 +index fc2fb25343f4..c122b3b99cd8 100644
941 +--- a/drivers/net/ethernet/marvell/mvmdio.c
942 ++++ b/drivers/net/ethernet/marvell/mvmdio.c
943 +@@ -241,7 +241,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
944 + dev->regs + MVMDIO_ERR_INT_MASK);
945 +
946 + } else if (dev->err_interrupt == -EPROBE_DEFER) {
947 +- return -EPROBE_DEFER;
948 ++ ret = -EPROBE_DEFER;
949 ++ goto out_mdio;
950 + }
951 +
952 + mutex_init(&dev->lock);
953 +diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
954 +index 15056f06754a..7430dd44019e 100644
955 +--- a/drivers/net/ethernet/marvell/mvneta.c
956 ++++ b/drivers/net/ethernet/marvell/mvneta.c
957 +@@ -914,6 +914,10 @@ static void mvneta_port_disable(struct mvneta_port *pp)
958 + val &= ~MVNETA_GMAC0_PORT_ENABLE;
959 + mvreg_write(pp, MVNETA_GMAC_CTRL_0, val);
960 +
961 ++ pp->link = 0;
962 ++ pp->duplex = -1;
963 ++ pp->speed = 0;
964 ++
965 + udelay(200);
966 + }
967 +
968 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
969 +index c8b85f1069ff..920391165f18 100644
970 +--- a/drivers/net/phy/micrel.c
971 ++++ b/drivers/net/phy/micrel.c
972 +@@ -541,6 +541,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
973 + phydev->link = 0;
974 + if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
975 + phydev->drv->config_intr(phydev);
976 ++ return genphy_config_aneg(phydev);
977 + }
978 +
979 + return 0;
980 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
981 +index 2cbecbda1ae3..b0ea8dee5f06 100644
982 +--- a/drivers/net/usb/qmi_wwan.c
983 ++++ b/drivers/net/usb/qmi_wwan.c
984 +@@ -737,6 +737,7 @@ static const struct usb_device_id products[] = {
985 + {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
986 + {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
987 + {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
988 ++ {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
989 + {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
990 + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
991 + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
992 +diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
993 +index d0c2759076a2..312cb5b74dec 100644
994 +--- a/drivers/parisc/lba_pci.c
995 ++++ b/drivers/parisc/lba_pci.c
996 +@@ -1654,3 +1654,36 @@ void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
997 + iounmap(base_addr);
998 + }
999 +
1000 ++
1001 ++/*
1002 ++ * The design of the Diva management card in rp34x0 machines (rp3410, rp3440)
1003 ++ * seems rushed, so that many built-in components simply don't work.
1004 ++ * The following quirks disable the serial AUX port and the built-in ATI RV100
1005 ++ * Radeon 7000 graphics card which both don't have any external connectors and
1006 ++ * thus are useless, and even worse, e.g. the AUX port occupies ttyS0 and as
1007 ++ * such makes those machines the only PARISC machines on which we can't use
1008 ++ * ttyS0 as boot console.
1009 ++ */
1010 ++static void quirk_diva_ati_card(struct pci_dev *dev)
1011 ++{
1012 ++ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1013 ++ dev->subsystem_device != 0x1292)
1014 ++ return;
1015 ++
1016 ++ dev_info(&dev->dev, "Hiding Diva built-in ATI card");
1017 ++ dev->device = 0;
1018 ++}
1019 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
1020 ++ quirk_diva_ati_card);
1021 ++
1022 ++static void quirk_diva_aux_disable(struct pci_dev *dev)
1023 ++{
1024 ++ if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1025 ++ dev->subsystem_device != 0x1291)
1026 ++ return;
1027 ++
1028 ++ dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
1029 ++ dev->device = 0;
1030 ++}
1031 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
1032 ++ quirk_diva_aux_disable);
1033 +diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
1034 +index fca925543fae..32bd8ab79d53 100644
1035 +--- a/drivers/pci/pci-driver.c
1036 ++++ b/drivers/pci/pci-driver.c
1037 +@@ -944,7 +944,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
1038 + if (pci_has_legacy_pm_support(pci_dev))
1039 + return pci_legacy_resume_early(dev);
1040 +
1041 +- pci_update_current_state(pci_dev, PCI_D0);
1042 ++ /*
1043 ++ * pci_restore_state() requires the device to be in D0 (because of MSI
1044 ++ * restoration among other things), so force it into D0 in case the
1045 ++ * driver's "freeze" callbacks put it into a low-power state directly.
1046 ++ */
1047 ++ pci_set_power_state(pci_dev, PCI_D0);
1048 + pci_restore_state(pci_dev);
1049 +
1050 + if (drv && drv->pm && drv->pm->thaw_noirq)
1051 +diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
1052 +index 3009121173cd..3c6ea5c3ddd2 100644
1053 +--- a/drivers/spi/spi-xilinx.c
1054 ++++ b/drivers/spi/spi-xilinx.c
1055 +@@ -271,6 +271,7 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
1056 + while (remaining_words) {
1057 + int n_words, tx_words, rx_words;
1058 + u32 sr;
1059 ++ int stalled;
1060 +
1061 + n_words = min(remaining_words, xspi->buffer_size);
1062 +
1063 +@@ -299,7 +300,17 @@ static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
1064 +
1065 + /* Read out all the data from the Rx FIFO */
1066 + rx_words = n_words;
1067 ++ stalled = 10;
1068 + while (rx_words) {
1069 ++ if (rx_words == n_words && !(stalled--) &&
1070 ++ !(sr & XSPI_SR_TX_EMPTY_MASK) &&
1071 ++ (sr & XSPI_SR_RX_EMPTY_MASK)) {
1072 ++ dev_err(&spi->dev,
1073 ++ "Detected stall. Check C_SPI_MODE and C_SPI_MEMORY\n");
1074 ++ xspi_init_hw(xspi);
1075 ++ return -EIO;
1076 ++ }
1077 ++
1078 + if ((sr & XSPI_SR_TX_EMPTY_MASK) && (rx_words > 1)) {
1079 + xilinx_spi_rx(xspi);
1080 + rx_words--;
1081 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
1082 +index 84e71bd19082..41dda25da049 100644
1083 +--- a/drivers/tty/n_tty.c
1084 ++++ b/drivers/tty/n_tty.c
1085 +@@ -1801,7 +1801,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1086 + {
1087 + struct n_tty_data *ldata = tty->disc_data;
1088 +
1089 +- if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
1090 ++ if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
1091 + bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
1092 + ldata->line_start = ldata->read_tail;
1093 + if (!L_ICANON(tty) || !read_cnt(ldata)) {
1094 +@@ -2493,7 +2493,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
1095 + return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
1096 + case TIOCINQ:
1097 + down_write(&tty->termios_rwsem);
1098 +- if (L_ICANON(tty))
1099 ++ if (L_ICANON(tty) && !L_EXTPROC(tty))
1100 + retval = inq_canon(ldata);
1101 + else
1102 + retval = read_cnt(ldata);
1103 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
1104 +index f6fde903fcad..22dcccf2d286 100644
1105 +--- a/drivers/usb/core/config.c
1106 ++++ b/drivers/usb/core/config.c
1107 +@@ -973,7 +973,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1108 + case USB_SSP_CAP_TYPE:
1109 + ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
1110 + ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
1111 +- USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
1112 ++ USB_SSP_SUBLINK_SPEED_ATTRIBS);
1113 + if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
1114 + dev->bos->ssp_cap = ssp_cap;
1115 + break;
1116 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1117 +index 50010282c010..c05c4f877750 100644
1118 +--- a/drivers/usb/core/quirks.c
1119 ++++ b/drivers/usb/core/quirks.c
1120 +@@ -57,10 +57,11 @@ static const struct usb_device_id usb_quirk_list[] = {
1121 + /* Microsoft LifeCam-VX700 v2.0 */
1122 + { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
1123 +
1124 +- /* Logitech HD Pro Webcams C920, C920-C and C930e */
1125 ++ /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
1126 + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
1127 + { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
1128 + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
1129 ++ { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
1130 +
1131 + /* Logitech ConferenceCam CC3000e */
1132 + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
1133 +@@ -154,6 +155,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1134 + /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
1135 + { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
1136 +
1137 ++ /* ELSA MicroLink 56K */
1138 ++ { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
1139 ++
1140 + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
1141 + { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
1142 +
1143 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1144 +index e8f990642281..cbf3be66f89c 100644
1145 +--- a/drivers/usb/host/xhci-pci.c
1146 ++++ b/drivers/usb/host/xhci-pci.c
1147 +@@ -184,6 +184,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1148 + xhci->quirks |= XHCI_TRUST_TX_LENGTH;
1149 + xhci->quirks |= XHCI_BROKEN_STREAMS;
1150 + }
1151 ++ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
1152 ++ pdev->device == 0x0014)
1153 ++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
1154 + if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
1155 + pdev->device == 0x0015)
1156 + xhci->quirks |= XHCI_RESET_ON_RESUME;
1157 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1158 +index 30344efc123f..64fe9dc25ed4 100644
1159 +--- a/drivers/usb/serial/ftdi_sio.c
1160 ++++ b/drivers/usb/serial/ftdi_sio.c
1161 +@@ -1017,6 +1017,7 @@ static const struct usb_device_id id_table_combined[] = {
1162 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1163 + { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
1164 + { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
1165 ++ { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
1166 + { } /* Terminating entry */
1167 + };
1168 +
1169 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1170 +index f9d15bd62785..543d2801632b 100644
1171 +--- a/drivers/usb/serial/ftdi_sio_ids.h
1172 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
1173 +@@ -913,6 +913,12 @@
1174 + #define ICPDAS_I7561U_PID 0x0104
1175 + #define ICPDAS_I7563U_PID 0x0105
1176 +
1177 ++/*
1178 ++ * Airbus Defence and Space
1179 ++ */
1180 ++#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */
1181 ++#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */
1182 ++
1183 + /*
1184 + * RT Systems programming cables for various ham radios
1185 + */
1186 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1187 +index ffa8ec917ff5..a818c43a02ec 100644
1188 +--- a/drivers/usb/serial/option.c
1189 ++++ b/drivers/usb/serial/option.c
1190 +@@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb);
1191 + /* These Quectel products use Qualcomm's vendor ID */
1192 + #define QUECTEL_PRODUCT_UC20 0x9003
1193 + #define QUECTEL_PRODUCT_UC15 0x9090
1194 ++/* These Yuga products use Qualcomm's vendor ID */
1195 ++#define YUGA_PRODUCT_CLM920_NC5 0x9625
1196 +
1197 + #define QUECTEL_VENDOR_ID 0x2c7c
1198 + /* These Quectel products use Quectel's vendor ID */
1199 +@@ -283,6 +285,7 @@ static void option_instat_callback(struct urb *urb);
1200 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
1201 + #define TELIT_PRODUCT_LE922_USBCFG5 0x1045
1202 + #define TELIT_PRODUCT_ME910 0x1100
1203 ++#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
1204 + #define TELIT_PRODUCT_LE920 0x1200
1205 + #define TELIT_PRODUCT_LE910 0x1201
1206 + #define TELIT_PRODUCT_LE910_USBCFG4 0x1206
1207 +@@ -648,6 +651,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
1208 + .reserved = BIT(1) | BIT(3),
1209 + };
1210 +
1211 ++static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
1212 ++ .sendsetup = BIT(0),
1213 ++ .reserved = BIT(3),
1214 ++};
1215 ++
1216 + static const struct option_blacklist_info telit_le910_blacklist = {
1217 + .sendsetup = BIT(0),
1218 + .reserved = BIT(1) | BIT(2),
1219 +@@ -677,6 +685,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
1220 + .reserved = BIT(4) | BIT(5),
1221 + };
1222 +
1223 ++static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
1224 ++ .reserved = BIT(1) | BIT(4),
1225 ++};
1226 ++
1227 + static const struct usb_device_id option_ids[] = {
1228 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
1229 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
1230 +@@ -1181,6 +1193,9 @@ static const struct usb_device_id option_ids[] = {
1231 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1232 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
1233 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1234 ++ /* Yuga products use Qualcomm vendor ID */
1235 ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
1236 ++ .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
1237 + /* Quectel products using Quectel vendor ID */
1238 + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
1239 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1240 +@@ -1247,6 +1262,8 @@ static const struct usb_device_id option_ids[] = {
1241 + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1242 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
1243 + .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
1244 ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
1245 ++ .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
1246 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1247 + .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1248 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
1249 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1250 +index 4516291df1b8..fb6dc16c754a 100644
1251 +--- a/drivers/usb/serial/qcserial.c
1252 ++++ b/drivers/usb/serial/qcserial.c
1253 +@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
1254 + {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
1255 + {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
1256 + {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
1257 ++ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
1258 ++ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
1259 + {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
1260 + {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
1261 + {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
1262 +@@ -346,6 +348,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
1263 + break;
1264 + case 2:
1265 + dev_dbg(dev, "NMEA GPS interface found\n");
1266 ++ sendsetup = true;
1267 + break;
1268 + case 3:
1269 + dev_dbg(dev, "Modem port found\n");
1270 +diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
1271 +index af10f7b131a4..325b4c05acdd 100644
1272 +--- a/drivers/usb/usbip/stub_main.c
1273 ++++ b/drivers/usb/usbip/stub_main.c
1274 +@@ -252,11 +252,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
1275 + struct stub_priv *priv;
1276 + struct urb *urb;
1277 +
1278 +- dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
1279 ++ dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
1280 +
1281 + while ((priv = stub_priv_pop(sdev))) {
1282 + urb = priv->urb;
1283 +- dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
1284 ++ dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
1285 ++ priv->seqnum);
1286 + usb_kill_urb(urb);
1287 +
1288 + kmem_cache_free(stub_priv_cache, priv);
1289 +diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
1290 +index 00e475c51a12..7de54a66044f 100644
1291 +--- a/drivers/usb/usbip/stub_rx.c
1292 ++++ b/drivers/usb/usbip/stub_rx.c
1293 +@@ -230,9 +230,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
1294 + if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
1295 + continue;
1296 +
1297 +- dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
1298 +- priv->urb);
1299 +-
1300 + /*
1301 + * This matched urb is not completed yet (i.e., be in
1302 + * flight in usb hcd hardware/driver). Now we are
1303 +@@ -271,8 +268,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
1304 + ret = usb_unlink_urb(priv->urb);
1305 + if (ret != -EINPROGRESS)
1306 + dev_err(&priv->urb->dev->dev,
1307 +- "failed to unlink a urb %p, ret %d\n",
1308 +- priv->urb, ret);
1309 ++ "failed to unlink a urb # %lu, ret %d\n",
1310 ++ priv->seqnum, ret);
1311 +
1312 + return 0;
1313 + }
1314 +diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
1315 +index af858d52608a..f4dd30c56f36 100644
1316 +--- a/drivers/usb/usbip/stub_tx.c
1317 ++++ b/drivers/usb/usbip/stub_tx.c
1318 +@@ -201,8 +201,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
1319 +
1320 + /* 1. setup usbip_header */
1321 + setup_ret_submit_pdu(&pdu_header, urb);
1322 +- usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
1323 +- pdu_header.base.seqnum, urb);
1324 ++ usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
1325 ++ pdu_header.base.seqnum);
1326 + usbip_header_correct_endian(&pdu_header, 1);
1327 +
1328 + iov[iovnum].iov_base = &pdu_header;
1329 +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
1330 +index 81b2b9f808b5..f9af04d7f02f 100644
1331 +--- a/drivers/usb/usbip/vhci_hcd.c
1332 ++++ b/drivers/usb/usbip/vhci_hcd.c
1333 +@@ -467,9 +467,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1334 + int ret = 0;
1335 + struct vhci_device *vdev;
1336 +
1337 +- usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
1338 +- hcd, urb, mem_flags);
1339 +-
1340 + /* patch to usb_sg_init() is in 2.5.60 */
1341 + BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
1342 +
1343 +@@ -627,8 +624,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1344 + struct vhci_priv *priv;
1345 + struct vhci_device *vdev;
1346 +
1347 +- pr_info("dequeue a urb %p\n", urb);
1348 +-
1349 + spin_lock(&the_controller->lock);
1350 +
1351 + priv = urb->hcpriv;
1352 +@@ -656,7 +651,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1353 + /* tcp connection is closed */
1354 + spin_lock(&vdev->priv_lock);
1355 +
1356 +- pr_info("device %p seems to be disconnected\n", vdev);
1357 + list_del(&priv->list);
1358 + kfree(priv);
1359 + urb->hcpriv = NULL;
1360 +@@ -668,8 +662,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1361 + * vhci_rx will receive RET_UNLINK and give back the URB.
1362 + * Otherwise, we give back it here.
1363 + */
1364 +- pr_info("gives back urb %p\n", urb);
1365 +-
1366 + usb_hcd_unlink_urb_from_ep(hcd, urb);
1367 +
1368 + spin_unlock(&the_controller->lock);
1369 +@@ -698,8 +690,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1370 +
1371 + unlink->unlink_seqnum = priv->seqnum;
1372 +
1373 +- pr_info("device %p seems to be still connected\n", vdev);
1374 +-
1375 + /* send cmd_unlink and try to cancel the pending URB in the
1376 + * peer */
1377 + list_add_tail(&unlink->list, &vdev->unlink_tx);
1378 +diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
1379 +index 00e4a54308e4..bc4eb0855314 100644
1380 +--- a/drivers/usb/usbip/vhci_rx.c
1381 ++++ b/drivers/usb/usbip/vhci_rx.c
1382 +@@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
1383 + urb = priv->urb;
1384 + status = urb->status;
1385 +
1386 +- usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
1387 +- urb, priv, seqnum);
1388 ++ usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
1389 +
1390 + switch (status) {
1391 + case -ENOENT:
1392 + /* fall through */
1393 + case -ECONNRESET:
1394 +- dev_info(&urb->dev->dev,
1395 +- "urb %p was unlinked %ssynchronuously.\n", urb,
1396 +- status == -ENOENT ? "" : "a");
1397 ++ dev_dbg(&urb->dev->dev,
1398 ++ "urb seq# %u was unlinked %ssynchronuously\n",
1399 ++ seqnum, status == -ENOENT ? "" : "a");
1400 + break;
1401 + case -EINPROGRESS:
1402 + /* no info output */
1403 + break;
1404 + default:
1405 +- dev_info(&urb->dev->dev,
1406 +- "urb %p may be in a error, status %d\n", urb,
1407 +- status);
1408 ++ dev_dbg(&urb->dev->dev,
1409 ++ "urb seq# %u may be in a error, status %d\n",
1410 ++ seqnum, status);
1411 + }
1412 +
1413 + list_del(&priv->list);
1414 +@@ -78,8 +77,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
1415 + spin_unlock(&vdev->priv_lock);
1416 +
1417 + if (!urb) {
1418 +- pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
1419 +- pr_info("max seqnum %d\n",
1420 ++ pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
1421 ++ pdu->base.seqnum,
1422 + atomic_read(&the_controller->seqnum));
1423 + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
1424 + return;
1425 +@@ -102,7 +101,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
1426 + if (usbip_dbg_flag_vhci_rx)
1427 + usbip_dump_urb(urb);
1428 +
1429 +- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
1430 ++ usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
1431 +
1432 + spin_lock(&the_controller->lock);
1433 + usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
1434 +@@ -165,7 +164,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
1435 + pr_info("the urb (seqnum %d) was already given back\n",
1436 + pdu->base.seqnum);
1437 + } else {
1438 +- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
1439 ++ usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
1440 +
1441 + /* If unlink is successful, status is -ECONNRESET */
1442 + urb->status = pdu->u.ret_unlink.status;
1443 +diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
1444 +index 409fd99f3257..3c5796c8633a 100644
1445 +--- a/drivers/usb/usbip/vhci_tx.c
1446 ++++ b/drivers/usb/usbip/vhci_tx.c
1447 +@@ -82,7 +82,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
1448 + memset(&msg, 0, sizeof(msg));
1449 + memset(&iov, 0, sizeof(iov));
1450 +
1451 +- usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
1452 ++ usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
1453 ++ priv->seqnum);
1454 +
1455 + /* 1. setup usbip_header */
1456 + setup_cmd_submit_pdu(&pdu_header, urb);
1457 +diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
1458 +index c23ee1f7ee80..c2ff077168d3 100644
1459 +--- a/include/crypto/mcryptd.h
1460 ++++ b/include/crypto/mcryptd.h
1461 +@@ -26,6 +26,7 @@ static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
1462 +
1463 + struct mcryptd_cpu_queue {
1464 + struct crypto_queue queue;
1465 ++ spinlock_t q_lock;
1466 + struct work_struct work;
1467 + };
1468 +
1469 +diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
1470 +index 402753bccafa..7b8e3afcc291 100644
1471 +--- a/include/linux/ipv6.h
1472 ++++ b/include/linux/ipv6.h
1473 +@@ -215,7 +215,8 @@ struct ipv6_pinfo {
1474 + * 100: prefer care-of address
1475 + */
1476 + dontfrag:1,
1477 +- autoflowlabel:1;
1478 ++ autoflowlabel:1,
1479 ++ autoflowlabel_set:1;
1480 + __u8 min_hopcount;
1481 + __u8 tclass;
1482 + __be32 rcv_flowinfo;
1483 +diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
1484 +index e623d392db0c..8ef3a61fdc74 100644
1485 +--- a/include/linux/vm_event_item.h
1486 ++++ b/include/linux/vm_event_item.h
1487 +@@ -80,10 +80,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
1488 + #endif
1489 + #endif
1490 + #ifdef CONFIG_DEBUG_TLBFLUSH
1491 +-#ifdef CONFIG_SMP
1492 + NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
1493 + NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
1494 +-#endif /* CONFIG_SMP */
1495 + NR_TLB_LOCAL_FLUSH_ALL,
1496 + NR_TLB_LOCAL_FLUSH_ONE,
1497 + #endif /* CONFIG_DEBUG_TLBFLUSH */
1498 +diff --git a/include/net/ip.h b/include/net/ip.h
1499 +index 7476bb10ff37..639398af273b 100644
1500 +--- a/include/net/ip.h
1501 ++++ b/include/net/ip.h
1502 +@@ -33,6 +33,8 @@
1503 + #include <net/flow.h>
1504 + #include <net/flow_dissector.h>
1505 +
1506 ++#define IPV4_MIN_MTU 68 /* RFC 791 */
1507 ++
1508 + struct sock;
1509 +
1510 + struct inet_skb_parm {
1511 +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
1512 +index 22c57e191a23..e5d228f7224c 100644
1513 +--- a/kernel/time/tick-sched.c
1514 ++++ b/kernel/time/tick-sched.c
1515 +@@ -568,6 +568,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
1516 + tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1517 + }
1518 +
1519 ++static inline bool local_timer_softirq_pending(void)
1520 ++{
1521 ++ return local_softirq_pending() & TIMER_SOFTIRQ;
1522 ++}
1523 ++
1524 + static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
1525 + ktime_t now, int cpu)
1526 + {
1527 +@@ -584,8 +589,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
1528 + } while (read_seqretry(&jiffies_lock, seq));
1529 + ts->last_jiffies = basejiff;
1530 +
1531 +- if (rcu_needs_cpu(basemono, &next_rcu) ||
1532 +- arch_needs_cpu() || irq_work_needs_cpu()) {
1533 ++ /*
1534 ++ * Keep the periodic tick, when RCU, architecture or irq_work
1535 ++ * requests it.
1536 ++ * Aside of that check whether the local timer softirq is
1537 ++ * pending. If so its a bad idea to call get_next_timer_interrupt()
1538 ++ * because there is an already expired timer, so it will request
1539 ++ * immeditate expiry, which rearms the hardware timer with a
1540 ++ * minimal delta which brings us back to this place
1541 ++ * immediately. Lather, rinse and repeat...
1542 ++ */
1543 ++ if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
1544 ++ irq_work_needs_cpu() || local_timer_softirq_pending()) {
1545 + next_tick = basemono + TICK_NSEC;
1546 + } else {
1547 + /*
1548 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
1549 +index 1275175b0946..d9cd6191760b 100644
1550 +--- a/kernel/trace/ring_buffer.c
1551 ++++ b/kernel/trace/ring_buffer.c
1552 +@@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
1553 + /* Missed count stored at end */
1554 + #define RB_MISSED_STORED (1 << 30)
1555 +
1556 ++#define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED)
1557 ++
1558 + struct buffer_data_page {
1559 + u64 time_stamp; /* page time stamp */
1560 + local_t commit; /* write committed index */
1561 +@@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
1562 + */
1563 + size_t ring_buffer_page_len(void *page)
1564 + {
1565 +- return local_read(&((struct buffer_data_page *)page)->commit)
1566 ++ struct buffer_data_page *bpage = page;
1567 ++
1568 ++ return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
1569 + + BUF_PAGE_HDR_SIZE;
1570 + }
1571 +
1572 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1573 +index 61d0960559c8..8aef4e63ac57 100644
1574 +--- a/kernel/trace/trace.c
1575 ++++ b/kernel/trace/trace.c
1576 +@@ -5754,7 +5754,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
1577 + .spd_release = buffer_spd_release,
1578 + };
1579 + struct buffer_ref *ref;
1580 +- int entries, size, i;
1581 ++ int entries, i;
1582 + ssize_t ret = 0;
1583 +
1584 + #ifdef CONFIG_TRACER_MAX_TRACE
1585 +@@ -5805,14 +5805,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
1586 + break;
1587 + }
1588 +
1589 +- /*
1590 +- * zero out any left over data, this is going to
1591 +- * user land.
1592 +- */
1593 +- size = ring_buffer_page_len(ref->page);
1594 +- if (size < PAGE_SIZE)
1595 +- memset(ref->page + size, 0, PAGE_SIZE - size);
1596 +-
1597 + page = virt_to_page(ref->page);
1598 +
1599 + spd.pages[i] = page;
1600 +@@ -6539,6 +6531,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
1601 + buf->data = alloc_percpu(struct trace_array_cpu);
1602 + if (!buf->data) {
1603 + ring_buffer_free(buf->buffer);
1604 ++ buf->buffer = NULL;
1605 + return -ENOMEM;
1606 + }
1607 +
1608 +@@ -6562,7 +6555,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
1609 + allocate_snapshot ? size : 1);
1610 + if (WARN_ON(ret)) {
1611 + ring_buffer_free(tr->trace_buffer.buffer);
1612 ++ tr->trace_buffer.buffer = NULL;
1613 + free_percpu(tr->trace_buffer.data);
1614 ++ tr->trace_buffer.data = NULL;
1615 + return -ENOMEM;
1616 + }
1617 + tr->allocated_snapshot = allocate_snapshot;
1618 +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
1619 +index a1f697ec4fc2..0ce26a0f7913 100644
1620 +--- a/net/bridge/br_netlink.c
1621 ++++ b/net/bridge/br_netlink.c
1622 +@@ -1067,19 +1067,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1623 + struct net_bridge *br = netdev_priv(dev);
1624 + int err;
1625 +
1626 ++ err = register_netdevice(dev);
1627 ++ if (err)
1628 ++ return err;
1629 ++
1630 + if (tb[IFLA_ADDRESS]) {
1631 + spin_lock_bh(&br->lock);
1632 + br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1633 + spin_unlock_bh(&br->lock);
1634 + }
1635 +
1636 +- err = register_netdevice(dev);
1637 +- if (err)
1638 +- return err;
1639 +-
1640 + err = br_changelink(dev, tb, data);
1641 + if (err)
1642 +- unregister_netdevice(dev);
1643 ++ br_dev_delete(dev, NULL);
1644 ++
1645 + return err;
1646 + }
1647 +
1648 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
1649 +index 2e9a1c2818c7..b5c351d2830b 100644
1650 +--- a/net/core/net_namespace.c
1651 ++++ b/net/core/net_namespace.c
1652 +@@ -261,7 +261,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
1653 + spin_lock_irqsave(&net->nsid_lock, flags);
1654 + peer = idr_find(&net->netns_ids, id);
1655 + if (peer)
1656 +- get_net(peer);
1657 ++ peer = maybe_get_net(peer);
1658 + spin_unlock_irqrestore(&net->nsid_lock, flags);
1659 + rcu_read_unlock();
1660 +
1661 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1662 +index d33609c2f276..86b619501350 100644
1663 +--- a/net/core/skbuff.c
1664 ++++ b/net/core/skbuff.c
1665 +@@ -3676,7 +3676,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
1666 + struct sock *sk = skb->sk;
1667 +
1668 + if (!skb_may_tx_timestamp(sk, false))
1669 +- return;
1670 ++ goto err;
1671 +
1672 + /* Take a reference to prevent skb_orphan() from freeing the socket,
1673 + * but only if the socket refcount is not zero.
1674 +@@ -3685,7 +3685,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
1675 + *skb_hwtstamps(skb) = *hwtstamps;
1676 + __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
1677 + sock_put(sk);
1678 ++ return;
1679 + }
1680 ++
1681 ++err:
1682 ++ kfree_skb(skb);
1683 + }
1684 + EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
1685 +
1686 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
1687 +index 0212591b0077..63f99e9a821b 100644
1688 +--- a/net/ipv4/devinet.c
1689 ++++ b/net/ipv4/devinet.c
1690 +@@ -1358,7 +1358,7 @@ skip:
1691 +
1692 + static bool inetdev_valid_mtu(unsigned int mtu)
1693 + {
1694 +- return mtu >= 68;
1695 ++ return mtu >= IPV4_MIN_MTU;
1696 + }
1697 +
1698 + static void inetdev_send_gratuitous_arp(struct net_device *dev,
1699 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
1700 +index 0cb240c749bf..c9e68ff48a72 100644
1701 +--- a/net/ipv4/fib_frontend.c
1702 ++++ b/net/ipv4/fib_frontend.c
1703 +@@ -1252,7 +1252,7 @@ fail:
1704 +
1705 + static void ip_fib_net_exit(struct net *net)
1706 + {
1707 +- unsigned int i;
1708 ++ int i;
1709 +
1710 + rtnl_lock();
1711 + #ifdef CONFIG_IP_MULTIPLE_TABLES
1712 +@@ -1260,7 +1260,12 @@ static void ip_fib_net_exit(struct net *net)
1713 + RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
1714 + RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
1715 + #endif
1716 +- for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
1717 ++ /* Destroy the tables in reverse order to guarantee that the
1718 ++ * local table, ID 255, is destroyed before the main table, ID
1719 ++ * 254. This is necessary as the local table may contain
1720 ++ * references to data contained in the main table.
1721 ++ */
1722 ++ for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
1723 + struct hlist_head *head = &net->ipv4.fib_table_hash[i];
1724 + struct hlist_node *tmp;
1725 + struct fib_table *tb;
1726 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
1727 +index 3809d523d012..b60106d34346 100644
1728 +--- a/net/ipv4/igmp.c
1729 ++++ b/net/ipv4/igmp.c
1730 +@@ -89,6 +89,7 @@
1731 + #include <linux/rtnetlink.h>
1732 + #include <linux/times.h>
1733 + #include <linux/pkt_sched.h>
1734 ++#include <linux/byteorder/generic.h>
1735 +
1736 + #include <net/net_namespace.h>
1737 + #include <net/arp.h>
1738 +@@ -327,6 +328,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
1739 + return scount;
1740 + }
1741 +
1742 ++/* source address selection per RFC 3376 section 4.2.13 */
1743 ++static __be32 igmpv3_get_srcaddr(struct net_device *dev,
1744 ++ const struct flowi4 *fl4)
1745 ++{
1746 ++ struct in_device *in_dev = __in_dev_get_rcu(dev);
1747 ++
1748 ++ if (!in_dev)
1749 ++ return htonl(INADDR_ANY);
1750 ++
1751 ++ for_ifa(in_dev) {
1752 ++ if (inet_ifa_match(fl4->saddr, ifa))
1753 ++ return fl4->saddr;
1754 ++ } endfor_ifa(in_dev);
1755 ++
1756 ++ return htonl(INADDR_ANY);
1757 ++}
1758 ++
1759 + static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
1760 + {
1761 + struct sk_buff *skb;
1762 +@@ -374,7 +392,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
1763 + pip->frag_off = htons(IP_DF);
1764 + pip->ttl = 1;
1765 + pip->daddr = fl4.daddr;
1766 +- pip->saddr = fl4.saddr;
1767 ++ pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
1768 + pip->protocol = IPPROTO_IGMP;
1769 + pip->tot_len = 0; /* filled in later */
1770 + ip_select_ident(net, skb, NULL);
1771 +@@ -410,16 +428,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
1772 + }
1773 +
1774 + static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
1775 +- int type, struct igmpv3_grec **ppgr)
1776 ++ int type, struct igmpv3_grec **ppgr, unsigned int mtu)
1777 + {
1778 + struct net_device *dev = pmc->interface->dev;
1779 + struct igmpv3_report *pih;
1780 + struct igmpv3_grec *pgr;
1781 +
1782 +- if (!skb)
1783 +- skb = igmpv3_newpack(dev, dev->mtu);
1784 +- if (!skb)
1785 +- return NULL;
1786 ++ if (!skb) {
1787 ++ skb = igmpv3_newpack(dev, mtu);
1788 ++ if (!skb)
1789 ++ return NULL;
1790 ++ }
1791 + pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
1792 + pgr->grec_type = type;
1793 + pgr->grec_auxwords = 0;
1794 +@@ -441,12 +460,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
1795 + struct igmpv3_grec *pgr = NULL;
1796 + struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
1797 + int scount, stotal, first, isquery, truncate;
1798 ++ unsigned int mtu;
1799 +
1800 + if (pmc->multiaddr == IGMP_ALL_HOSTS)
1801 + return skb;
1802 + if (ipv4_is_local_multicast(pmc->multiaddr) && !sysctl_igmp_llm_reports)
1803 + return skb;
1804 +
1805 ++ mtu = READ_ONCE(dev->mtu);
1806 ++ if (mtu < IPV4_MIN_MTU)
1807 ++ return skb;
1808 ++
1809 + isquery = type == IGMPV3_MODE_IS_INCLUDE ||
1810 + type == IGMPV3_MODE_IS_EXCLUDE;
1811 + truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
1812 +@@ -467,7 +491,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
1813 + AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
1814 + if (skb)
1815 + igmpv3_sendpack(skb);
1816 +- skb = igmpv3_newpack(dev, dev->mtu);
1817 ++ skb = igmpv3_newpack(dev, mtu);
1818 + }
1819 + }
1820 + first = 1;
1821 +@@ -494,12 +518,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
1822 + pgr->grec_nsrcs = htons(scount);
1823 + if (skb)
1824 + igmpv3_sendpack(skb);
1825 +- skb = igmpv3_newpack(dev, dev->mtu);
1826 ++ skb = igmpv3_newpack(dev, mtu);
1827 + first = 1;
1828 + scount = 0;
1829 + }
1830 + if (first) {
1831 +- skb = add_grhead(skb, pmc, type, &pgr);
1832 ++ skb = add_grhead(skb, pmc, type, &pgr, mtu);
1833 + first = 0;
1834 + }
1835 + if (!skb)
1836 +@@ -533,7 +557,7 @@ empty_source:
1837 + igmpv3_sendpack(skb);
1838 + skb = NULL; /* add_grhead will get a new one */
1839 + }
1840 +- skb = add_grhead(skb, pmc, type, &pgr);
1841 ++ skb = add_grhead(skb, pmc, type, &pgr, mtu);
1842 + }
1843 + }
1844 + if (pgr)
1845 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
1846 +index 3310ac75e3f3..c18245e05d26 100644
1847 +--- a/net/ipv4/ip_tunnel.c
1848 ++++ b/net/ipv4/ip_tunnel.c
1849 +@@ -400,8 +400,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
1850 + dev->needed_headroom = t_hlen + hlen;
1851 + mtu -= (dev->hard_header_len + t_hlen);
1852 +
1853 +- if (mtu < 68)
1854 +- mtu = 68;
1855 ++ if (mtu < IPV4_MIN_MTU)
1856 ++ mtu = IPV4_MIN_MTU;
1857 +
1858 + return mtu;
1859 + }
1860 +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
1861 +index 8f2cd7d09720..4d3d4291c82f 100644
1862 +--- a/net/ipv4/raw.c
1863 ++++ b/net/ipv4/raw.c
1864 +@@ -500,11 +500,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1865 + int err;
1866 + struct ip_options_data opt_copy;
1867 + struct raw_frag_vec rfv;
1868 ++ int hdrincl;
1869 +
1870 + err = -EMSGSIZE;
1871 + if (len > 0xFFFF)
1872 + goto out;
1873 +
1874 ++ /* hdrincl should be READ_ONCE(inet->hdrincl)
1875 ++ * but READ_ONCE() doesn't work with bit fields
1876 ++ */
1877 ++ hdrincl = inet->hdrincl;
1878 + /*
1879 + * Check the flags.
1880 + */
1881 +@@ -579,7 +584,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1882 + /* Linux does not mangle headers on raw sockets,
1883 + * so that IP options + IP_HDRINCL is non-sense.
1884 + */
1885 +- if (inet->hdrincl)
1886 ++ if (hdrincl)
1887 + goto done;
1888 + if (ipc.opt->opt.srr) {
1889 + if (!daddr)
1890 +@@ -601,9 +606,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1891 +
1892 + flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
1893 + RT_SCOPE_UNIVERSE,
1894 +- inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
1895 ++ hdrincl ? IPPROTO_RAW : sk->sk_protocol,
1896 + inet_sk_flowi_flags(sk) |
1897 +- (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
1898 ++ (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
1899 + daddr, saddr, 0, 0);
1900 +
1901 + if (!saddr && ipc.oif) {
1902 +@@ -612,7 +617,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1903 + goto done;
1904 + }
1905 +
1906 +- if (!inet->hdrincl) {
1907 ++ if (!hdrincl) {
1908 + rfv.msg = msg;
1909 + rfv.hlen = 0;
1910 +
1911 +@@ -637,7 +642,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1912 + goto do_confirm;
1913 + back_from_confirm:
1914 +
1915 +- if (inet->hdrincl)
1916 ++ if (hdrincl)
1917 + err = raw_send_hdrinc(sk, &fl4, msg, len,
1918 + &rt, msg->msg_flags);
1919 +
1920 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
1921 +index a5d790c13ef5..61c93a93f228 100644
1922 +--- a/net/ipv4/tcp_ipv4.c
1923 ++++ b/net/ipv4/tcp_ipv4.c
1924 +@@ -823,7 +823,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
1925 + tcp_time_stamp,
1926 + req->ts_recent,
1927 + 0,
1928 +- tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
1929 ++ tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
1930 + AF_INET),
1931 + inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
1932 + ip_hdr(skb)->tos);
1933 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
1934 +index 83ec6639b04d..637a0e41b0aa 100644
1935 +--- a/net/ipv6/af_inet6.c
1936 ++++ b/net/ipv6/af_inet6.c
1937 +@@ -200,7 +200,6 @@ lookup_protocol:
1938 + np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
1939 + np->mc_loop = 1;
1940 + np->pmtudisc = IPV6_PMTUDISC_WANT;
1941 +- np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
1942 + sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
1943 +
1944 + /* Init the ipv4 part of the socket since we can have sockets
1945 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1946 +index 71624cf26832..1b4f5f2d2929 100644
1947 +--- a/net/ipv6/ip6_output.c
1948 ++++ b/net/ipv6/ip6_output.c
1949 +@@ -148,6 +148,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1950 + !(IP6CB(skb)->flags & IP6SKB_REROUTED));
1951 + }
1952 +
1953 ++static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
1954 ++{
1955 ++ if (!np->autoflowlabel_set)
1956 ++ return ip6_default_np_autolabel(net);
1957 ++ else
1958 ++ return np->autoflowlabel;
1959 ++}
1960 ++
1961 + /*
1962 + * xmit an sk_buff (used by TCP, SCTP and DCCP)
1963 + * Note : socket lock is not held for SYNACK packets, but might be modified
1964 +@@ -211,7 +219,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
1965 + hlimit = ip6_dst_hoplimit(dst);
1966 +
1967 + ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
1968 +- np->autoflowlabel, fl6));
1969 ++ ip6_autoflowlabel(net, np), fl6));
1970 +
1971 + hdr->payload_len = htons(seg_len);
1972 + hdr->nexthdr = proto;
1973 +@@ -1675,7 +1683,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
1974 +
1975 + ip6_flow_hdr(hdr, v6_cork->tclass,
1976 + ip6_make_flowlabel(net, skb, fl6->flowlabel,
1977 +- np->autoflowlabel, fl6));
1978 ++ ip6_autoflowlabel(net, np), fl6));
1979 + hdr->hop_limit = v6_cork->hop_limit;
1980 + hdr->nexthdr = proto;
1981 + hdr->saddr = fl6->saddr;
1982 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
1983 +index a4a30d2ca66f..435e26210587 100644
1984 +--- a/net/ipv6/ipv6_sockglue.c
1985 ++++ b/net/ipv6/ipv6_sockglue.c
1986 +@@ -872,6 +872,7 @@ pref_skip_coa:
1987 + break;
1988 + case IPV6_AUTOFLOWLABEL:
1989 + np->autoflowlabel = valbool;
1990 ++ np->autoflowlabel_set = 1;
1991 + retv = 0;
1992 + break;
1993 + }
1994 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
1995 +index d64ee7e83664..06640685ff43 100644
1996 +--- a/net/ipv6/mcast.c
1997 ++++ b/net/ipv6/mcast.c
1998 +@@ -1668,16 +1668,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
1999 + }
2000 +
2001 + static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2002 +- int type, struct mld2_grec **ppgr)
2003 ++ int type, struct mld2_grec **ppgr, unsigned int mtu)
2004 + {
2005 +- struct net_device *dev = pmc->idev->dev;
2006 + struct mld2_report *pmr;
2007 + struct mld2_grec *pgr;
2008 +
2009 +- if (!skb)
2010 +- skb = mld_newpack(pmc->idev, dev->mtu);
2011 +- if (!skb)
2012 +- return NULL;
2013 ++ if (!skb) {
2014 ++ skb = mld_newpack(pmc->idev, mtu);
2015 ++ if (!skb)
2016 ++ return NULL;
2017 ++ }
2018 + pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
2019 + pgr->grec_type = type;
2020 + pgr->grec_auxwords = 0;
2021 +@@ -1700,10 +1700,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2022 + struct mld2_grec *pgr = NULL;
2023 + struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
2024 + int scount, stotal, first, isquery, truncate;
2025 ++ unsigned int mtu;
2026 +
2027 + if (pmc->mca_flags & MAF_NOREPORT)
2028 + return skb;
2029 +
2030 ++ mtu = READ_ONCE(dev->mtu);
2031 ++ if (mtu < IPV6_MIN_MTU)
2032 ++ return skb;
2033 ++
2034 + isquery = type == MLD2_MODE_IS_INCLUDE ||
2035 + type == MLD2_MODE_IS_EXCLUDE;
2036 + truncate = type == MLD2_MODE_IS_EXCLUDE ||
2037 +@@ -1724,7 +1729,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2038 + AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
2039 + if (skb)
2040 + mld_sendpack(skb);
2041 +- skb = mld_newpack(idev, dev->mtu);
2042 ++ skb = mld_newpack(idev, mtu);
2043 + }
2044 + }
2045 + first = 1;
2046 +@@ -1751,12 +1756,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2047 + pgr->grec_nsrcs = htons(scount);
2048 + if (skb)
2049 + mld_sendpack(skb);
2050 +- skb = mld_newpack(idev, dev->mtu);
2051 ++ skb = mld_newpack(idev, mtu);
2052 + first = 1;
2053 + scount = 0;
2054 + }
2055 + if (first) {
2056 +- skb = add_grhead(skb, pmc, type, &pgr);
2057 ++ skb = add_grhead(skb, pmc, type, &pgr, mtu);
2058 + first = 0;
2059 + }
2060 + if (!skb)
2061 +@@ -1790,7 +1795,7 @@ empty_source:
2062 + mld_sendpack(skb);
2063 + skb = NULL; /* add_grhead will get a new one */
2064 + }
2065 +- skb = add_grhead(skb, pmc, type, &pgr);
2066 ++ skb = add_grhead(skb, pmc, type, &pgr, mtu);
2067 + }
2068 + }
2069 + if (pgr)
2070 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
2071 +index 59c908ff251a..74cbcc4b399c 100644
2072 +--- a/net/ipv6/tcp_ipv6.c
2073 ++++ b/net/ipv6/tcp_ipv6.c
2074 +@@ -949,7 +949,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
2075 + tcp_rsk(req)->rcv_nxt,
2076 + req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
2077 + tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
2078 +- tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
2079 ++ tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
2080 + 0, 0);
2081 + }
2082 +
2083 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2084 +index 5fabe68e20dd..48e1608414e6 100644
2085 +--- a/net/netlink/af_netlink.c
2086 ++++ b/net/netlink/af_netlink.c
2087 +@@ -261,6 +261,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
2088 + struct sock *sk = skb->sk;
2089 + int ret = -ENOMEM;
2090 +
2091 ++ if (!net_eq(dev_net(dev), sock_net(sk)))
2092 ++ return 0;
2093 ++
2094 + dev_hold(dev);
2095 +
2096 + if (is_vmalloc_addr(skb->head))
2097 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2098 +index 61189c576963..a870d27ca778 100644
2099 +--- a/net/sctp/socket.c
2100 ++++ b/net/sctp/socket.c
2101 +@@ -4153,7 +4153,7 @@ static int sctp_init_sock(struct sock *sk)
2102 + SCTP_DBG_OBJCNT_INC(sock);
2103 +
2104 + local_bh_disable();
2105 +- percpu_counter_inc(&sctp_sockets_allocated);
2106 ++ sk_sockets_allocated_inc(sk);
2107 + sock_prot_inuse_add(net, sk->sk_prot, 1);
2108 +
2109 + /* Nothing can fail after this block, otherwise
2110 +@@ -4197,7 +4197,7 @@ static void sctp_destroy_sock(struct sock *sk)
2111 + }
2112 + sctp_endpoint_free(sp->ep);
2113 + local_bh_disable();
2114 +- percpu_counter_dec(&sctp_sockets_allocated);
2115 ++ sk_sockets_allocated_dec(sk);
2116 + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
2117 + local_bh_enable();
2118 + }
2119 +diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
2120 +index b450a27588c8..16f8124b1150 100644
2121 +--- a/sound/core/rawmidi.c
2122 ++++ b/sound/core/rawmidi.c
2123 +@@ -579,15 +579,14 @@ static int snd_rawmidi_info_user(struct snd_rawmidi_substream *substream,
2124 + return 0;
2125 + }
2126 +
2127 +-int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
2128 ++static int __snd_rawmidi_info_select(struct snd_card *card,
2129 ++ struct snd_rawmidi_info *info)
2130 + {
2131 + struct snd_rawmidi *rmidi;
2132 + struct snd_rawmidi_str *pstr;
2133 + struct snd_rawmidi_substream *substream;
2134 +
2135 +- mutex_lock(&register_mutex);
2136 + rmidi = snd_rawmidi_search(card, info->device);
2137 +- mutex_unlock(&register_mutex);
2138 + if (!rmidi)
2139 + return -ENXIO;
2140 + if (info->stream < 0 || info->stream > 1)
2141 +@@ -603,6 +602,16 @@ int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info
2142 + }
2143 + return -ENXIO;
2144 + }
2145 ++
2146 ++int snd_rawmidi_info_select(struct snd_card *card, struct snd_rawmidi_info *info)
2147 ++{
2148 ++ int ret;
2149 ++
2150 ++ mutex_lock(&register_mutex);
2151 ++ ret = __snd_rawmidi_info_select(card, info);
2152 ++ mutex_unlock(&register_mutex);
2153 ++ return ret;
2154 ++}
2155 + EXPORT_SYMBOL(snd_rawmidi_info_select);
2156 +
2157 + static int snd_rawmidi_info_select_user(struct snd_card *card,
2158 +diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
2159 +index cce9ae5ec93b..bd7bcf428bcf 100644
2160 +--- a/sound/hda/hdac_i915.c
2161 ++++ b/sound/hda/hdac_i915.c
2162 +@@ -183,7 +183,7 @@ static int hdac_component_master_match(struct device *dev, void *data)
2163 + */
2164 + int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
2165 + {
2166 +- if (WARN_ON(!hdac_acomp))
2167 ++ if (!hdac_acomp)
2168 + return -ENODEV;
2169 +
2170 + hdac_acomp->audio_ops = aops;
2171 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2172 +index 2159b18f76bf..5875a08d555e 100644
2173 +--- a/sound/pci/hda/patch_realtek.c
2174 ++++ b/sound/pci/hda/patch_realtek.c
2175 +@@ -5953,6 +5953,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
2176 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2177 + {0x1b, 0x01011020},
2178 + {0x21, 0x02211010}),
2179 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2180 ++ {0x12, 0x90a60130},
2181 ++ {0x14, 0x90170110},
2182 ++ {0x1b, 0x01011020},
2183 ++ {0x21, 0x0221101f}),
2184 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2185 + {0x12, 0x90a60160},
2186 + {0x14, 0x90170120},
2187 +diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
2188 +index a5a4e9f75c57..a06395507225 100644
2189 +--- a/sound/soc/codecs/twl4030.c
2190 ++++ b/sound/soc/codecs/twl4030.c
2191 +@@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
2192 + struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
2193 + struct device_node *twl4030_codec_node = NULL;
2194 +
2195 +- twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node,
2196 ++ twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
2197 + "codec");
2198 +
2199 + if (!pdata && twl4030_codec_node) {
2200 +@@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
2201 + GFP_KERNEL);
2202 + if (!pdata) {
2203 + dev_err(codec->dev, "Can not allocate memory\n");
2204 ++ of_node_put(twl4030_codec_node);
2205 + return NULL;
2206 + }
2207 + twl4030_setup_pdata_of(pdata, twl4030_codec_node);
2208 ++ of_node_put(twl4030_codec_node);
2209 + }
2210 +
2211 + return pdata;
2212 +diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
2213 +index 95d2392303eb..7ca67613e0d4 100644
2214 +--- a/sound/soc/fsl/fsl_ssi.c
2215 ++++ b/sound/soc/fsl/fsl_ssi.c
2216 +@@ -1408,12 +1408,6 @@ static int fsl_ssi_probe(struct platform_device *pdev)
2217 + sizeof(fsl_ssi_ac97_dai));
2218 +
2219 + fsl_ac97_data = ssi_private;
2220 +-
2221 +- ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
2222 +- if (ret) {
2223 +- dev_err(&pdev->dev, "could not set AC'97 ops\n");
2224 +- return ret;
2225 +- }
2226 + } else {
2227 + /* Initialize this copy of the CPU DAI driver structure */
2228 + memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
2229 +@@ -1473,6 +1467,14 @@ static int fsl_ssi_probe(struct platform_device *pdev)
2230 + return ret;
2231 + }
2232 +
2233 ++ if (fsl_ssi_is_ac97(ssi_private)) {
2234 ++ ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
2235 ++ if (ret) {
2236 ++ dev_err(&pdev->dev, "could not set AC'97 ops\n");
2237 ++ goto error_ac97_ops;
2238 ++ }
2239 ++ }
2240 ++
2241 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
2242 + &ssi_private->cpu_dai_drv, 1);
2243 + if (ret) {
2244 +@@ -1556,6 +1558,10 @@ error_sound_card:
2245 + fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
2246 +
2247 + error_asoc_register:
2248 ++ if (fsl_ssi_is_ac97(ssi_private))
2249 ++ snd_soc_set_ac97_ops(NULL);
2250 ++
2251 ++error_ac97_ops:
2252 + if (ssi_private->soc->imx)
2253 + fsl_ssi_imx_clean(pdev, ssi_private);
2254 +
2255 +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
2256 +index 0ed9ae030ce1..c9ae29068c7c 100644
2257 +--- a/sound/usb/mixer.c
2258 ++++ b/sound/usb/mixer.c
2259 +@@ -2101,20 +2101,25 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
2260 + kctl->private_value = (unsigned long)namelist;
2261 + kctl->private_free = usb_mixer_selector_elem_free;
2262 +
2263 +- nameid = uac_selector_unit_iSelector(desc);
2264 ++ /* check the static mapping table at first */
2265 + len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
2266 +- if (len)
2267 +- ;
2268 +- else if (nameid)
2269 +- len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
2270 +- sizeof(kctl->id.name));
2271 +- else
2272 +- len = get_term_name(state, &state->oterm,
2273 +- kctl->id.name, sizeof(kctl->id.name), 0);
2274 +-
2275 + if (!len) {
2276 +- strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
2277 ++ /* no mapping ? */
2278 ++ /* if iSelector is given, use it */
2279 ++ nameid = uac_selector_unit_iSelector(desc);
2280 ++ if (nameid)
2281 ++ len = snd_usb_copy_string_desc(state, nameid,
2282 ++ kctl->id.name,
2283 ++ sizeof(kctl->id.name));
2284 ++ /* ... or pick up the terminal name at next */
2285 ++ if (!len)
2286 ++ len = get_term_name(state, &state->oterm,
2287 ++ kctl->id.name, sizeof(kctl->id.name), 0);
2288 ++ /* ... or use the fixed string "USB" as the last resort */
2289 ++ if (!len)
2290 ++ strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
2291 +
2292 ++ /* and add the proper suffix */
2293 + if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
2294 + append_ctl_name(kctl, " Clock Source");
2295 + else if ((state->oterm.type & 0xff00) == 0x0100)
2296 +diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
2297 +index 2b3d6d235015..3d7b42e77299 100644
2298 +--- a/tools/usb/usbip/src/utils.c
2299 ++++ b/tools/usb/usbip/src/utils.c
2300 +@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
2301 + char command[SYSFS_BUS_ID_SIZE + 4];
2302 + char match_busid_attr_path[SYSFS_PATH_MAX];
2303 + int rc;
2304 ++ int cmd_size;
2305 +
2306 + snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
2307 + "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
2308 +@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
2309 + attr_name);
2310 +
2311 + if (add)
2312 +- snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
2313 ++ cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
2314 ++ busid);
2315 + else
2316 +- snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
2317 ++ cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
2318 ++ busid);
2319 +
2320 + rc = write_sysfs_attribute(match_busid_attr_path, command,
2321 +- sizeof(command));
2322 ++ cmd_size);
2323 + if (rc < 0) {
2324 + dbg("failed to write match_busid: %s", strerror(errno));
2325 + return -1;