Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Tue, 02 Jan 2018 20:13:47
Message-Id: 1514924016.592fd2b1122f91a49dfa702aeb350477ee3c7c5f.mpagano@gentoo
1 commit: 592fd2b1122f91a49dfa702aeb350477ee3c7c5f
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Tue Jan 2 20:13:36 2018 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Tue Jan 2 20:13:36 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=592fd2b1122f91a49dfa702aeb350477ee3c7c5f
7
8 Linux patch 4.9.74
9
10 0000_README | 4 +
11 1073_linux-4.9.74.patch | 2923 +++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 2927 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index a3e2751..350d2c5 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -335,6 +335,10 @@ Patch: 1072_linux-4.9.73.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.73
21
22 +Patch: 1073_linux-4.9.74.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.74
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1073_linux-4.9.74.patch b/1073_linux-4.9.74.patch
31 new file mode 100644
32 index 0000000..7efaa13
33 --- /dev/null
34 +++ b/1073_linux-4.9.74.patch
35 @@ -0,0 +1,2923 @@
36 +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
37 +index 86a6746f6833..152ec4e87b57 100644
38 +--- a/Documentation/kernel-parameters.txt
39 ++++ b/Documentation/kernel-parameters.txt
40 +@@ -2795,6 +2795,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
41 + nopat [X86] Disable PAT (page attribute table extension of
42 + pagetables) support.
43 +
44 ++ nopcid [X86-64] Disable the PCID cpu feature.
45 ++
46 + norandmaps Don't use address space randomization. Equivalent to
47 + echo 0 > /proc/sys/kernel/randomize_va_space
48 +
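
For background on the feature this parameter disables: PCID lets the CPU tag
TLB entries with a 12-bit process-context identifier carried in the low bits
of CR3, so a context switch no longer has to flush the whole TLB. A hedged
sketch of the encoding, per the Intel SDM; the helper name and layout are
illustrative, not the kernel's:

#include <stdint.h>

/* With CR4.PCIDE set, CR3 bits 11:0 select the PCID and bit 63
 * ("no-flush") suppresses the implicit TLB flush on the CR3 write. */
#define CR3_PCID_MASK   0xfffULL
#define CR3_NOFLUSH     (1ULL << 63)

static inline uint64_t build_cr3(uint64_t pgd_phys, uint16_t pcid, int noflush)
{
        uint64_t cr3 = (pgd_phys & ~CR3_PCID_MASK) | (pcid & CR3_PCID_MASK);

        return noflush ? (cr3 | CR3_NOFLUSH) : cr3;
}
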
49 +diff --git a/Makefile b/Makefile
50 +index 64eb0bf614ee..075e429732e7 100644
51 +--- a/Makefile
52 ++++ b/Makefile
53 +@@ -1,6 +1,6 @@
54 + VERSION = 4
55 + PATCHLEVEL = 9
56 +-SUBLEVEL = 73
57 ++SUBLEVEL = 74
58 + EXTRAVERSION =
59 + NAME = Roaring Lionus
60 +
61 +@@ -788,6 +788,9 @@ KBUILD_CFLAGS += $(call cc-disable-warning, pointer-sign)
62 + # disable invalid "can't wrap" optimizations for signed / pointers
63 + KBUILD_CFLAGS += $(call cc-option,-fno-strict-overflow)
64 +
65 ++# Make sure -fstack-check isn't enabled (like gentoo apparently did)
66 ++KBUILD_CFLAGS += $(call cc-option,-fno-stack-check,)
67 ++
68 + # conserve stack if available
69 + KBUILD_CFLAGS += $(call cc-option,-fconserve-stack)
70 +
71 +diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
72 +index b9c546a305a4..da8156fd3d58 100644
73 +--- a/arch/x86/Kconfig
74 ++++ b/arch/x86/Kconfig
75 +@@ -45,7 +45,7 @@ config X86
76 + select ARCH_USE_CMPXCHG_LOCKREF if X86_64
77 + select ARCH_USE_QUEUED_RWLOCKS
78 + select ARCH_USE_QUEUED_SPINLOCKS
79 +- select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP
80 ++ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
81 + select ARCH_WANTS_DYNAMIC_TASK_STRUCT
82 + select ARCH_WANT_FRAME_POINTERS
83 + select ARCH_WANT_IPC_PARSE_VERSION if X86_32
84 +diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
85 +index 85599ad4d024..21c5ac15657b 100644
86 +--- a/arch/x86/include/asm/disabled-features.h
87 ++++ b/arch/x86/include/asm/disabled-features.h
88 +@@ -21,11 +21,13 @@
89 + # define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
90 + # define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
91 + # define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
92 ++# define DISABLE_PCID 0
93 + #else
94 + # define DISABLE_VME 0
95 + # define DISABLE_K6_MTRR 0
96 + # define DISABLE_CYRIX_ARR 0
97 + # define DISABLE_CENTAUR_MCR 0
98 ++# define DISABLE_PCID (1<<(X86_FEATURE_PCID & 31))
99 + #endif /* CONFIG_X86_64 */
100 +
101 + #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
102 +@@ -43,7 +45,7 @@
103 + #define DISABLED_MASK1 0
104 + #define DISABLED_MASK2 0
105 + #define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
106 +-#define DISABLED_MASK4 0
107 ++#define DISABLED_MASK4 (DISABLE_PCID)
108 + #define DISABLED_MASK5 0
109 + #define DISABLED_MASK6 0
110 + #define DISABLED_MASK7 0
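
These masks feed the kernel's compile-time feature machinery: a feature whose
bit is set in its word's DISABLED_MASK is treated as absent regardless of what
CPUID reports. X86_FEATURE_PCID lives in feature word 4 (CPUID leaf 1, ECX,
bit 17), which is why this hunk edits DISABLED_MASK4; 32-bit kernels thereby
compile PCID out entirely, matching the runtime clear in bugs.c further down.
A simplified model of the check, with toy names standing in for the real
cpufeature.h macros:

/* Toy model only: word 4 is CPUID.1:ECX, and PCID is bit 17 of it. */
#define TOY_FEATURE_PCID        (4 * 32 + 17)
#ifdef TOY_64BIT
# define TOY_DISABLED_MASK4     0               /* 64-bit: PCID allowed */
#else
# define TOY_DISABLED_MASK4     (1u << 17)      /* 32-bit: compiled out */
#endif

static inline int toy_feature_enabled(unsigned int feature, int cpuid_has_it)
{
        /* compile-time veto first, then the runtime CPUID-derived bit */
        if ((feature >> 5) == 4 &&
            (TOY_DISABLED_MASK4 & (1u << (feature & 31))))
                return 0;
        return cpuid_has_it;
}
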
111 +diff --git a/arch/x86/include/asm/hardirq.h b/arch/x86/include/asm/hardirq.h
112 +index 59405a248fc2..9b76cd331990 100644
113 +--- a/arch/x86/include/asm/hardirq.h
114 ++++ b/arch/x86/include/asm/hardirq.h
115 +@@ -22,8 +22,8 @@ typedef struct {
116 + #ifdef CONFIG_SMP
117 + unsigned int irq_resched_count;
118 + unsigned int irq_call_count;
119 +- unsigned int irq_tlb_count;
120 + #endif
121 ++ unsigned int irq_tlb_count;
122 + #ifdef CONFIG_X86_THERMAL_VECTOR
123 + unsigned int irq_thermal_count;
124 + #endif
125 +diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
126 +index 72198c64e646..8b272a08d1a8 100644
127 +--- a/arch/x86/include/asm/mmu.h
128 ++++ b/arch/x86/include/asm/mmu.h
129 +@@ -33,12 +33,6 @@ typedef struct {
130 + #endif
131 + } mm_context_t;
132 +
133 +-#ifdef CONFIG_SMP
134 + void leave_mm(int cpu);
135 +-#else
136 +-static inline void leave_mm(int cpu)
137 +-{
138 +-}
139 +-#endif
140 +
141 + #endif /* _ASM_X86_MMU_H */
142 +diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
143 +index f9dd22469388..d23e35584f15 100644
144 +--- a/arch/x86/include/asm/mmu_context.h
145 ++++ b/arch/x86/include/asm/mmu_context.h
146 +@@ -99,10 +99,8 @@ static inline void load_mm_ldt(struct mm_struct *mm)
147 +
148 + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
149 + {
150 +-#ifdef CONFIG_SMP
151 + if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
152 + this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
153 +-#endif
154 + }
155 +
156 + static inline int init_new_context(struct task_struct *tsk,
157 +diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
158 +index fc5abff9b7fd..7d2ea6b1f7d9 100644
159 +--- a/arch/x86/include/asm/tlbflush.h
160 ++++ b/arch/x86/include/asm/tlbflush.h
161 +@@ -7,6 +7,7 @@
162 + #include <asm/processor.h>
163 + #include <asm/cpufeature.h>
164 + #include <asm/special_insns.h>
165 ++#include <asm/smp.h>
166 +
167 + static inline void __invpcid(unsigned long pcid, unsigned long addr,
168 + unsigned long type)
169 +@@ -65,10 +66,8 @@ static inline void invpcid_flush_all_nonglobals(void)
170 + #endif
171 +
172 + struct tlb_state {
173 +-#ifdef CONFIG_SMP
174 + struct mm_struct *active_mm;
175 + int state;
176 +-#endif
177 +
178 + /*
179 + * Access to this CR4 shadow and to H/W CR4 is protected by
180 +@@ -192,6 +191,14 @@ static inline void __flush_tlb_all(void)
181 + __flush_tlb_global();
182 + else
183 + __flush_tlb();
184 ++
185 ++ /*
186 ++ * Note: if we somehow had PCID but not PGE, then this wouldn't work --
187 ++ * we'd end up flushing kernel translations for the current ASID but
188 ++ * we might fail to flush kernel translations for other cached ASIDs.
189 ++ *
190 ++ * To avoid this issue, we force PCID off if PGE is off.
191 ++ */
192 + }
193 +
194 + static inline void __flush_tlb_one(unsigned long addr)
195 +@@ -205,7 +212,6 @@ static inline void __flush_tlb_one(unsigned long addr)
196 + /*
197 + * TLB flushing:
198 + *
199 +- * - flush_tlb() flushes the current mm struct TLBs
200 + * - flush_tlb_all() flushes all processes TLBs
201 + * - flush_tlb_mm(mm) flushes the specified mm context TLB's
202 + * - flush_tlb_page(vma, vmaddr) flushes one page
203 +@@ -217,84 +223,6 @@ static inline void __flush_tlb_one(unsigned long addr)
204 + * and page-granular flushes are available only on i486 and up.
205 + */
206 +
207 +-#ifndef CONFIG_SMP
208 +-
209 +-/* "_up" is for UniProcessor.
210 +- *
211 +- * This is a helper for other header functions. *Not* intended to be called
212 +- * directly. All global TLB flushes need to either call this, or to bump the
213 +- * vm statistics themselves.
214 +- */
215 +-static inline void __flush_tlb_up(void)
216 +-{
217 +- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
218 +- __flush_tlb();
219 +-}
220 +-
221 +-static inline void flush_tlb_all(void)
222 +-{
223 +- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
224 +- __flush_tlb_all();
225 +-}
226 +-
227 +-static inline void flush_tlb(void)
228 +-{
229 +- __flush_tlb_up();
230 +-}
231 +-
232 +-static inline void local_flush_tlb(void)
233 +-{
234 +- __flush_tlb_up();
235 +-}
236 +-
237 +-static inline void flush_tlb_mm(struct mm_struct *mm)
238 +-{
239 +- if (mm == current->active_mm)
240 +- __flush_tlb_up();
241 +-}
242 +-
243 +-static inline void flush_tlb_page(struct vm_area_struct *vma,
244 +- unsigned long addr)
245 +-{
246 +- if (vma->vm_mm == current->active_mm)
247 +- __flush_tlb_one(addr);
248 +-}
249 +-
250 +-static inline void flush_tlb_range(struct vm_area_struct *vma,
251 +- unsigned long start, unsigned long end)
252 +-{
253 +- if (vma->vm_mm == current->active_mm)
254 +- __flush_tlb_up();
255 +-}
256 +-
257 +-static inline void flush_tlb_mm_range(struct mm_struct *mm,
258 +- unsigned long start, unsigned long end, unsigned long vmflag)
259 +-{
260 +- if (mm == current->active_mm)
261 +- __flush_tlb_up();
262 +-}
263 +-
264 +-static inline void native_flush_tlb_others(const struct cpumask *cpumask,
265 +- struct mm_struct *mm,
266 +- unsigned long start,
267 +- unsigned long end)
268 +-{
269 +-}
270 +-
271 +-static inline void reset_lazy_tlbstate(void)
272 +-{
273 +-}
274 +-
275 +-static inline void flush_tlb_kernel_range(unsigned long start,
276 +- unsigned long end)
277 +-{
278 +- flush_tlb_all();
279 +-}
280 +-
281 +-#else /* SMP */
282 +-
283 +-#include <asm/smp.h>
284 +-
285 + #define local_flush_tlb() __flush_tlb()
286 +
287 + #define flush_tlb_mm(mm) flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
288 +@@ -303,13 +231,14 @@ static inline void flush_tlb_kernel_range(unsigned long start,
289 + flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
290 +
291 + extern void flush_tlb_all(void);
292 +-extern void flush_tlb_current_task(void);
293 +-extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
294 + extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
295 + unsigned long end, unsigned long vmflag);
296 + extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
297 +
298 +-#define flush_tlb() flush_tlb_current_task()
299 ++static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
300 ++{
301 ++ flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, VM_NONE);
302 ++}
303 +
304 + void native_flush_tlb_others(const struct cpumask *cpumask,
305 + struct mm_struct *mm,
306 +@@ -324,8 +253,6 @@ static inline void reset_lazy_tlbstate(void)
307 + this_cpu_write(cpu_tlbstate.active_mm, &init_mm);
308 + }
309 +
310 +-#endif /* SMP */
311 +-
312 + #ifndef CONFIG_PARAVIRT
313 + #define flush_tlb_others(mask, mm, start, end) \
314 + native_flush_tlb_others(mask, mm, start, end)
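
The comment added to __flush_tlb_all() deserves unpacking. A "global" flush on
x86 is implemented by toggling CR4.PGE, and a CR4 write that changes PGE drops
every TLB entry, global or not, for every cached PCID. Without PGE there is no
such lever, which is why setup_pcid() later in this patch refuses to enable
PCID when PGE is absent. A kernel-style sketch of the toggle (illustrative
only; the real __flush_tlb_global() path also handles paravirt and IRQ state):

static inline void toy_flush_tlb_global(void)
{
        unsigned long cr4 = native_read_cr4();

        native_write_cr4(cr4 & ~X86_CR4_PGE);   /* clear PGE: full flush */
        native_write_cr4(cr4);                  /* restore previous CR4 */
}
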
315 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
316 +index bd17db15a2c1..0b6124315441 100644
317 +--- a/arch/x86/kernel/cpu/bugs.c
318 ++++ b/arch/x86/kernel/cpu/bugs.c
319 +@@ -19,6 +19,14 @@
320 +
321 + void __init check_bugs(void)
322 + {
323 ++#ifdef CONFIG_X86_32
324 ++ /*
325 ++ * Regardless of whether PCID is enumerated, the SDM says
326 ++ * that it can't be enabled in 32-bit mode.
327 ++ */
328 ++ setup_clear_cpu_cap(X86_FEATURE_PCID);
329 ++#endif
330 ++
331 + identify_boot_cpu();
332 + #ifndef CONFIG_SMP
333 + pr_info("CPU: ");
334 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
335 +index 4eece91ada37..91588be529b9 100644
336 +--- a/arch/x86/kernel/cpu/common.c
337 ++++ b/arch/x86/kernel/cpu/common.c
338 +@@ -163,6 +163,24 @@ static int __init x86_mpx_setup(char *s)
339 + }
340 + __setup("nompx", x86_mpx_setup);
341 +
342 ++#ifdef CONFIG_X86_64
343 ++static int __init x86_pcid_setup(char *s)
344 ++{
345 ++ /* require an exact match without trailing characters */
346 ++ if (strlen(s))
347 ++ return 0;
348 ++
349 ++ /* do not emit a message if the feature is not present */
350 ++ if (!boot_cpu_has(X86_FEATURE_PCID))
351 ++ return 1;
352 ++
353 ++ setup_clear_cpu_cap(X86_FEATURE_PCID);
354 ++ pr_info("nopcid: PCID feature disabled\n");
355 ++ return 1;
356 ++}
357 ++__setup("nopcid", x86_pcid_setup);
358 ++#endif
359 ++
360 + static int __init x86_noinvpcid_setup(char *s)
361 + {
362 + /* noinvpcid doesn't accept parameters */
363 +@@ -306,6 +324,25 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
364 + }
365 + }
366 +
367 ++static void setup_pcid(struct cpuinfo_x86 *c)
368 ++{
369 ++ if (cpu_has(c, X86_FEATURE_PCID)) {
370 ++ if (cpu_has(c, X86_FEATURE_PGE)) {
371 ++ cr4_set_bits(X86_CR4_PCIDE);
372 ++ } else {
373 ++ /*
374 ++ * flush_tlb_all(), as currently implemented, won't
375 ++ * work if PCID is on but PGE is not. Since that
376 ++ * combination doesn't exist on real hardware, there's
377 ++ * no reason to try to fully support it, but it's
378 ++ * polite to avoid corrupting data if we're on
379 ++ * an improperly configured VM.
380 ++ */
381 ++ clear_cpu_cap(c, X86_FEATURE_PCID);
382 ++ }
383 ++ }
384 ++}
385 ++
386 + /*
387 + * Protection Keys are not available in 32-bit mode.
388 + */
389 +@@ -1064,6 +1101,9 @@ static void identify_cpu(struct cpuinfo_x86 *c)
390 + setup_smep(c);
391 + setup_smap(c);
392 +
393 ++ /* Set up PCID */
394 ++ setup_pcid(c);
395 ++
396 + /*
397 + * The vendor-specific functions might have changed features.
398 + * Now we do "generic changes."
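
With setup_pcid() in place, CR4.PCIDE gets enabled exactly when the CPU
advertises both PCID and PGE, and the nopcid parameter (or a 32-bit build)
can veto it earlier. The hardware side is easy to probe from userspace, since
PCID enumeration is CPUID leaf 1, ECX bit 17; a small standalone check using
GCC's <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;
        /* ECX bit 17 = PCID; whether the kernel actually uses it also
         * depends on CR4.PCIDE, set above only when PGE is present. */
        printf("PCID supported: %s\n", (ecx & (1u << 17)) ? "yes" : "no");
        return 0;
}
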
399 +diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
400 +index 067f9813fd2c..ce020a69bba9 100644
401 +--- a/arch/x86/kernel/reboot.c
402 ++++ b/arch/x86/kernel/reboot.c
403 +@@ -106,6 +106,10 @@ void __noreturn machine_real_restart(unsigned int type)
404 + load_cr3(initial_page_table);
405 + #else
406 + write_cr3(real_mode_header->trampoline_pgd);
407 ++
408 ++ /* Exiting long mode will fail if CR4.PCIDE is set. */
409 ++ if (static_cpu_has(X86_FEATURE_PCID))
410 ++ cr4_clear_bits(X86_CR4_PCIDE);
411 + #endif
412 +
413 + /* Jump to the identity-mapped low memory code */
414 +diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
415 +index 9fe7b9e1ae30..e803d72ef525 100644
416 +--- a/arch/x86/kernel/smpboot.c
417 ++++ b/arch/x86/kernel/smpboot.c
418 +@@ -115,25 +115,16 @@ static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
419 + spin_lock_irqsave(&rtc_lock, flags);
420 + CMOS_WRITE(0xa, 0xf);
421 + spin_unlock_irqrestore(&rtc_lock, flags);
422 +- local_flush_tlb();
423 +- pr_debug("1.\n");
424 + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
425 + start_eip >> 4;
426 +- pr_debug("2.\n");
427 + *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
428 + start_eip & 0xf;
429 +- pr_debug("3.\n");
430 + }
431 +
432 + static inline void smpboot_restore_warm_reset_vector(void)
433 + {
434 + unsigned long flags;
435 +
436 +- /*
437 +- * Install writable page 0 entry to set BIOS data area.
438 +- */
439 +- local_flush_tlb();
440 +-
441 + /*
442 + * Paranoid: Set warm reset code and vector here back
443 + * to default values.
444 +diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
445 +index 01f30e56f99e..4b3012888ada 100644
446 +--- a/arch/x86/kernel/vm86_32.c
447 ++++ b/arch/x86/kernel/vm86_32.c
448 +@@ -191,7 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
449 + pte_unmap_unlock(pte, ptl);
450 + out:
451 + up_write(&mm->mmap_sem);
452 +- flush_tlb();
453 ++ flush_tlb_mm_range(mm, 0xA0000, 0xA0000 + 32*PAGE_SIZE, 0UL);
454 + }
455 +
456 +
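
The replacement range is exact: mark_screen_rdonly() walks the 32 pages that
start at 0xA0000, and 0xA0000 + 32 * 4096 = 0xC0000, so the new call flushes
precisely the 128 KiB legacy VGA window rather than the entire address space.
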
457 +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
458 +index 889e7619a091..0381638168d1 100644
459 +--- a/arch/x86/mm/init.c
460 ++++ b/arch/x86/mm/init.c
461 +@@ -764,10 +764,8 @@ void __init zone_sizes_init(void)
462 + }
463 +
464 + DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
465 +-#ifdef CONFIG_SMP
466 + .active_mm = &init_mm,
467 + .state = 0,
468 +-#endif
469 + .cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
470 + };
471 + EXPORT_SYMBOL_GPL(cpu_tlbstate);
472 +diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
473 +index 75fb01109f94..53b72fb4e781 100644
474 +--- a/arch/x86/mm/tlb.c
475 ++++ b/arch/x86/mm/tlb.c
476 +@@ -15,7 +15,7 @@
477 + #include <linux/debugfs.h>
478 +
479 + /*
480 +- * Smarter SMP flushing macros.
481 ++ * TLB flushing, formerly SMP-only
482 + * c/o Linus Torvalds.
483 + *
484 + * These mean you can really definitely utterly forget about
485 +@@ -28,8 +28,6 @@
486 + * Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
487 + */
488 +
489 +-#ifdef CONFIG_SMP
490 +-
491 + struct flush_tlb_info {
492 + struct mm_struct *flush_mm;
493 + unsigned long flush_start;
494 +@@ -59,8 +57,6 @@ void leave_mm(int cpu)
495 + }
496 + EXPORT_SYMBOL_GPL(leave_mm);
497 +
498 +-#endif /* CONFIG_SMP */
499 +-
500 + void switch_mm(struct mm_struct *prev, struct mm_struct *next,
501 + struct task_struct *tsk)
502 + {
503 +@@ -91,10 +87,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
504 + set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
505 + }
506 +
507 +-#ifdef CONFIG_SMP
508 + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
509 + this_cpu_write(cpu_tlbstate.active_mm, next);
510 +-#endif
511 +
512 + cpumask_set_cpu(cpu, mm_cpumask(next));
513 +
514 +@@ -152,9 +146,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
515 + if (unlikely(prev->context.ldt != next->context.ldt))
516 + load_mm_ldt(next);
517 + #endif
518 +- }
519 +-#ifdef CONFIG_SMP
520 +- else {
521 ++ } else {
522 + this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
523 + BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
524 +
525 +@@ -181,11 +173,8 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
526 + load_mm_ldt(next);
527 + }
528 + }
529 +-#endif
530 + }
531 +
532 +-#ifdef CONFIG_SMP
533 +-
534 + /*
535 + * The flush IPI assumes that a thread switch happens in this order:
536 + * [cpu0: the cpu that switches]
537 +@@ -287,23 +276,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
538 + smp_call_function_many(cpumask, flush_tlb_func, &info, 1);
539 + }
540 +
541 +-void flush_tlb_current_task(void)
542 +-{
543 +- struct mm_struct *mm = current->mm;
544 +-
545 +- preempt_disable();
546 +-
547 +- count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
548 +-
549 +- /* This is an implicit full barrier that synchronizes with switch_mm. */
550 +- local_flush_tlb();
551 +-
552 +- trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
553 +- if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
554 +- flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
555 +- preempt_enable();
556 +-}
557 +-
558 + /*
559 + * See Documentation/x86/tlb.txt for details. We choose 33
560 + * because it is large enough to cover the vast majority (at
561 +@@ -324,6 +296,12 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
562 + unsigned long base_pages_to_flush = TLB_FLUSH_ALL;
563 +
564 + preempt_disable();
565 ++
566 ++ if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
567 ++ base_pages_to_flush = (end - start) >> PAGE_SHIFT;
568 ++ if (base_pages_to_flush > tlb_single_page_flush_ceiling)
569 ++ base_pages_to_flush = TLB_FLUSH_ALL;
570 ++
571 + if (current->active_mm != mm) {
572 + /* Synchronize with switch_mm. */
573 + smp_mb();
574 +@@ -340,15 +318,11 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
575 + goto out;
576 + }
577 +
578 +- if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB))
579 +- base_pages_to_flush = (end - start) >> PAGE_SHIFT;
580 +-
581 + /*
582 + * Both branches below are implicit full barriers (MOV to CR or
583 + * INVLPG) that synchronize with switch_mm.
584 + */
585 +- if (base_pages_to_flush > tlb_single_page_flush_ceiling) {
586 +- base_pages_to_flush = TLB_FLUSH_ALL;
587 ++ if (base_pages_to_flush == TLB_FLUSH_ALL) {
588 + count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
589 + local_flush_tlb();
590 + } else {
591 +@@ -369,33 +343,6 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
592 + preempt_enable();
593 + }
594 +
595 +-void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
596 +-{
597 +- struct mm_struct *mm = vma->vm_mm;
598 +-
599 +- preempt_disable();
600 +-
601 +- if (current->active_mm == mm) {
602 +- if (current->mm) {
603 +- /*
604 +- * Implicit full barrier (INVLPG) that synchronizes
605 +- * with switch_mm.
606 +- */
607 +- __flush_tlb_one(start);
608 +- } else {
609 +- leave_mm(smp_processor_id());
610 +-
611 +- /* Synchronize with switch_mm. */
612 +- smp_mb();
613 +- }
614 +- }
615 +-
616 +- if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
617 +- flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
618 +-
619 +- preempt_enable();
620 +-}
621 +-
622 + static void do_flush_tlb_all(void *info)
623 + {
624 + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
625 +@@ -480,5 +427,3 @@ static int __init create_tlb_single_page_flush_ceiling(void)
626 + return 0;
627 + }
628 + late_initcall(create_tlb_single_page_flush_ceiling);
629 +-
630 +-#endif /* CONFIG_SMP */
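
The reshuffled flush_tlb_mm_range() now decides between per-page INVLPGs and
a full flush up front, using the ceiling of 33 pages discussed above. A
self-contained model of that decision (the real code also skips the range
math for VM_HUGETLB mappings):

#define TOY_PAGE_SHIFT  12
#define TOY_FLUSH_ALL   (~0UL)
#define TOY_CEILING     33UL    /* see Documentation/x86/tlb.txt */

static unsigned long toy_pages_to_flush(unsigned long start, unsigned long end)
{
        unsigned long pages;

        if (end == TOY_FLUSH_ALL)
                return TOY_FLUSH_ALL;
        pages = (end - start) >> TOY_PAGE_SHIFT;
        return pages > TOY_CEILING ? TOY_FLUSH_ALL : pages;
}

/* e.g. a 64 KiB range is 16 pages (16 INVLPGs), while a 256 KiB range
 * is 64 pages, which exceeds 33, so the whole TLB is flushed instead. */
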
631 +diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
632 +index 8f1f7efa848c..2bea87cc0ff2 100644
633 +--- a/arch/x86/xen/enlighten.c
634 ++++ b/arch/x86/xen/enlighten.c
635 +@@ -444,6 +444,12 @@ static void __init xen_init_cpuid_mask(void)
636 + ~((1 << X86_FEATURE_MTRR) | /* disable MTRR */
637 + (1 << X86_FEATURE_ACC)); /* thermal monitoring */
638 +
639 ++ /*
640 ++ * Xen PV would need some work to support PCID: CR3 handling as well
641 ++ * as xen_flush_tlb_others() would need updating.
642 ++ */
643 ++ cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_PCID % 32)); /* disable PCID */
644 ++
645 + if (!xen_initial_domain())
646 + cpuid_leaf1_edx_mask &=
647 + ~((1 << X86_FEATURE_ACPI)); /* disable ACPI */
648 +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
649 +index 19c6477af19f..a856371bbe58 100644
650 +--- a/drivers/infiniband/hw/cxgb4/cq.c
651 ++++ b/drivers/infiniband/hw/cxgb4/cq.c
652 +@@ -575,10 +575,10 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
653 + ret = -EAGAIN;
654 + goto skip_cqe;
655 + }
656 +- if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
657 ++ if (unlikely(!CQE_STATUS(hw_cqe) &&
658 ++ CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
659 + t4_set_wq_in_error(wq);
660 +- hw_cqe->header |= htonl(CQE_STATUS_V(T4_ERR_MSN));
661 +- goto proc_cqe;
662 ++ hw_cqe->header |= cpu_to_be32(CQE_STATUS_V(T4_ERR_MSN));
663 + }
664 + goto proc_cqe;
665 + }
666 +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
667 +index edae2dcc4927..bb22d325e965 100644
668 +--- a/drivers/net/ethernet/broadcom/tg3.c
669 ++++ b/drivers/net/ethernet/broadcom/tg3.c
670 +@@ -14226,7 +14226,9 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
671 + /* Reset PHY, otherwise the read DMA engine will be in a mode that
672 + * breaks all requests to 256 bytes.
673 + */
674 +- if (tg3_asic_rev(tp) == ASIC_REV_57766)
675 ++ if (tg3_asic_rev(tp) == ASIC_REV_57766 ||
676 ++ tg3_asic_rev(tp) == ASIC_REV_5717 ||
677 ++ tg3_asic_rev(tp) == ASIC_REV_5719)
678 + reset_phy = true;
679 +
680 + err = tg3_restart_hw(tp, reset_phy);
681 +diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
682 +index 849b8712ec81..917091871259 100644
683 +--- a/drivers/net/ethernet/freescale/fec_main.c
684 ++++ b/drivers/net/ethernet/freescale/fec_main.c
685 +@@ -172,10 +172,12 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
686 + #endif /* CONFIG_M5272 */
687 +
688 + /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
689 ++ *
690 ++ * 2048 byte skbufs are allocated. However, alignment requirements
691 ++ * varies between FEC variants. Worst case is 64, so round down by 64.
692 + */
693 +-#define PKT_MAXBUF_SIZE 1522
694 ++#define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
695 + #define PKT_MINBUF_SIZE 64
696 +-#define PKT_MAXBLR_SIZE 1536
697 +
698 + /* FEC receive acceleration */
699 + #define FEC_RACC_IPDIS (1 << 1)
700 +@@ -813,6 +815,12 @@ static void fec_enet_bd_init(struct net_device *dev)
701 + for (i = 0; i < txq->bd.ring_size; i++) {
702 + /* Initialize the BD for every fragment in the page. */
703 + bdp->cbd_sc = cpu_to_fec16(0);
704 ++ if (bdp->cbd_bufaddr &&
705 ++ !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
706 ++ dma_unmap_single(&fep->pdev->dev,
707 ++ fec32_to_cpu(bdp->cbd_bufaddr),
708 ++ fec16_to_cpu(bdp->cbd_datlen),
709 ++ DMA_TO_DEVICE);
710 + if (txq->tx_skbuff[i]) {
711 + dev_kfree_skb_any(txq->tx_skbuff[i]);
712 + txq->tx_skbuff[i] = NULL;
713 +@@ -847,7 +855,7 @@ static void fec_enet_enable_ring(struct net_device *ndev)
714 + for (i = 0; i < fep->num_rx_queues; i++) {
715 + rxq = fep->rx_queue[i];
716 + writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
717 +- writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
718 ++ writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
719 +
720 + /* enable DMA1/2 */
721 + if (i)
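
Arithmetic check on the new constant: 2048 - 64 = 1984, which is already a
multiple of 64, so PKT_MAXBUF_SIZE becomes 1984 bytes; the same value now goes
into FEC_R_BUFF_SIZE, replacing the old 1536-byte PKT_MAXBLR_SIZE.
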
722 +diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
723 +index a0d1b084ecec..7aeb7fedb364 100644
724 +--- a/drivers/net/ethernet/marvell/mvmdio.c
725 ++++ b/drivers/net/ethernet/marvell/mvmdio.c
726 +@@ -232,7 +232,8 @@ static int orion_mdio_probe(struct platform_device *pdev)
727 + dev->regs + MVMDIO_ERR_INT_MASK);
728 +
729 + } else if (dev->err_interrupt == -EPROBE_DEFER) {
730 +- return -EPROBE_DEFER;
731 ++ ret = -EPROBE_DEFER;
732 ++ goto out_mdio;
733 + }
734 +
735 + mutex_init(&dev->lock);
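
The mvmdio change is a textbook error-path fix: returning -EPROBE_DEFER
directly skipped the out_mdio label and leaked the already-registered MDIO
bus. The underlying goto-unwind idiom, as a self-contained sketch with
hypothetical names:

static int  acquire_a(void) { return 0; }
static void release_a(void) { }
static int  acquire_b(void) { return -1; /* pretend this defers */ }

static int toy_probe(void)
{
        int ret = acquire_a();

        if (ret)
                return ret;     /* nothing to unwind yet */
        ret = acquire_b();
        if (ret)
                goto out_a;     /* unwind a; never bare-return here */
        return 0;
out_a:
        release_a();
        return ret;
}
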
736 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
737 +index f7fabecc104f..4c3f1cb7e2c9 100644
738 +--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
739 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
740 +@@ -367,7 +367,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
741 + case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
742 + case MLX5_CMD_OP_ALLOC_Q_COUNTER:
743 + case MLX5_CMD_OP_QUERY_Q_COUNTER:
744 +- case MLX5_CMD_OP_SET_RATE_LIMIT:
745 ++ case MLX5_CMD_OP_SET_PP_RATE_LIMIT:
746 + case MLX5_CMD_OP_QUERY_RATE_LIMIT:
747 + case MLX5_CMD_OP_ALLOC_PD:
748 + case MLX5_CMD_OP_ALLOC_UAR:
749 +@@ -502,7 +502,7 @@ const char *mlx5_command_str(int command)
750 + MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
751 + MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
752 + MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
753 +- MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT);
754 ++ MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT);
755 + MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT);
756 + MLX5_COMMAND_STR_CASE(ALLOC_PD);
757 + MLX5_COMMAND_STR_CASE(DEALLOC_PD);
758 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
759 +index 9d3722930c95..38981db43bc3 100644
760 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
761 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
762 +@@ -3038,6 +3038,7 @@ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
763 + struct sk_buff *skb,
764 + netdev_features_t features)
765 + {
766 ++ unsigned int offset = 0;
767 + struct udphdr *udph;
768 + u16 proto;
769 + u16 port = 0;
770 +@@ -3047,7 +3048,7 @@ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
771 + proto = ip_hdr(skb)->protocol;
772 + break;
773 + case htons(ETH_P_IPV6):
774 +- proto = ipv6_hdr(skb)->nexthdr;
775 ++ proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
776 + break;
777 + default:
778 + goto out;
779 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
780 +index d0a4005fe63a..9346f3985edf 100644
781 +--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
782 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
783 +@@ -303,8 +303,8 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
784 + err_cmd:
785 + memset(din, 0, sizeof(din));
786 + memset(dout, 0, sizeof(dout));
787 +- MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
788 +- MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
789 ++ MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
790 ++ MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
791 + mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
792 + return err;
793 + }
794 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
795 +index 104902a93a0b..2be9ec5fd651 100644
796 +--- a/drivers/net/ethernet/mellanox/mlx5/core/rl.c
797 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
798 +@@ -60,16 +60,16 @@ static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
799 + return ret_entry;
800 + }
801 +
802 +-static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
803 ++static int mlx5_set_pp_rate_limit_cmd(struct mlx5_core_dev *dev,
804 + u32 rate, u16 index)
805 + {
806 +- u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)] = {0};
807 +- u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)] = {0};
808 ++ u32 in[MLX5_ST_SZ_DW(set_pp_rate_limit_in)] = {0};
809 ++ u32 out[MLX5_ST_SZ_DW(set_pp_rate_limit_out)] = {0};
810 +
811 +- MLX5_SET(set_rate_limit_in, in, opcode,
812 +- MLX5_CMD_OP_SET_RATE_LIMIT);
813 +- MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
814 +- MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
815 ++ MLX5_SET(set_pp_rate_limit_in, in, opcode,
816 ++ MLX5_CMD_OP_SET_PP_RATE_LIMIT);
817 ++ MLX5_SET(set_pp_rate_limit_in, in, rate_limit_index, index);
818 ++ MLX5_SET(set_pp_rate_limit_in, in, rate_limit, rate);
819 + return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
820 + }
821 +
822 +@@ -108,7 +108,7 @@ int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
823 + entry->refcount++;
824 + } else {
825 + /* new rate limit */
826 +- err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
827 ++ err = mlx5_set_pp_rate_limit_cmd(dev, rate, entry->index);
828 + if (err) {
829 + mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
830 + rate, err);
831 +@@ -144,7 +144,7 @@ void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
832 + entry->refcount--;
833 + if (!entry->refcount) {
834 + /* need to remove rate */
835 +- mlx5_set_rate_limit_cmd(dev, 0, entry->index);
836 ++ mlx5_set_pp_rate_limit_cmd(dev, 0, entry->index);
837 + entry->rate = 0;
838 + }
839 +
840 +@@ -197,8 +197,8 @@ void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
841 + /* Clear all configured rates */
842 + for (i = 0; i < table->max_size; i++)
843 + if (table->rl_entry[i].rate)
844 +- mlx5_set_rate_limit_cmd(dev, 0,
845 +- table->rl_entry[i].index);
846 ++ mlx5_set_pp_rate_limit_cmd(dev, 0,
847 ++ table->rl_entry[i].index);
848 +
849 + kfree(dev->priv.rl_table.rl_entry);
850 + }
851 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
852 +index 07a9ba6cfc70..2f74953e4561 100644
853 +--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
854 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
855 +@@ -71,9 +71,9 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
856 + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
857 + struct mlx5e_vxlan *vxlan;
858 +
859 +- spin_lock(&vxlan_db->lock);
860 ++ spin_lock_bh(&vxlan_db->lock);
861 + vxlan = radix_tree_lookup(&vxlan_db->tree, port);
862 +- spin_unlock(&vxlan_db->lock);
863 ++ spin_unlock_bh(&vxlan_db->lock);
864 +
865 + return vxlan;
866 + }
867 +@@ -88,8 +88,12 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
868 + struct mlx5e_vxlan *vxlan;
869 + int err;
870 +
871 +- if (mlx5e_vxlan_lookup_port(priv, port))
872 ++ mutex_lock(&priv->state_lock);
873 ++ vxlan = mlx5e_vxlan_lookup_port(priv, port);
874 ++ if (vxlan) {
875 ++ atomic_inc(&vxlan->refcount);
876 + goto free_work;
877 ++ }
878 +
879 + if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
880 + goto free_work;
881 +@@ -99,10 +103,11 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
882 + goto err_delete_port;
883 +
884 + vxlan->udp_port = port;
885 ++ atomic_set(&vxlan->refcount, 1);
886 +
887 +- spin_lock_irq(&vxlan_db->lock);
888 ++ spin_lock_bh(&vxlan_db->lock);
889 + err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
890 +- spin_unlock_irq(&vxlan_db->lock);
891 ++ spin_unlock_bh(&vxlan_db->lock);
892 + if (err)
893 + goto err_free;
894 +
895 +@@ -113,35 +118,39 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
896 + err_delete_port:
897 + mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
898 + free_work:
899 ++ mutex_unlock(&priv->state_lock);
900 + kfree(vxlan_work);
901 + }
902 +
903 +-static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
904 ++static void mlx5e_vxlan_del_port(struct work_struct *work)
905 + {
906 ++ struct mlx5e_vxlan_work *vxlan_work =
907 ++ container_of(work, struct mlx5e_vxlan_work, work);
908 ++ struct mlx5e_priv *priv = vxlan_work->priv;
909 + struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
910 ++ u16 port = vxlan_work->port;
911 + struct mlx5e_vxlan *vxlan;
912 ++ bool remove = false;
913 +
914 +- spin_lock_irq(&vxlan_db->lock);
915 +- vxlan = radix_tree_delete(&vxlan_db->tree, port);
916 +- spin_unlock_irq(&vxlan_db->lock);
917 +-
918 ++ mutex_lock(&priv->state_lock);
919 ++ spin_lock_bh(&vxlan_db->lock);
920 ++ vxlan = radix_tree_lookup(&vxlan_db->tree, port);
921 + if (!vxlan)
922 +- return;
923 +-
924 +- mlx5e_vxlan_core_del_port_cmd(priv->mdev, vxlan->udp_port);
925 +-
926 +- kfree(vxlan);
927 +-}
928 ++ goto out_unlock;
929 +
930 +-static void mlx5e_vxlan_del_port(struct work_struct *work)
931 +-{
932 +- struct mlx5e_vxlan_work *vxlan_work =
933 +- container_of(work, struct mlx5e_vxlan_work, work);
934 +- struct mlx5e_priv *priv = vxlan_work->priv;
935 +- u16 port = vxlan_work->port;
936 ++ if (atomic_dec_and_test(&vxlan->refcount)) {
937 ++ radix_tree_delete(&vxlan_db->tree, port);
938 ++ remove = true;
939 ++ }
940 +
941 +- __mlx5e_vxlan_core_del_port(priv, port);
942 ++out_unlock:
943 ++ spin_unlock_bh(&vxlan_db->lock);
944 +
945 ++ if (remove) {
946 ++ mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
947 ++ kfree(vxlan);
948 ++ }
949 ++ mutex_unlock(&priv->state_lock);
950 + kfree(vxlan_work);
951 + }
952 +
953 +@@ -171,12 +180,11 @@ void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
954 + struct mlx5e_vxlan *vxlan;
955 + unsigned int port = 0;
956 +
957 +- spin_lock_irq(&vxlan_db->lock);
958 ++ /* Lockless since we are the only radix-tree consumers, wq is disabled */
959 + while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
960 + port = vxlan->udp_port;
961 +- spin_unlock_irq(&vxlan_db->lock);
962 +- __mlx5e_vxlan_core_del_port(priv, (u16)port);
963 +- spin_lock_irq(&vxlan_db->lock);
964 ++ radix_tree_delete(&vxlan_db->tree, port);
965 ++ mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
966 ++ kfree(vxlan);
967 + }
968 +- spin_unlock_irq(&vxlan_db->lock);
969 + }
970 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
971 +index 5def12c048e3..5ef6ae7d568a 100644
972 +--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
973 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
974 +@@ -36,6 +36,7 @@
975 + #include "en.h"
976 +
977 + struct mlx5e_vxlan {
978 ++ atomic_t refcount;
979 + u16 udp_port;
980 + };
981 +
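
The new refcount field is what lets the VXLAN add/del work items above
cooperate: adding a port that already exists just takes another reference, and
the hardware port is only torn down when the last reference drops. The shape
of the pattern as a standalone sketch in C11 atomics (the kernel's
atomic_dec_and_test() likewise returns true when the counter reaches zero):

#include <stdatomic.h>
#include <stdbool.h>

struct toy_vxlan {
        atomic_int refcount;
        unsigned short udp_port;
};

/* add: true means the caller must create the entry (with refcount 1) */
static bool toy_add_needs_create(struct toy_vxlan *existing)
{
        if (existing) {
                atomic_fetch_add(&existing->refcount, 1);
                return false;
        }
        return true;
}

/* del: true means the last reference dropped; delete the hardware
 * port and free the entry */
static bool toy_del_was_last_ref(struct toy_vxlan *v)
{
        return atomic_fetch_sub(&v->refcount, 1) == 1;
}
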
982 +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
983 +index fbf5945ce00d..2032a6de026b 100644
984 +--- a/drivers/net/phy/micrel.c
985 ++++ b/drivers/net/phy/micrel.c
986 +@@ -624,6 +624,7 @@ static int ksz9031_read_status(struct phy_device *phydev)
987 + phydev->link = 0;
988 + if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
989 + phydev->drv->config_intr(phydev);
990 ++ return genphy_config_aneg(phydev);
991 + }
992 +
993 + return 0;
994 +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
995 +index 105fbfb47e3a..db65d9ad4488 100644
996 +--- a/drivers/net/usb/qmi_wwan.c
997 ++++ b/drivers/net/usb/qmi_wwan.c
998 +@@ -907,6 +907,7 @@ static const struct usb_device_id products[] = {
999 + {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
1000 + {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
1001 + {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
1002 ++ {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */
1003 + {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1004 + {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
1005 + {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
1006 +diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
1007 +index e72234efb648..9b5fc502f6a1 100644
1008 +--- a/drivers/s390/net/qeth_core.h
1009 ++++ b/drivers/s390/net/qeth_core.h
1010 +@@ -576,9 +576,9 @@ enum qeth_cq {
1011 + };
1012 +
1013 + struct qeth_ipato {
1014 +- int enabled;
1015 +- int invert4;
1016 +- int invert6;
1017 ++ bool enabled;
1018 ++ bool invert4;
1019 ++ bool invert6;
1020 + struct list_head entries;
1021 + };
1022 +
1023 +diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
1024 +index 838ed6213118..df8f74cb1406 100644
1025 +--- a/drivers/s390/net/qeth_core_main.c
1026 ++++ b/drivers/s390/net/qeth_core_main.c
1027 +@@ -1475,9 +1475,9 @@ static int qeth_setup_card(struct qeth_card *card)
1028 + qeth_set_intial_options(card);
1029 + /* IP address takeover */
1030 + INIT_LIST_HEAD(&card->ipato.entries);
1031 +- card->ipato.enabled = 0;
1032 +- card->ipato.invert4 = 0;
1033 +- card->ipato.invert6 = 0;
1034 ++ card->ipato.enabled = false;
1035 ++ card->ipato.invert4 = false;
1036 ++ card->ipato.invert6 = false;
1037 + /* init QDIO stuff */
1038 + qeth_init_qdio_info(card);
1039 + INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
1040 +diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
1041 +index 26f79533e62e..eedf9b01a496 100644
1042 +--- a/drivers/s390/net/qeth_l3.h
1043 ++++ b/drivers/s390/net/qeth_l3.h
1044 +@@ -80,7 +80,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1045 + int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
1046 + void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
1047 + const u8 *);
1048 +-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *);
1049 ++void qeth_l3_update_ipato(struct qeth_card *card);
1050 + struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions);
1051 + int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *);
1052 + int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *);
1053 +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
1054 +index f91e70c369ed..1487f8a0c575 100644
1055 +--- a/drivers/s390/net/qeth_l3_main.c
1056 ++++ b/drivers/s390/net/qeth_l3_main.c
1057 +@@ -168,8 +168,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len)
1058 + }
1059 + }
1060 +
1061 +-int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
1062 +- struct qeth_ipaddr *addr)
1063 ++static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
1064 ++ struct qeth_ipaddr *addr)
1065 + {
1066 + struct qeth_ipato_entry *ipatoe;
1067 + u8 addr_bits[128] = {0, };
1068 +@@ -178,6 +178,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
1069 +
1070 + if (!card->ipato.enabled)
1071 + return 0;
1072 ++ if (addr->type != QETH_IP_TYPE_NORMAL)
1073 ++ return 0;
1074 +
1075 + qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
1076 + (addr->proto == QETH_PROT_IPV4)? 4:16);
1077 +@@ -293,8 +295,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
1078 + memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr));
1079 + addr->ref_counter = 1;
1080 +
1081 +- if (addr->type == QETH_IP_TYPE_NORMAL &&
1082 +- qeth_l3_is_addr_covered_by_ipato(card, addr)) {
1083 ++ if (qeth_l3_is_addr_covered_by_ipato(card, addr)) {
1084 + QETH_CARD_TEXT(card, 2, "tkovaddr");
1085 + addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
1086 + }
1087 +@@ -607,6 +608,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
1088 + /*
1089 + * IP address takeover related functions
1090 + */
1091 ++
1092 ++/**
1093 ++ * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
1094 ++ *
1095 ++ * Caller must hold ip_lock.
1096 ++ */
1097 ++void qeth_l3_update_ipato(struct qeth_card *card)
1098 ++{
1099 ++ struct qeth_ipaddr *addr;
1100 ++ unsigned int i;
1101 ++
1102 ++ hash_for_each(card->ip_htable, i, addr, hnode) {
1103 ++ if (addr->type != QETH_IP_TYPE_NORMAL)
1104 ++ continue;
1105 ++ if (qeth_l3_is_addr_covered_by_ipato(card, addr))
1106 ++ addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG;
1107 ++ else
1108 ++ addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG;
1109 ++ }
1110 ++}
1111 ++
1112 + static void qeth_l3_clear_ipato_list(struct qeth_card *card)
1113 + {
1114 + struct qeth_ipato_entry *ipatoe, *tmp;
1115 +@@ -618,6 +640,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card)
1116 + kfree(ipatoe);
1117 + }
1118 +
1119 ++ qeth_l3_update_ipato(card);
1120 + spin_unlock_bh(&card->ip_lock);
1121 + }
1122 +
1123 +@@ -642,8 +665,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
1124 + }
1125 + }
1126 +
1127 +- if (!rc)
1128 ++ if (!rc) {
1129 + list_add_tail(&new->entry, &card->ipato.entries);
1130 ++ qeth_l3_update_ipato(card);
1131 ++ }
1132 +
1133 + spin_unlock_bh(&card->ip_lock);
1134 +
1135 +@@ -666,6 +691,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card,
1136 + (proto == QETH_PROT_IPV4)? 4:16) &&
1137 + (ipatoe->mask_bits == mask_bits)) {
1138 + list_del(&ipatoe->entry);
1139 ++ qeth_l3_update_ipato(card);
1140 + kfree(ipatoe);
1141 + }
1142 + }
1143 +diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
1144 +index cffe42f5775d..d6bdfc6e905a 100644
1145 +--- a/drivers/s390/net/qeth_l3_sys.c
1146 ++++ b/drivers/s390/net/qeth_l3_sys.c
1147 +@@ -372,8 +372,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
1148 + struct device_attribute *attr, const char *buf, size_t count)
1149 + {
1150 + struct qeth_card *card = dev_get_drvdata(dev);
1151 +- struct qeth_ipaddr *addr;
1152 +- int i, rc = 0;
1153 ++ bool enable;
1154 ++ int rc = 0;
1155 +
1156 + if (!card)
1157 + return -EINVAL;
1158 +@@ -386,25 +386,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
1159 + }
1160 +
1161 + if (sysfs_streq(buf, "toggle")) {
1162 +- card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
1163 +- } else if (sysfs_streq(buf, "1")) {
1164 +- card->ipato.enabled = 1;
1165 +- hash_for_each(card->ip_htable, i, addr, hnode) {
1166 +- if ((addr->type == QETH_IP_TYPE_NORMAL) &&
1167 +- qeth_l3_is_addr_covered_by_ipato(card, addr))
1168 +- addr->set_flags |=
1169 +- QETH_IPA_SETIP_TAKEOVER_FLAG;
1170 +- }
1171 +- } else if (sysfs_streq(buf, "0")) {
1172 +- card->ipato.enabled = 0;
1173 +- hash_for_each(card->ip_htable, i, addr, hnode) {
1174 +- if (addr->set_flags &
1175 +- QETH_IPA_SETIP_TAKEOVER_FLAG)
1176 +- addr->set_flags &=
1177 +- ~QETH_IPA_SETIP_TAKEOVER_FLAG;
1178 +- }
1179 +- } else
1180 ++ enable = !card->ipato.enabled;
1181 ++ } else if (kstrtobool(buf, &enable)) {
1182 + rc = -EINVAL;
1183 ++ goto out;
1184 ++ }
1185 ++
1186 ++ if (card->ipato.enabled != enable) {
1187 ++ card->ipato.enabled = enable;
1188 ++ spin_lock_bh(&card->ip_lock);
1189 ++ qeth_l3_update_ipato(card);
1190 ++ spin_unlock_bh(&card->ip_lock);
1191 ++ }
1192 + out:
1193 + mutex_unlock(&card->conf_mutex);
1194 + return rc ? rc : count;
1195 +@@ -430,20 +423,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
1196 + const char *buf, size_t count)
1197 + {
1198 + struct qeth_card *card = dev_get_drvdata(dev);
1199 ++ bool invert;
1200 + int rc = 0;
1201 +
1202 + if (!card)
1203 + return -EINVAL;
1204 +
1205 + mutex_lock(&card->conf_mutex);
1206 +- if (sysfs_streq(buf, "toggle"))
1207 +- card->ipato.invert4 = (card->ipato.invert4)? 0 : 1;
1208 +- else if (sysfs_streq(buf, "1"))
1209 +- card->ipato.invert4 = 1;
1210 +- else if (sysfs_streq(buf, "0"))
1211 +- card->ipato.invert4 = 0;
1212 +- else
1213 ++ if (sysfs_streq(buf, "toggle")) {
1214 ++ invert = !card->ipato.invert4;
1215 ++ } else if (kstrtobool(buf, &invert)) {
1216 + rc = -EINVAL;
1217 ++ goto out;
1218 ++ }
1219 ++
1220 ++ if (card->ipato.invert4 != invert) {
1221 ++ card->ipato.invert4 = invert;
1222 ++ spin_lock_bh(&card->ip_lock);
1223 ++ qeth_l3_update_ipato(card);
1224 ++ spin_unlock_bh(&card->ip_lock);
1225 ++ }
1226 ++out:
1227 + mutex_unlock(&card->conf_mutex);
1228 + return rc ? rc : count;
1229 + }
1230 +@@ -609,20 +609,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
1231 + struct device_attribute *attr, const char *buf, size_t count)
1232 + {
1233 + struct qeth_card *card = dev_get_drvdata(dev);
1234 ++ bool invert;
1235 + int rc = 0;
1236 +
1237 + if (!card)
1238 + return -EINVAL;
1239 +
1240 + mutex_lock(&card->conf_mutex);
1241 +- if (sysfs_streq(buf, "toggle"))
1242 +- card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
1243 +- else if (sysfs_streq(buf, "1"))
1244 +- card->ipato.invert6 = 1;
1245 +- else if (sysfs_streq(buf, "0"))
1246 +- card->ipato.invert6 = 0;
1247 +- else
1248 ++ if (sysfs_streq(buf, "toggle")) {
1249 ++ invert = !card->ipato.invert6;
1250 ++ } else if (kstrtobool(buf, &invert)) {
1251 + rc = -EINVAL;
1252 ++ goto out;
1253 ++ }
1254 ++
1255 ++ if (card->ipato.invert6 != invert) {
1256 ++ card->ipato.invert6 = invert;
1257 ++ spin_lock_bh(&card->ip_lock);
1258 ++ qeth_l3_update_ipato(card);
1259 ++ spin_unlock_bh(&card->ip_lock);
1260 ++ }
1261 ++out:
1262 + mutex_unlock(&card->conf_mutex);
1263 + return rc ? rc : count;
1264 + }
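
All three qeth sysfs stores now share one shape: "toggle" flips the current
value, anything else must parse via kstrtobool(), and the IPATO flags are only
recomputed when the value actually changes. A kernel-style sketch of the
pattern (simplified; the locking and the qeth_l3_update_ipato() call follow
the hunks above):

static ssize_t toy_bool_store(bool *flag, const char *buf, size_t count)
{
        bool val;

        if (sysfs_streq(buf, "toggle"))
                val = !*flag;
        else if (kstrtobool(buf, &val)) /* accepts 1/0, y/n, on/off */
                return -EINVAL;

        if (*flag != val) {
                *flag = val;
                /* real code: take ip_lock, call qeth_l3_update_ipato() */
        }
        return count;
}
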
1265 +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
1266 +index bdf0e6e89991..faf50df81622 100644
1267 +--- a/drivers/tty/n_tty.c
1268 ++++ b/drivers/tty/n_tty.c
1269 +@@ -1764,7 +1764,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1270 + {
1271 + struct n_tty_data *ldata = tty->disc_data;
1272 +
1273 +- if (!old || (old->c_lflag ^ tty->termios.c_lflag) & ICANON) {
1274 ++ if (!old || (old->c_lflag ^ tty->termios.c_lflag) & (ICANON | EXTPROC)) {
1275 + bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
1276 + ldata->line_start = ldata->read_tail;
1277 + if (!L_ICANON(tty) || !read_cnt(ldata)) {
1278 +@@ -2427,7 +2427,7 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
1279 + return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
1280 + case TIOCINQ:
1281 + down_write(&tty->termios_rwsem);
1282 +- if (L_ICANON(tty))
1283 ++ if (L_ICANON(tty) && !L_EXTPROC(tty))
1284 + retval = inq_canon(ldata);
1285 + else
1286 + retval = read_cnt(ldata);
1287 +diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
1288 +index c220c2c0893f..e99f1c5b1df6 100644
1289 +--- a/drivers/tty/tty_buffer.c
1290 ++++ b/drivers/tty/tty_buffer.c
1291 +@@ -446,7 +446,7 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
1292 + * Callers other than flush_to_ldisc() need to exclude the kworker
1293 + * from concurrent use of the line discipline, see paste_selection().
1294 + *
1295 +- * Returns the number of bytes not processed
1296 ++ * Returns the number of bytes processed
1297 + */
1298 + int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
1299 + char *f, int count)
1300 +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
1301 +index ba9b29bc441f..7c54a19b20e0 100644
1302 +--- a/drivers/usb/core/config.c
1303 ++++ b/drivers/usb/core/config.c
1304 +@@ -1002,7 +1002,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
1305 + case USB_SSP_CAP_TYPE:
1306 + ssp_cap = (struct usb_ssp_cap_descriptor *)buffer;
1307 + ssac = (le32_to_cpu(ssp_cap->bmAttributes) &
1308 +- USB_SSP_SUBLINK_SPEED_ATTRIBS) + 1;
1309 ++ USB_SSP_SUBLINK_SPEED_ATTRIBS);
1310 + if (length >= USB_DT_USB_SSP_CAP_SIZE(ssac))
1311 + dev->bos->ssp_cap = ssp_cap;
1312 + break;
1313 +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1314 +index 50010282c010..c05c4f877750 100644
1315 +--- a/drivers/usb/core/quirks.c
1316 ++++ b/drivers/usb/core/quirks.c
1317 +@@ -57,10 +57,11 @@ static const struct usb_device_id usb_quirk_list[] = {
1318 + /* Microsoft LifeCam-VX700 v2.0 */
1319 + { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
1320 +
1321 +- /* Logitech HD Pro Webcams C920, C920-C and C930e */
1322 ++ /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
1323 + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
1324 + { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
1325 + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
1326 ++ { USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
1327 +
1328 + /* Logitech ConferenceCam CC3000e */
1329 + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
1330 +@@ -154,6 +155,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1331 + /* Genesys Logic hub, internally used by KY-688 USB 3.1 Type-C Hub */
1332 + { USB_DEVICE(0x05e3, 0x0612), .driver_info = USB_QUIRK_NO_LPM },
1333 +
1334 ++ /* ELSA MicroLink 56K */
1335 ++ { USB_DEVICE(0x05cc, 0x2267), .driver_info = USB_QUIRK_RESET_RESUME },
1336 ++
1337 + /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
1338 + { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
1339 +
1340 +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1341 +index c87ef38e7416..f6782a347cde 100644
1342 +--- a/drivers/usb/host/xhci-pci.c
1343 ++++ b/drivers/usb/host/xhci-pci.c
1344 +@@ -189,6 +189,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1345 + xhci->quirks |= XHCI_TRUST_TX_LENGTH;
1346 + xhci->quirks |= XHCI_BROKEN_STREAMS;
1347 + }
1348 ++ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
1349 ++ pdev->device == 0x0014)
1350 ++ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
1351 + if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
1352 + pdev->device == 0x0015)
1353 + xhci->quirks |= XHCI_RESET_ON_RESUME;
1354 +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
1355 +index 3249f42b4b93..0c743e4cca1e 100644
1356 +--- a/drivers/usb/serial/ftdi_sio.c
1357 ++++ b/drivers/usb/serial/ftdi_sio.c
1358 +@@ -1017,6 +1017,7 @@ static const struct usb_device_id id_table_combined[] = {
1359 + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
1360 + { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
1361 + { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
1362 ++ { USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
1363 + { } /* Terminating entry */
1364 + };
1365 +
1366 +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
1367 +index f9d15bd62785..543d2801632b 100644
1368 +--- a/drivers/usb/serial/ftdi_sio_ids.h
1369 ++++ b/drivers/usb/serial/ftdi_sio_ids.h
1370 +@@ -913,6 +913,12 @@
1371 + #define ICPDAS_I7561U_PID 0x0104
1372 + #define ICPDAS_I7563U_PID 0x0105
1373 +
1374 ++/*
1375 ++ * Airbus Defence and Space
1376 ++ */
1377 ++#define AIRBUS_DS_VID 0x1e8e /* Vendor ID */
1378 ++#define AIRBUS_DS_P8GR 0x6001 /* Tetra P8GR */
1379 ++
1380 + /*
1381 + * RT Systems programming cables for various ham radios
1382 + */
1383 +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1384 +index ffa8ec917ff5..a818c43a02ec 100644
1385 +--- a/drivers/usb/serial/option.c
1386 ++++ b/drivers/usb/serial/option.c
1387 +@@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb);
1388 + /* These Quectel products use Qualcomm's vendor ID */
1389 + #define QUECTEL_PRODUCT_UC20 0x9003
1390 + #define QUECTEL_PRODUCT_UC15 0x9090
1391 ++/* These Yuga products use Qualcomm's vendor ID */
1392 ++#define YUGA_PRODUCT_CLM920_NC5 0x9625
1393 +
1394 + #define QUECTEL_VENDOR_ID 0x2c7c
1395 + /* These Quectel products use Quectel's vendor ID */
1396 +@@ -283,6 +285,7 @@ static void option_instat_callback(struct urb *urb);
1397 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043
1398 + #define TELIT_PRODUCT_LE922_USBCFG5 0x1045
1399 + #define TELIT_PRODUCT_ME910 0x1100
1400 ++#define TELIT_PRODUCT_ME910_DUAL_MODEM 0x1101
1401 + #define TELIT_PRODUCT_LE920 0x1200
1402 + #define TELIT_PRODUCT_LE910 0x1201
1403 + #define TELIT_PRODUCT_LE910_USBCFG4 0x1206
1404 +@@ -648,6 +651,11 @@ static const struct option_blacklist_info telit_me910_blacklist = {
1405 + .reserved = BIT(1) | BIT(3),
1406 + };
1407 +
1408 ++static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
1409 ++ .sendsetup = BIT(0),
1410 ++ .reserved = BIT(3),
1411 ++};
1412 ++
1413 + static const struct option_blacklist_info telit_le910_blacklist = {
1414 + .sendsetup = BIT(0),
1415 + .reserved = BIT(1) | BIT(2),
1416 +@@ -677,6 +685,10 @@ static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
1417 + .reserved = BIT(4) | BIT(5),
1418 + };
1419 +
1420 ++static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
1421 ++ .reserved = BIT(1) | BIT(4),
1422 ++};
1423 ++
1424 + static const struct usb_device_id option_ids[] = {
1425 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
1426 + { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
1427 +@@ -1181,6 +1193,9 @@ static const struct usb_device_id option_ids[] = {
1428 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1429 + { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
1430 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1431 ++ /* Yuga products use Qualcomm vendor ID */
1432 ++ { USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
1433 ++ .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
1434 + /* Quectel products using Quectel vendor ID */
1435 + { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
1436 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1437 +@@ -1247,6 +1262,8 @@ static const struct usb_device_id option_ids[] = {
1438 + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1439 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
1440 + .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
1441 ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
1442 ++ .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
1443 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1444 + .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1445 + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
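
A note on the blacklist bitmaps used here: each bit indexes a USB interface
number, so .sendsetup = BIT(0) applies the send-setup sequence to interface 0,
and the new Yuga entry's .reserved = BIT(1) | BIT(4) keeps the option driver
from binding interfaces 1 and 4, which presumably carry non-serial functions
claimed by other drivers.
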
1446 +diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
1447 +index 4516291df1b8..fb6dc16c754a 100644
1448 +--- a/drivers/usb/serial/qcserial.c
1449 ++++ b/drivers/usb/serial/qcserial.c
1450 +@@ -166,6 +166,8 @@ static const struct usb_device_id id_table[] = {
1451 + {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
1452 + {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
1453 + {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
1454 ++ {DEVICE_SWI(0x1199, 0x9090)}, /* Sierra Wireless EM7565 QDL */
1455 ++ {DEVICE_SWI(0x1199, 0x9091)}, /* Sierra Wireless EM7565 */
1456 + {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
1457 + {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
1458 + {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
1459 +@@ -346,6 +348,7 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
1460 + break;
1461 + case 2:
1462 + dev_dbg(dev, "NMEA GPS interface found\n");
1463 ++ sendsetup = true;
1464 + break;
1465 + case 3:
1466 + dev_dbg(dev, "Modem port found\n");
1467 +diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
1468 +index c653ce533430..1886d8e4f14e 100644
1469 +--- a/drivers/usb/usbip/stub_dev.c
1470 ++++ b/drivers/usb/usbip/stub_dev.c
1471 +@@ -163,8 +163,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
1472 + * step 1?
1473 + */
1474 + if (ud->tcp_socket) {
1475 +- dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
1476 +- ud->tcp_socket);
1477 ++ dev_dbg(&sdev->udev->dev, "shutdown sockfd\n");
1478 + kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
1479 + }
1480 +
1481 +diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
1482 +index af10f7b131a4..325b4c05acdd 100644
1483 +--- a/drivers/usb/usbip/stub_main.c
1484 ++++ b/drivers/usb/usbip/stub_main.c
1485 +@@ -252,11 +252,12 @@ void stub_device_cleanup_urbs(struct stub_device *sdev)
1486 + struct stub_priv *priv;
1487 + struct urb *urb;
1488 +
1489 +- dev_dbg(&sdev->udev->dev, "free sdev %p\n", sdev);
1490 ++ dev_dbg(&sdev->udev->dev, "Stub device cleaning up urbs\n");
1491 +
1492 + while ((priv = stub_priv_pop(sdev))) {
1493 + urb = priv->urb;
1494 +- dev_dbg(&sdev->udev->dev, "free urb %p\n", urb);
1495 ++ dev_dbg(&sdev->udev->dev, "free urb seqnum %lu\n",
1496 ++ priv->seqnum);
1497 + usb_kill_urb(urb);
1498 +
1499 + kmem_cache_free(stub_priv_cache, priv);
1500 +diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
1501 +index 283a9be77a22..5b807185f79e 100644
1502 +--- a/drivers/usb/usbip/stub_rx.c
1503 ++++ b/drivers/usb/usbip/stub_rx.c
1504 +@@ -225,9 +225,6 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
1505 + if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
1506 + continue;
1507 +
1508 +- dev_info(&priv->urb->dev->dev, "unlink urb %p\n",
1509 +- priv->urb);
1510 +-
1511 + /*
1512 + * This matched urb is not completed yet (i.e., be in
1513 + * flight in usb hcd hardware/driver). Now we are
1514 +@@ -266,8 +263,8 @@ static int stub_recv_cmd_unlink(struct stub_device *sdev,
1515 + ret = usb_unlink_urb(priv->urb);
1516 + if (ret != -EINPROGRESS)
1517 + dev_err(&priv->urb->dev->dev,
1518 +- "failed to unlink a urb %p, ret %d\n",
1519 +- priv->urb, ret);
1520 ++ "failed to unlink a urb # %lu, ret %d\n",
1521 ++ priv->seqnum, ret);
1522 +
1523 + return 0;
1524 + }
1525 +diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
1526 +index 87ff94be4235..96aa375b80d9 100644
1527 +--- a/drivers/usb/usbip/stub_tx.c
1528 ++++ b/drivers/usb/usbip/stub_tx.c
1529 +@@ -102,7 +102,7 @@ void stub_complete(struct urb *urb)
1530 + /* link a urb to the queue of tx. */
1531 + spin_lock_irqsave(&sdev->priv_lock, flags);
1532 + if (sdev->ud.tcp_socket == NULL) {
1533 +- usbip_dbg_stub_tx("ignore urb for closed connection %p", urb);
1534 ++ usbip_dbg_stub_tx("ignore urb for closed connection\n");
1535 + /* It will be freed in stub_device_cleanup_urbs(). */
1536 + } else if (priv->unlinking) {
1537 + stub_enqueue_ret_unlink(sdev, priv->seqnum, urb->status);
1538 +@@ -204,8 +204,8 @@ static int stub_send_ret_submit(struct stub_device *sdev)
1539 +
1540 + /* 1. setup usbip_header */
1541 + setup_ret_submit_pdu(&pdu_header, urb);
1542 +- usbip_dbg_stub_tx("setup txdata seqnum: %d urb: %p\n",
1543 +- pdu_header.base.seqnum, urb);
1544 ++ usbip_dbg_stub_tx("setup txdata seqnum: %d\n",
1545 ++ pdu_header.base.seqnum);
1546 + usbip_header_correct_endian(&pdu_header, 1);
1547 +
1548 + iov[iovnum].iov_base = &pdu_header;
1549 +diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
1550 +index 8b232290be6b..e24b24fa0f16 100644
1551 +--- a/drivers/usb/usbip/usbip_common.c
1552 ++++ b/drivers/usb/usbip/usbip_common.c
1553 +@@ -335,13 +335,10 @@ int usbip_recv(struct socket *sock, void *buf, int size)
1554 + char *bp = buf;
1555 + int osize = size;
1556 +
1557 +- usbip_dbg_xmit("enter\n");
1558 +-
1559 +- if (!sock || !buf || !size) {
1560 +- pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
1561 +- size);
1562 ++ if (!sock || !buf || !size)
1563 + return -EINVAL;
1564 +- }
1565 ++
1566 ++ usbip_dbg_xmit("enter\n");
1567 +
1568 + do {
1569 + sock->sk->sk_allocation = GFP_NOIO;
1570 +@@ -354,11 +351,8 @@ int usbip_recv(struct socket *sock, void *buf, int size)
1571 + msg.msg_flags = MSG_NOSIGNAL;
1572 +
1573 + result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
1574 +- if (result <= 0) {
1575 +- pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
1576 +- sock, buf, size, result, total);
1577 ++ if (result <= 0)
1578 + goto err;
1579 +- }
1580 +
1581 + size -= result;
1582 + buf += result;
1583 +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
1584 +index d6dc165e924b..7f161b095176 100644
1585 +--- a/drivers/usb/usbip/vhci_hcd.c
1586 ++++ b/drivers/usb/usbip/vhci_hcd.c
1587 +@@ -506,9 +506,6 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1588 + struct vhci_device *vdev;
1589 + unsigned long flags;
1590 +
1591 +- usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
1592 +- hcd, urb, mem_flags);
1593 +-
1594 + if (portnum > VHCI_HC_PORTS) {
1595 + pr_err("invalid port number %d\n", portnum);
1596 + return -ENODEV;
1597 +@@ -671,8 +668,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1598 + struct vhci_device *vdev;
1599 + unsigned long flags;
1600 +
1601 +- pr_info("dequeue a urb %p\n", urb);
1602 +-
1603 + spin_lock_irqsave(&vhci->lock, flags);
1604 +
1605 + priv = urb->hcpriv;
1606 +@@ -700,7 +695,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1607 + /* tcp connection is closed */
1608 + spin_lock(&vdev->priv_lock);
1609 +
1610 +- pr_info("device %p seems to be disconnected\n", vdev);
1611 + list_del(&priv->list);
1612 + kfree(priv);
1613 + urb->hcpriv = NULL;
1614 +@@ -712,8 +706,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1615 + * vhci_rx will receive RET_UNLINK and give back the URB.
1616 + * Otherwise, we give back it here.
1617 + */
1618 +- pr_info("gives back urb %p\n", urb);
1619 +-
1620 + usb_hcd_unlink_urb_from_ep(hcd, urb);
1621 +
1622 + spin_unlock_irqrestore(&vhci->lock, flags);
1623 +@@ -741,8 +733,6 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1624 +
1625 + unlink->unlink_seqnum = priv->seqnum;
1626 +
1627 +- pr_info("device %p seems to be still connected\n", vdev);
1628 +-
1629 + /* send cmd_unlink and try to cancel the pending URB in the
1630 + * peer */
1631 + list_add_tail(&unlink->list, &vdev->unlink_tx);
1632 +@@ -823,7 +813,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
1633 +
1634 + /* need this? see stub_dev.c */
1635 + if (ud->tcp_socket) {
1636 +- pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
1637 ++ pr_debug("shutdown tcp_socket\n");
1638 + kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
1639 + }
1640 +
1641 +diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
1642 +index fc2d319e2360..5943deeec115 100644
1643 +--- a/drivers/usb/usbip/vhci_rx.c
1644 ++++ b/drivers/usb/usbip/vhci_rx.c
1645 +@@ -37,24 +37,23 @@ struct urb *pickup_urb_and_free_priv(struct vhci_device *vdev, __u32 seqnum)
1646 + urb = priv->urb;
1647 + status = urb->status;
1648 +
1649 +- usbip_dbg_vhci_rx("find urb %p vurb %p seqnum %u\n",
1650 +- urb, priv, seqnum);
1651 ++ usbip_dbg_vhci_rx("find urb seqnum %u\n", seqnum);
1652 +
1653 + switch (status) {
1654 + case -ENOENT:
1655 + /* fall through */
1656 + case -ECONNRESET:
1657 +- dev_info(&urb->dev->dev,
1658 +- "urb %p was unlinked %ssynchronuously.\n", urb,
1659 +- status == -ENOENT ? "" : "a");
1660 ++ dev_dbg(&urb->dev->dev,
1661 ++			"urb seq# %u was unlinked %ssynchronously\n",
1662 ++ seqnum, status == -ENOENT ? "" : "a");
1663 + break;
1664 + case -EINPROGRESS:
1665 + /* no info output */
1666 + break;
1667 + default:
1668 +- dev_info(&urb->dev->dev,
1669 +- "urb %p may be in a error, status %d\n", urb,
1670 +- status);
1671 ++ dev_dbg(&urb->dev->dev,
1672 ++			"urb seq# %u may be in error, status %d\n",
1673 ++ seqnum, status);
1674 + }
1675 +
1676 + list_del(&priv->list);
1677 +@@ -80,8 +79,8 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
1678 + spin_unlock_irqrestore(&vdev->priv_lock, flags);
1679 +
1680 + if (!urb) {
1681 +- pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
1682 +- pr_info("max seqnum %d\n",
1683 ++ pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
1684 ++ pdu->base.seqnum,
1685 + atomic_read(&vhci->seqnum));
1686 + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
1687 + return;
1688 +@@ -104,7 +103,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
1689 + if (usbip_dbg_flag_vhci_rx)
1690 + usbip_dump_urb(urb);
1691 +
1692 +- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
1693 ++ usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
1694 +
1695 + spin_lock_irqsave(&vhci->lock, flags);
1696 + usb_hcd_unlink_urb_from_ep(vhci_to_hcd(vhci), urb);
1697 +@@ -170,7 +169,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
1698 + pr_info("the urb (seqnum %d) was already given back\n",
1699 + pdu->base.seqnum);
1700 + } else {
1701 +- usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
1702 ++ usbip_dbg_vhci_rx("now giveback urb %d\n", pdu->base.seqnum);
1703 +
1704 + /* If unlink is successful, status is -ECONNRESET */
1705 + urb->status = pdu->u.ret_unlink.status;
1706 +diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
1707 +index 3e7878fe2fd4..a9a663a578b6 100644
1708 +--- a/drivers/usb/usbip/vhci_tx.c
1709 ++++ b/drivers/usb/usbip/vhci_tx.c
1710 +@@ -83,7 +83,8 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
1711 + memset(&msg, 0, sizeof(msg));
1712 + memset(&iov, 0, sizeof(iov));
1713 +
1714 +- usbip_dbg_vhci_tx("setup txdata urb %p\n", urb);
1715 ++ usbip_dbg_vhci_tx("setup txdata urb seqnum %lu\n",
1716 ++ priv->seqnum);
1717 +
1718 + /* 1. setup usbip_header */
1719 + setup_cmd_submit_pdu(&pdu_header, urb);
1720 +diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
1721 +index 965cc5693a46..c9447a689522 100644
1722 +--- a/include/linux/cpuhotplug.h
1723 ++++ b/include/linux/cpuhotplug.h
1724 +@@ -48,7 +48,7 @@ enum cpuhp_state {
1725 + CPUHP_ARM_SHMOBILE_SCU_PREPARE,
1726 + CPUHP_SH_SH3X_PREPARE,
1727 + CPUHP_BLK_MQ_PREPARE,
1728 +- CPUHP_TIMERS_DEAD,
1729 ++ CPUHP_TIMERS_PREPARE,
1730 + CPUHP_NOTF_ERR_INJ_PREPARE,
1731 + CPUHP_MIPS_SOC_PREPARE,
1732 + CPUHP_BRINGUP_CPU,
1733 +diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
1734 +index a0649973ee5b..b9dfca557a6c 100644
1735 +--- a/include/linux/ipv6.h
1736 ++++ b/include/linux/ipv6.h
1737 +@@ -246,7 +246,8 @@ struct ipv6_pinfo {
1738 + * 100: prefer care-of address
1739 + */
1740 + dontfrag:1,
1741 +- autoflowlabel:1;
1742 ++ autoflowlabel:1,
1743 ++ autoflowlabel_set:1;
1744 + __u8 min_hopcount;
1745 + __u8 tclass;
1746 + __be32 rcv_flowinfo;
1747 +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
1748 +index 6045d4d58065..25ed105bbcfb 100644
1749 +--- a/include/linux/mlx5/mlx5_ifc.h
1750 ++++ b/include/linux/mlx5/mlx5_ifc.h
1751 +@@ -143,7 +143,7 @@ enum {
1752 + MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771,
1753 + MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772,
1754 + MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773,
1755 +- MLX5_CMD_OP_SET_RATE_LIMIT = 0x780,
1756 ++ MLX5_CMD_OP_SET_PP_RATE_LIMIT = 0x780,
1757 + MLX5_CMD_OP_QUERY_RATE_LIMIT = 0x781,
1758 + MLX5_CMD_OP_ALLOC_PD = 0x800,
1759 + MLX5_CMD_OP_DEALLOC_PD = 0x801,
1760 +@@ -6689,7 +6689,7 @@ struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
1761 + u8 vxlan_udp_port[0x10];
1762 + };
1763 +
1764 +-struct mlx5_ifc_set_rate_limit_out_bits {
1765 ++struct mlx5_ifc_set_pp_rate_limit_out_bits {
1766 + u8 status[0x8];
1767 + u8 reserved_at_8[0x18];
1768 +
1769 +@@ -6698,7 +6698,7 @@ struct mlx5_ifc_set_rate_limit_out_bits {
1770 + u8 reserved_at_40[0x40];
1771 + };
1772 +
1773 +-struct mlx5_ifc_set_rate_limit_in_bits {
1774 ++struct mlx5_ifc_set_pp_rate_limit_in_bits {
1775 + u8 opcode[0x10];
1776 + u8 reserved_at_10[0x10];
1777 +
1778 +@@ -6711,6 +6711,8 @@ struct mlx5_ifc_set_rate_limit_in_bits {
1779 + u8 reserved_at_60[0x20];
1780 +
1781 + u8 rate_limit[0x20];
1782 ++
1783 ++ u8 reserved_at_a0[0x160];
1784 + };
1785 +
1786 + struct mlx5_ifc_access_register_out_bits {
1787 +diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
1788 +index b83507c0640c..e38f471a5402 100644
1789 +--- a/include/linux/ptr_ring.h
1790 ++++ b/include/linux/ptr_ring.h
1791 +@@ -99,12 +99,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
1792 +
1793 + /* Note: callers invoking this in a loop must use a compiler barrier,
1794 + * for example cpu_relax(). Callers must hold producer_lock.
1795 + * Callers are responsible for making sure the pointer that is being queued
1796 + * points to valid data.
1797 + */
1798 + static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
1799 + {
1800 + if (unlikely(!r->size) || r->queue[r->producer])
1801 + return -ENOSPC;
1802 +
1803 ++	/* Make sure the pointer we are storing points to valid data. */
1804 ++ /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
1805 ++ smp_wmb();
1806 ++
1807 + r->queue[r->producer++] = ptr;
1808 + if (unlikely(r->producer >= r->size))
1809 + r->producer = 0;
1810 +@@ -244,6 +250,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
1811 + if (ptr)
1812 + __ptr_ring_discard_one(r);
1813 +
1814 ++	/* Make sure anyone accessing data through the pointer sees up-to-date values. */
1815 ++ /* Pairs with smp_wmb in __ptr_ring_produce. */
1816 ++ smp_read_barrier_depends();
1817 + return ptr;
1818 + }
1819 +
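
The barrier pairing added above is the general publish/consume discipline: a producer must make the pointed-to data visible before it publishes the pointer, and a consumer must be guaranteed to see that data once it has read the pointer. A minimal userspace sketch of the same idea, using C11 release/acquire atomics as a portable stand-in for the kernel's smp_wmb()/smp_read_barrier_depends() (the ring type and function names below are invented for illustration, not taken from ptr_ring):

#include <stdatomic.h>
#include <stddef.h>

#define RING_SIZE 16

struct ring {
	_Atomic(void *) slot[RING_SIZE];	/* NULL means "slot empty" */
	size_t producer;			/* single-producer cursor */
	size_t consumer;			/* single-consumer cursor */
};

/* Producer side: the release store publishes the pointer only after
 * every earlier write to *item is visible. Returns -1 when full. */
static int ring_produce(struct ring *r, void *item)
{
	if (atomic_load_explicit(&r->slot[r->producer],
				 memory_order_relaxed))
		return -1;			/* slot still occupied */
	atomic_store_explicit(&r->slot[r->producer], item,
			      memory_order_release);
	if (++r->producer == RING_SIZE)
		r->producer = 0;
	return 0;
}

/* Consumer side: the acquire load pairs with the release store above,
 * so dereferencing the returned pointer sees the producer's writes. */
static void *ring_consume(struct ring *r)
{
	void *item = atomic_load_explicit(&r->slot[r->consumer],
					  memory_order_acquire);
	if (!item)
		return NULL;			/* ring empty */
	atomic_store_explicit(&r->slot[r->consumer], NULL,
			      memory_order_relaxed);
	if (++r->consumer == RING_SIZE)
		r->consumer = 0;
	return item;
}

On x86 both kernel barriers compile down to little or nothing, which is why a missing pairing like this tends to surface only on weakly ordered architectures.
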
1820 +diff --git a/include/linux/tcp.h b/include/linux/tcp.h
1821 +index 647532b0eb03..f50b717ce644 100644
1822 +--- a/include/linux/tcp.h
1823 ++++ b/include/linux/tcp.h
1824 +@@ -219,7 +219,8 @@ struct tcp_sock {
1825 + } rack;
1826 + u16 advmss; /* Advertised MSS */
1827 + u8 rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
1828 +- unused:7;
1829 ++ is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
1830 ++ unused:6;
1831 + u8 nonagle : 4,/* Disable Nagle algorithm? */
1832 + thin_lto : 1,/* Use linear timeouts for thin streams */
1833 + thin_dupack : 1,/* Fast retransmit on first dupack */
1834 +diff --git a/include/linux/timer.h b/include/linux/timer.h
1835 +index 51d601f192d4..ec86e4e55ea3 100644
1836 +--- a/include/linux/timer.h
1837 ++++ b/include/linux/timer.h
1838 +@@ -274,9 +274,11 @@ unsigned long round_jiffies_up(unsigned long j);
1839 + unsigned long round_jiffies_up_relative(unsigned long j);
1840 +
1841 + #ifdef CONFIG_HOTPLUG_CPU
1842 ++int timers_prepare_cpu(unsigned int cpu);
1843 + int timers_dead_cpu(unsigned int cpu);
1844 + #else
1845 +-#define timers_dead_cpu NULL
1846 ++#define timers_prepare_cpu NULL
1847 ++#define timers_dead_cpu NULL
1848 + #endif
1849 +
1850 + #endif
1851 +diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
1852 +index 4d6ec58a8d45..2edb150f1a4d 100644
1853 +--- a/include/linux/vm_event_item.h
1854 ++++ b/include/linux/vm_event_item.h
1855 +@@ -89,10 +89,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
1856 + #endif
1857 + #endif
1858 + #ifdef CONFIG_DEBUG_TLBFLUSH
1859 +-#ifdef CONFIG_SMP
1860 + NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
1861 + NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
1862 +-#endif /* CONFIG_SMP */
1863 + NR_TLB_LOCAL_FLUSH_ALL,
1864 + NR_TLB_LOCAL_FLUSH_ONE,
1865 + #endif /* CONFIG_DEBUG_TLBFLUSH */
1866 +diff --git a/include/net/ip.h b/include/net/ip.h
1867 +index 51c6b9786c46..0e3dcd5a134d 100644
1868 +--- a/include/net/ip.h
1869 ++++ b/include/net/ip.h
1870 +@@ -33,6 +33,8 @@
1871 + #include <net/flow.h>
1872 + #include <net/flow_dissector.h>
1873 +
1874 ++#define IPV4_MIN_MTU 68 /* RFC 791 */
1875 ++
1876 + struct sock;
1877 +
1878 + struct inet_skb_parm {
1879 +diff --git a/include/net/tcp.h b/include/net/tcp.h
1880 +index fba4fc46871d..caf35e062639 100644
1881 +--- a/include/net/tcp.h
1882 ++++ b/include/net/tcp.h
1883 +@@ -1001,7 +1001,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
1884 + void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
1885 + struct rate_sample *rs);
1886 + void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
1887 +- struct skb_mstamp *now, struct rate_sample *rs);
1888 ++ bool is_sack_reneg, struct skb_mstamp *now, struct rate_sample *rs);
1889 + void tcp_rate_check_app_limited(struct sock *sk);
1890 +
1891 + /* These functions determine how the current flow behaves in respect of SACK
1892 +diff --git a/kernel/cpu.c b/kernel/cpu.c
1893 +index e1436ca4aed0..802eb3361a0a 100644
1894 +--- a/kernel/cpu.c
1895 ++++ b/kernel/cpu.c
1896 +@@ -1309,9 +1309,9 @@ static struct cpuhp_step cpuhp_bp_states[] = {
1897 + * before blk_mq_queue_reinit_notify() from notify_dead(),
1898 + * otherwise a RCU stall occurs.
1899 + */
1900 +- [CPUHP_TIMERS_DEAD] = {
1901 ++ [CPUHP_TIMERS_PREPARE] = {
1902 + .name = "timers:dead",
1903 +- .startup.single = NULL,
1904 ++ .startup.single = timers_prepare_cpu,
1905 + .teardown.single = timers_dead_cpu,
1906 + },
1907 + /* Kicks the plugged cpu into life */
1908 +diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
1909 +index 3bcb61b52f6c..dae1a45be504 100644
1910 +--- a/kernel/time/tick-sched.c
1911 ++++ b/kernel/time/tick-sched.c
1912 +@@ -663,6 +663,11 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
1913 + tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
1914 + }
1915 +
1916 ++static inline bool local_timer_softirq_pending(void)
1917 ++{
1918 ++ return local_softirq_pending() & TIMER_SOFTIRQ;
1919 ++}
1920 ++
1921 + static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
1922 + ktime_t now, int cpu)
1923 + {
1924 +@@ -679,8 +684,18 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
1925 + } while (read_seqretry(&jiffies_lock, seq));
1926 + ts->last_jiffies = basejiff;
1927 +
1928 +- if (rcu_needs_cpu(basemono, &next_rcu) ||
1929 +- arch_needs_cpu() || irq_work_needs_cpu()) {
1930 ++ /*
1931 ++	 * Keep the periodic tick when RCU, the architecture or irq_work
1932 ++	 * requests it.
1933 ++	 * Aside from that, check whether the local timer softirq is
1934 ++	 * pending. If so, it's a bad idea to call get_next_timer_interrupt()
1935 ++	 * because there is an already expired timer, so it will request
1936 ++	 * immediate expiry, which rearms the hardware timer with a
1937 ++ * minimal delta which brings us back to this place
1938 ++ * immediately. Lather, rinse and repeat...
1939 ++ */
1940 ++ if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
1941 ++ irq_work_needs_cpu() || local_timer_softirq_pending()) {
1942 + next_tick = basemono + TICK_NSEC;
1943 + } else {
1944 + /*
1945 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
1946 +index 7d670362891a..e872f7f05e8a 100644
1947 +--- a/kernel/time/timer.c
1948 ++++ b/kernel/time/timer.c
1949 +@@ -849,11 +849,10 @@ static inline struct timer_base *get_timer_cpu_base(u32 tflags, u32 cpu)
1950 + struct timer_base *base = per_cpu_ptr(&timer_bases[BASE_STD], cpu);
1951 +
1952 + /*
1953 +- * If the timer is deferrable and nohz is active then we need to use
1954 +- * the deferrable base.
1955 ++ * If the timer is deferrable and NO_HZ_COMMON is set then we need
1956 ++ * to use the deferrable base.
1957 + */
1958 +- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
1959 +- (tflags & TIMER_DEFERRABLE))
1960 ++ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
1961 + base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
1962 + return base;
1963 + }
1964 +@@ -863,11 +862,10 @@ static inline struct timer_base *get_timer_this_cpu_base(u32 tflags)
1965 + struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1966 +
1967 + /*
1968 +- * If the timer is deferrable and nohz is active then we need to use
1969 +- * the deferrable base.
1970 ++ * If the timer is deferrable and NO_HZ_COMMON is set then we need
1971 ++ * to use the deferrable base.
1972 + */
1973 +- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
1974 +- (tflags & TIMER_DEFERRABLE))
1975 ++ if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && (tflags & TIMER_DEFERRABLE))
1976 + base = this_cpu_ptr(&timer_bases[BASE_DEF]);
1977 + return base;
1978 + }
1979 +@@ -1021,8 +1019,6 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1980 + if (!ret && pending_only)
1981 + goto out_unlock;
1982 +
1983 +- debug_activate(timer, expires);
1984 +-
1985 + new_base = get_target_base(base, timer->flags);
1986 +
1987 + if (base != new_base) {
1988 +@@ -1046,6 +1042,8 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1989 + }
1990 + }
1991 +
1992 ++ debug_activate(timer, expires);
1993 ++
1994 + timer->expires = expires;
1995 + /*
1996 + * If 'idx' was calculated above and the base time did not advance
1997 +@@ -1684,7 +1682,7 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1998 + base->must_forward_clk = false;
1999 +
2000 + __run_timers(base);
2001 +- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
2002 ++ if (IS_ENABLED(CONFIG_NO_HZ_COMMON))
2003 + __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
2004 + }
2005 +
2006 +@@ -1853,6 +1851,21 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h
2007 + }
2008 + }
2009 +
2010 ++int timers_prepare_cpu(unsigned int cpu)
2011 ++{
2012 ++ struct timer_base *base;
2013 ++ int b;
2014 ++
2015 ++ for (b = 0; b < NR_BASES; b++) {
2016 ++ base = per_cpu_ptr(&timer_bases[b], cpu);
2017 ++ base->clk = jiffies;
2018 ++ base->next_expiry = base->clk + NEXT_TIMER_MAX_DELTA;
2019 ++ base->is_idle = false;
2020 ++ base->must_forward_clk = true;
2021 ++ }
2022 ++ return 0;
2023 ++}
2024 ++
2025 + int timers_dead_cpu(unsigned int cpu)
2026 + {
2027 + struct timer_base *old_base;
2028 +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
2029 +index f5c016e8fc88..3e1d11f4fe44 100644
2030 +--- a/kernel/trace/ring_buffer.c
2031 ++++ b/kernel/trace/ring_buffer.c
2032 +@@ -280,6 +280,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
2033 + /* Missed count stored at end */
2034 + #define RB_MISSED_STORED (1 << 30)
2035 +
2036 ++#define RB_MISSED_FLAGS (RB_MISSED_EVENTS|RB_MISSED_STORED)
2037 ++
2038 + struct buffer_data_page {
2039 + u64 time_stamp; /* page time stamp */
2040 + local_t commit; /* write committed index */
2041 +@@ -331,7 +333,9 @@ static void rb_init_page(struct buffer_data_page *bpage)
2042 + */
2043 + size_t ring_buffer_page_len(void *page)
2044 + {
2045 +- return local_read(&((struct buffer_data_page *)page)->commit)
2046 ++ struct buffer_data_page *bpage = page;
2047 ++
2048 ++ return (local_read(&bpage->commit) & ~RB_MISSED_FLAGS)
2049 + + BUF_PAGE_HDR_SIZE;
2050 + }
2051 +
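
ring_buffer_page_len() was returning the raw commit field, whose top bits double as storage for the RB_MISSED_EVENTS/RB_MISSED_STORED status flags, so a page with missed events reported a nonsensical length; the fix masks the flags off before treating the field as a byte count. The pack/unpack idiom in miniature (a hypothetical field layout, not the ring buffer's actual one):

#include <stdint.h>
#include <stdio.h>

#define LEN_MISSED_EVENTS	(1u << 31)	/* status flag */
#define LEN_MISSED_STORED	(1u << 30)	/* status flag */
#define LEN_FLAGS		(LEN_MISSED_EVENTS | LEN_MISSED_STORED)

/* The low bits carry a length; strip the flags before using it. */
static uint32_t field_len(uint32_t field)
{
	return field & ~LEN_FLAGS;
}

int main(void)
{
	uint32_t field = 4096 | LEN_MISSED_STORED;

	printf("missed: %c, length: %u\n",
	       (field & LEN_MISSED_STORED) ? 'y' : 'n', field_len(field));
	return 0;
}
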
2052 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2053 +index 4214cd960b8e..15b02645ce8b 100644
2054 +--- a/kernel/trace/trace.c
2055 ++++ b/kernel/trace/trace.c
2056 +@@ -6181,7 +6181,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2057 + .spd_release = buffer_spd_release,
2058 + };
2059 + struct buffer_ref *ref;
2060 +- int entries, size, i;
2061 ++ int entries, i;
2062 + ssize_t ret = 0;
2063 +
2064 + #ifdef CONFIG_TRACER_MAX_TRACE
2065 +@@ -6232,14 +6232,6 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
2066 + break;
2067 + }
2068 +
2069 +- /*
2070 +- * zero out any left over data, this is going to
2071 +- * user land.
2072 +- */
2073 +- size = ring_buffer_page_len(ref->page);
2074 +- if (size < PAGE_SIZE)
2075 +- memset(ref->page + size, 0, PAGE_SIZE - size);
2076 +-
2077 + page = virt_to_page(ref->page);
2078 +
2079 + spd.pages[i] = page;
2080 +@@ -6963,6 +6955,7 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
2081 + buf->data = alloc_percpu(struct trace_array_cpu);
2082 + if (!buf->data) {
2083 + ring_buffer_free(buf->buffer);
2084 ++ buf->buffer = NULL;
2085 + return -ENOMEM;
2086 + }
2087 +
2088 +@@ -6986,7 +6979,9 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
2089 + allocate_snapshot ? size : 1);
2090 + if (WARN_ON(ret)) {
2091 + ring_buffer_free(tr->trace_buffer.buffer);
2092 ++ tr->trace_buffer.buffer = NULL;
2093 + free_percpu(tr->trace_buffer.data);
2094 ++ tr->trace_buffer.data = NULL;
2095 + return -ENOMEM;
2096 + }
2097 + tr->allocated_snapshot = allocate_snapshot;
2098 +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
2099 +index 5d4006e589cb..4f831225d34f 100644
2100 +--- a/net/bridge/br_netlink.c
2101 ++++ b/net/bridge/br_netlink.c
2102 +@@ -1092,19 +1092,20 @@ static int br_dev_newlink(struct net *src_net, struct net_device *dev,
2103 + struct net_bridge *br = netdev_priv(dev);
2104 + int err;
2105 +
2106 ++ err = register_netdevice(dev);
2107 ++ if (err)
2108 ++ return err;
2109 ++
2110 + if (tb[IFLA_ADDRESS]) {
2111 + spin_lock_bh(&br->lock);
2112 + br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
2113 + spin_unlock_bh(&br->lock);
2114 + }
2115 +
2116 +- err = register_netdevice(dev);
2117 +- if (err)
2118 +- return err;
2119 +-
2120 + err = br_changelink(dev, tb, data);
2121 + if (err)
2122 +- unregister_netdevice(dev);
2123 ++ br_dev_delete(dev, NULL);
2124 ++
2125 + return err;
2126 + }
2127 +
2128 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
2129 +index 7001da910c6b..b7efe2f19f83 100644
2130 +--- a/net/core/net_namespace.c
2131 ++++ b/net/core/net_namespace.c
2132 +@@ -263,7 +263,7 @@ struct net *get_net_ns_by_id(struct net *net, int id)
2133 + spin_lock_irqsave(&net->nsid_lock, flags);
2134 + peer = idr_find(&net->netns_ids, id);
2135 + if (peer)
2136 +- get_net(peer);
2137 ++ peer = maybe_get_net(peer);
2138 + spin_unlock_irqrestore(&net->nsid_lock, flags);
2139 + rcu_read_unlock();
2140 +
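
The get_net_ns_by_id() change from get_net() to maybe_get_net() is the usual "get unless zero" lookup rule: when an object can reach refcount zero while still findable in an ID table, a lookup may hand it out only if it can atomically prove the count is still positive. A rough userspace analogue with C11 atomics (struct and function names invented):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int refcount;	/* 0 means the object is being destroyed */
};

/* Take a reference unless the count has already dropped to zero.
 * Returns true if the caller now holds a reference. */
static bool obj_get_unless_zero(struct obj *o)
{
	int old = atomic_load_explicit(&o->refcount,
				       memory_order_relaxed);
	do {
		if (old == 0)
			return false;	/* too late: do not resurrect */
	} while (!atomic_compare_exchange_weak_explicit(
			&o->refcount, &old, old + 1,
			memory_order_acquire, memory_order_relaxed));
	return true;
}
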
2141 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2142 +index aec5605944d3..a64515583bc1 100644
2143 +--- a/net/core/skbuff.c
2144 ++++ b/net/core/skbuff.c
2145 +@@ -3823,7 +3823,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
2146 + struct sock *sk = skb->sk;
2147 +
2148 + if (!skb_may_tx_timestamp(sk, false))
2149 +- return;
2150 ++ goto err;
2151 +
2152 + /* Take a reference to prevent skb_orphan() from freeing the socket,
2153 + * but only if the socket refcount is not zero.
2154 +@@ -3832,7 +3832,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
2155 + *skb_hwtstamps(skb) = *hwtstamps;
2156 + __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
2157 + sock_put(sk);
2158 ++ return;
2159 + }
2160 ++
2161 ++err:
2162 ++ kfree_skb(skb);
2163 + }
2164 + EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
2165 +
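
The skb_complete_tx_timestamp() change closes a leak by making every exit path consume the skb exactly once: the early return becomes a jump to an err label that frees it. A generic sketch of that label-based cleanup style, with a file and buffer standing in for the skb (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

static int process(const char *path)
{
	FILE *f;
	char *buf = NULL;
	int ret = -1;

	f = fopen(path, "r");
	if (!f)
		goto out;		/* nothing acquired yet */

	buf = malloc(4096);
	if (!buf)
		goto out_close;		/* release in reverse order */

	if (fread(buf, 1, 4096, f) == 0)
		goto out_free;		/* failure shares the cleanup */

	ret = 0;			/* success falls through too */
out_free:
	free(buf);
out_close:
	fclose(f);
out:
	return ret;
}
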
2166 +diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
2167 +index 062a67ca9a21..f08f984ebc56 100644
2168 +--- a/net/ipv4/devinet.c
2169 ++++ b/net/ipv4/devinet.c
2170 +@@ -1380,7 +1380,7 @@ static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
2171 +
2172 + static bool inetdev_valid_mtu(unsigned int mtu)
2173 + {
2174 +- return mtu >= 68;
2175 ++ return mtu >= IPV4_MIN_MTU;
2176 + }
2177 +
2178 + static void inetdev_send_gratuitous_arp(struct net_device *dev,
2179 +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
2180 +index 968d8e165e3d..ffae472e250a 100644
2181 +--- a/net/ipv4/fib_frontend.c
2182 ++++ b/net/ipv4/fib_frontend.c
2183 +@@ -1253,14 +1253,19 @@ static int __net_init ip_fib_net_init(struct net *net)
2184 +
2185 + static void ip_fib_net_exit(struct net *net)
2186 + {
2187 +- unsigned int i;
2188 ++ int i;
2189 +
2190 + rtnl_lock();
2191 + #ifdef CONFIG_IP_MULTIPLE_TABLES
2192 + RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
2193 + RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
2194 + #endif
2195 +- for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
2196 ++ /* Destroy the tables in reverse order to guarantee that the
2197 ++ * local table, ID 255, is destroyed before the main table, ID
2198 ++ * 254. This is necessary as the local table may contain
2199 ++ * references to data contained in the main table.
2200 ++ */
2201 ++ for (i = FIB_TABLE_HASHSZ - 1; i >= 0; i--) {
2202 + struct hlist_head *head = &net->ipv4.fib_table_hash[i];
2203 + struct hlist_node *tmp;
2204 + struct fib_table *tb;
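
Note the companion type change above: ip_fib_net_exit()'s index goes from unsigned int to int, because a countdown loop guarded by i >= 0 only terminates if i can actually become negative; with an unsigned index the condition is always true and the loop wraps around forever. For instance:

#include <stdio.h>

#define TABLE_HASHSZ 8

int main(void)
{
	int i;	/* must be signed; unsigned i makes i >= 0 a tautology */

	/* walk the buckets in reverse, highest ID first */
	for (i = TABLE_HASHSZ - 1; i >= 0; i--)
		printf("flushing bucket %d\n", i);
	return 0;
}
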
2205 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
2206 +index 08575e3bd135..7bff0c65046f 100644
2207 +--- a/net/ipv4/igmp.c
2208 ++++ b/net/ipv4/igmp.c
2209 +@@ -89,6 +89,7 @@
2210 + #include <linux/rtnetlink.h>
2211 + #include <linux/times.h>
2212 + #include <linux/pkt_sched.h>
2213 ++#include <linux/byteorder/generic.h>
2214 +
2215 + #include <net/net_namespace.h>
2216 + #include <net/arp.h>
2217 +@@ -321,6 +322,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
2218 + return scount;
2219 + }
2220 +
2221 ++/* source address selection per RFC 3376 section 4.2.13 */
2222 ++static __be32 igmpv3_get_srcaddr(struct net_device *dev,
2223 ++ const struct flowi4 *fl4)
2224 ++{
2225 ++ struct in_device *in_dev = __in_dev_get_rcu(dev);
2226 ++
2227 ++ if (!in_dev)
2228 ++ return htonl(INADDR_ANY);
2229 ++
2230 ++ for_ifa(in_dev) {
2231 ++ if (inet_ifa_match(fl4->saddr, ifa))
2232 ++ return fl4->saddr;
2233 ++ } endfor_ifa(in_dev);
2234 ++
2235 ++ return htonl(INADDR_ANY);
2236 ++}
2237 ++
2238 + static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
2239 + {
2240 + struct sk_buff *skb;
2241 +@@ -368,7 +386,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
2242 + pip->frag_off = htons(IP_DF);
2243 + pip->ttl = 1;
2244 + pip->daddr = fl4.daddr;
2245 +- pip->saddr = fl4.saddr;
2246 ++ pip->saddr = igmpv3_get_srcaddr(dev, &fl4);
2247 + pip->protocol = IPPROTO_IGMP;
2248 + pip->tot_len = 0; /* filled in later */
2249 + ip_select_ident(net, skb, NULL);
2250 +@@ -404,16 +422,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
2251 + }
2252 +
2253 + static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
2254 +- int type, struct igmpv3_grec **ppgr)
2255 ++ int type, struct igmpv3_grec **ppgr, unsigned int mtu)
2256 + {
2257 + struct net_device *dev = pmc->interface->dev;
2258 + struct igmpv3_report *pih;
2259 + struct igmpv3_grec *pgr;
2260 +
2261 +- if (!skb)
2262 +- skb = igmpv3_newpack(dev, dev->mtu);
2263 +- if (!skb)
2264 +- return NULL;
2265 ++ if (!skb) {
2266 ++ skb = igmpv3_newpack(dev, mtu);
2267 ++ if (!skb)
2268 ++ return NULL;
2269 ++ }
2270 + pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
2271 + pgr->grec_type = type;
2272 + pgr->grec_auxwords = 0;
2273 +@@ -436,12 +455,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
2274 + struct igmpv3_grec *pgr = NULL;
2275 + struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
2276 + int scount, stotal, first, isquery, truncate;
2277 ++ unsigned int mtu;
2278 +
2279 + if (pmc->multiaddr == IGMP_ALL_HOSTS)
2280 + return skb;
2281 + if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
2282 + return skb;
2283 +
2284 ++ mtu = READ_ONCE(dev->mtu);
2285 ++ if (mtu < IPV4_MIN_MTU)
2286 ++ return skb;
2287 ++
2288 + isquery = type == IGMPV3_MODE_IS_INCLUDE ||
2289 + type == IGMPV3_MODE_IS_EXCLUDE;
2290 + truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
2291 +@@ -462,7 +486,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
2292 + AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
2293 + if (skb)
2294 + igmpv3_sendpack(skb);
2295 +- skb = igmpv3_newpack(dev, dev->mtu);
2296 ++ skb = igmpv3_newpack(dev, mtu);
2297 + }
2298 + }
2299 + first = 1;
2300 +@@ -498,12 +522,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
2301 + pgr->grec_nsrcs = htons(scount);
2302 + if (skb)
2303 + igmpv3_sendpack(skb);
2304 +- skb = igmpv3_newpack(dev, dev->mtu);
2305 ++ skb = igmpv3_newpack(dev, mtu);
2306 + first = 1;
2307 + scount = 0;
2308 + }
2309 + if (first) {
2310 +- skb = add_grhead(skb, pmc, type, &pgr);
2311 ++ skb = add_grhead(skb, pmc, type, &pgr, mtu);
2312 + first = 0;
2313 + }
2314 + if (!skb)
2315 +@@ -538,7 +562,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
2316 + igmpv3_sendpack(skb);
2317 + skb = NULL; /* add_grhead will get a new one */
2318 + }
2319 +- skb = add_grhead(skb, pmc, type, &pgr);
2320 ++ skb = add_grhead(skb, pmc, type, &pgr, mtu);
2321 + }
2322 + }
2323 + if (pgr)
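
Two things happen with the MTU in add_grec() above: it is read from dev->mtu exactly once via READ_ONCE() and validated against IPV4_MIN_MTU, and that one snapshot is then threaded through igmpv3_newpack()/add_grhead(), so a concurrent MTU change can no longer give one report-building pass two different sizes. A userspace-flavored sketch of the snapshot-and-validate idiom, with a volatile access standing in for READ_ONCE() (names invented):

#include <stdio.h>

#define MIN_MTU 68	/* same floor the patch gives IPV4_MIN_MTU */

struct device {
	unsigned int mtu;	/* may be rewritten concurrently */
};

static int build_report(struct device *dev)
{
	/* One read of the shared field; every buffer in this pass is
	 * sized against the same local copy. */
	unsigned int mtu = *(volatile unsigned int *)&dev->mtu;

	if (mtu < MIN_MTU)
		return -1;	/* refuse implausible values up front */

	printf("sizing all packets in this pass for mtu %u\n", mtu);
	return 0;
}
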
2324 +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
2325 +index bd7f1836bb70..96536a0d6e2d 100644
2326 +--- a/net/ipv4/ip_tunnel.c
2327 ++++ b/net/ipv4/ip_tunnel.c
2328 +@@ -346,8 +346,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
2329 + dev->needed_headroom = t_hlen + hlen;
2330 + mtu -= (dev->hard_header_len + t_hlen);
2331 +
2332 +- if (mtu < 68)
2333 +- mtu = 68;
2334 ++ if (mtu < IPV4_MIN_MTU)
2335 ++ mtu = IPV4_MIN_MTU;
2336 +
2337 + return mtu;
2338 + }
2339 +diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
2340 +index 9879b73d5565..59d8770055ed 100644
2341 +--- a/net/ipv4/raw.c
2342 ++++ b/net/ipv4/raw.c
2343 +@@ -502,11 +502,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2344 + int err;
2345 + struct ip_options_data opt_copy;
2346 + struct raw_frag_vec rfv;
2347 ++ int hdrincl;
2348 +
2349 + err = -EMSGSIZE;
2350 + if (len > 0xFFFF)
2351 + goto out;
2352 +
2353 ++ /* hdrincl should be READ_ONCE(inet->hdrincl)
2354 ++ * but READ_ONCE() doesn't work with bit fields
2355 ++ */
2356 ++ hdrincl = inet->hdrincl;
2357 + /*
2358 + * Check the flags.
2359 + */
2360 +@@ -582,7 +587,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2361 + /* Linux does not mangle headers on raw sockets,
2362 + * so that IP options + IP_HDRINCL is non-sense.
2363 + */
2364 +- if (inet->hdrincl)
2365 ++ if (hdrincl)
2366 + goto done;
2367 + if (ipc.opt->opt.srr) {
2368 + if (!daddr)
2369 +@@ -604,12 +609,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2370 +
2371 + flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
2372 + RT_SCOPE_UNIVERSE,
2373 +- inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
2374 ++ hdrincl ? IPPROTO_RAW : sk->sk_protocol,
2375 + inet_sk_flowi_flags(sk) |
2376 +- (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
2377 ++ (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
2378 + daddr, saddr, 0, 0);
2379 +
2380 +- if (!inet->hdrincl) {
2381 ++ if (!hdrincl) {
2382 + rfv.msg = msg;
2383 + rfv.hlen = 0;
2384 +
2385 +@@ -634,7 +639,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
2386 + goto do_confirm;
2387 + back_from_confirm:
2388 +
2389 +- if (inet->hdrincl)
2390 ++ if (hdrincl)
2391 + err = raw_send_hdrinc(sk, &fl4, msg, len,
2392 + &rt, msg->msg_flags, &ipc.sockc);
2393 +
2394 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2395 +index dd33c785ce16..05d2bde00864 100644
2396 +--- a/net/ipv4/tcp.c
2397 ++++ b/net/ipv4/tcp.c
2398 +@@ -2297,6 +2297,7 @@ int tcp_disconnect(struct sock *sk, int flags)
2399 + tp->snd_cwnd_cnt = 0;
2400 + tp->window_clamp = 0;
2401 + tcp_set_ca_state(sk, TCP_CA_Open);
2402 ++ tp->is_sack_reneg = 0;
2403 + tcp_clear_retrans(tp);
2404 + inet_csk_delack_init(sk);
2405 + /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
2406 +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
2407 +index cb8db347c680..97f9cac98348 100644
2408 +--- a/net/ipv4/tcp_bbr.c
2409 ++++ b/net/ipv4/tcp_bbr.c
2410 +@@ -81,7 +81,8 @@ struct bbr {
2411 + u32 lt_last_lost; /* LT intvl start: tp->lost */
2412 + u32 pacing_gain:10, /* current gain for setting pacing rate */
2413 + cwnd_gain:10, /* current gain for setting cwnd */
2414 +- full_bw_cnt:3, /* number of rounds without large bw gains */
2415 ++ full_bw_reached:1, /* reached full bw in Startup? */
2416 ++ full_bw_cnt:2, /* number of rounds without large bw gains */
2417 + cycle_idx:3, /* current index in pacing_gain cycle array */
2418 + has_seen_rtt:1, /* have we seen an RTT sample yet? */
2419 + unused_b:5;
2420 +@@ -151,7 +152,7 @@ static bool bbr_full_bw_reached(const struct sock *sk)
2421 + {
2422 + const struct bbr *bbr = inet_csk_ca(sk);
2423 +
2424 +- return bbr->full_bw_cnt >= bbr_full_bw_cnt;
2425 ++ return bbr->full_bw_reached;
2426 + }
2427 +
2428 + /* Return the windowed max recent bandwidth sample, in pkts/uS << BW_SCALE. */
2429 +@@ -688,6 +689,7 @@ static void bbr_check_full_bw_reached(struct sock *sk,
2430 + return;
2431 + }
2432 + ++bbr->full_bw_cnt;
2433 ++ bbr->full_bw_reached = bbr->full_bw_cnt >= bbr_full_bw_cnt;
2434 + }
2435 +
2436 + /* If pipe is probably full, drain the queue and then enter steady-state. */
2437 +@@ -821,6 +823,7 @@ static void bbr_init(struct sock *sk)
2438 + bbr->restore_cwnd = 0;
2439 + bbr->round_start = 0;
2440 + bbr->idle_restart = 0;
2441 ++ bbr->full_bw_reached = 0;
2442 + bbr->full_bw = 0;
2443 + bbr->full_bw_cnt = 0;
2444 + bbr->cycle_mstamp.v64 = 0;
2445 +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2446 +index 05255a286888..2f107e46355c 100644
2447 +--- a/net/ipv4/tcp_input.c
2448 ++++ b/net/ipv4/tcp_input.c
2449 +@@ -1966,6 +1966,8 @@ void tcp_enter_loss(struct sock *sk)
2450 + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
2451 + tp->sacked_out = 0;
2452 + tp->fackets_out = 0;
2453 ++ /* Mark SACK reneging until we recover from this loss event. */
2454 ++ tp->is_sack_reneg = 1;
2455 + }
2456 + tcp_clear_all_retrans_hints(tp);
2457 +
2458 +@@ -2463,6 +2465,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
2459 + return true;
2460 + }
2461 + tcp_set_ca_state(sk, TCP_CA_Open);
2462 ++ tp->is_sack_reneg = 0;
2463 + return false;
2464 + }
2465 +
2466 +@@ -2494,8 +2497,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2467 + NET_INC_STATS(sock_net(sk),
2468 + LINUX_MIB_TCPSPURIOUSRTOS);
2469 + inet_csk(sk)->icsk_retransmits = 0;
2470 +- if (frto_undo || tcp_is_sack(tp))
2471 ++ if (frto_undo || tcp_is_sack(tp)) {
2472 + tcp_set_ca_state(sk, TCP_CA_Open);
2473 ++ tp->is_sack_reneg = 0;
2474 ++ }
2475 + return true;
2476 + }
2477 + return false;
2478 +@@ -3589,6 +3594,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
2479 + struct tcp_sacktag_state sack_state;
2480 + struct rate_sample rs = { .prior_delivered = 0 };
2481 + u32 prior_snd_una = tp->snd_una;
2482 ++ bool is_sack_reneg = tp->is_sack_reneg;
2483 + u32 ack_seq = TCP_SKB_CB(skb)->seq;
2484 + u32 ack = TCP_SKB_CB(skb)->ack_seq;
2485 + bool is_dupack = false;
2486 +@@ -3711,7 +3717,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
2487 + tcp_schedule_loss_probe(sk);
2488 + delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
2489 + lost = tp->lost - lost; /* freshly marked lost */
2490 +- tcp_rate_gen(sk, delivered, lost, &now, &rs);
2491 ++ tcp_rate_gen(sk, delivered, lost, is_sack_reneg, &now, &rs);
2492 + tcp_cong_control(sk, ack, delivered, flag, &rs);
2493 + tcp_xmit_recovery(sk, rexmit);
2494 + return 1;
2495 +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2496 +index d577ec07a0d8..b3960738464e 100644
2497 +--- a/net/ipv4/tcp_ipv4.c
2498 ++++ b/net/ipv4/tcp_ipv4.c
2499 +@@ -828,7 +828,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
2500 + tcp_time_stamp,
2501 + req->ts_recent,
2502 + 0,
2503 +- tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
2504 ++ tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
2505 + AF_INET),
2506 + inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
2507 + ip_hdr(skb)->tos);
2508 +diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
2509 +index 9be1581a5a08..18309f58ab8d 100644
2510 +--- a/net/ipv4/tcp_rate.c
2511 ++++ b/net/ipv4/tcp_rate.c
2512 +@@ -106,7 +106,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
2513 +
2514 + /* Update the connection delivery information and generate a rate sample. */
2515 + void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
2516 +- struct skb_mstamp *now, struct rate_sample *rs)
2517 ++ bool is_sack_reneg, struct skb_mstamp *now, struct rate_sample *rs)
2518 + {
2519 + struct tcp_sock *tp = tcp_sk(sk);
2520 + u32 snd_us, ack_us;
2521 +@@ -124,8 +124,12 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
2522 +
2523 + rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
2524 + rs->losses = lost; /* freshly marked lost */
2525 +- /* Return an invalid sample if no timing information is available. */
2526 +- if (!rs->prior_mstamp.v64) {
2527 ++ /* Return an invalid sample if no timing information is available or
2528 ++ * in recovery from loss with SACK reneging. Rate samples taken during
2529 ++ * a SACK reneging event may overestimate bw by including packets that
2530 ++ * were SACKed before the reneg.
2531 ++ */
2532 ++ if (!rs->prior_mstamp.v64 || is_sack_reneg) {
2533 + rs->delivered = -1;
2534 + rs->interval_us = -1;
2535 + return;
2536 +diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
2537 +index 8285a1c108c9..5cad76f87536 100644
2538 +--- a/net/ipv6/af_inet6.c
2539 ++++ b/net/ipv6/af_inet6.c
2540 +@@ -209,7 +209,6 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
2541 + np->mcast_hops = IPV6_DEFAULT_MCASTHOPS;
2542 + np->mc_loop = 1;
2543 + np->pmtudisc = IPV6_PMTUDISC_WANT;
2544 +- np->autoflowlabel = ip6_default_np_autolabel(sock_net(sk));
2545 + sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
2546 +
2547 + /* Init the ipv4 part of the socket since we can have sockets
2548 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2549 +index 6e01c9a8dfd3..506efba33a89 100644
2550 +--- a/net/ipv6/ip6_output.c
2551 ++++ b/net/ipv6/ip6_output.c
2552 +@@ -156,6 +156,14 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2553 + !(IP6CB(skb)->flags & IP6SKB_REROUTED));
2554 + }
2555 +
2556 ++static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
2557 ++{
2558 ++ if (!np->autoflowlabel_set)
2559 ++ return ip6_default_np_autolabel(net);
2560 ++ else
2561 ++ return np->autoflowlabel;
2562 ++}
2563 ++
2564 + /*
2565 + * xmit an sk_buff (used by TCP, SCTP and DCCP)
2566 + * Note : socket lock is not held for SYNACK packets, but might be modified
2567 +@@ -219,7 +227,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
2568 + hlimit = ip6_dst_hoplimit(dst);
2569 +
2570 + ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
2571 +- np->autoflowlabel, fl6));
2572 ++ ip6_autoflowlabel(net, np), fl6));
2573 +
2574 + hdr->payload_len = htons(seg_len);
2575 + hdr->nexthdr = proto;
2576 +@@ -1691,7 +1699,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
2577 +
2578 + ip6_flow_hdr(hdr, v6_cork->tclass,
2579 + ip6_make_flowlabel(net, skb, fl6->flowlabel,
2580 +- np->autoflowlabel, fl6));
2581 ++ ip6_autoflowlabel(net, np), fl6));
2582 + hdr->hop_limit = v6_cork->hop_limit;
2583 + hdr->nexthdr = proto;
2584 + hdr->saddr = fl6->saddr;
2585 +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2586 +index 12b2fd512f32..11d22d642488 100644
2587 +--- a/net/ipv6/ip6_tunnel.c
2588 ++++ b/net/ipv6/ip6_tunnel.c
2589 +@@ -911,7 +911,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
2590 + if (t->parms.collect_md) {
2591 + tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
2592 + if (!tun_dst)
2593 +- return 0;
2594 ++ goto drop;
2595 + }
2596 + ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
2597 + log_ecn_error);
2598 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
2599 +index 38bee173dc2b..6e3871c7f8f7 100644
2600 +--- a/net/ipv6/ipv6_sockglue.c
2601 ++++ b/net/ipv6/ipv6_sockglue.c
2602 +@@ -874,6 +874,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
2603 + break;
2604 + case IPV6_AUTOFLOWLABEL:
2605 + np->autoflowlabel = valbool;
2606 ++ np->autoflowlabel_set = 1;
2607 + retv = 0;
2608 + break;
2609 + }
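
A one-bit socket option cannot distinguish "explicitly set to 0" from "never set", which is why the patch pairs autoflowlabel with an autoflowlabel_set bit: setsockopt() records that a choice was made, and ip6_autoflowlabel() falls back to the per-namespace default only when it was not. A compact standalone sketch of the tri-state pattern (same field names as the patch, surrounding types invented):

#include <stdbool.h>
#include <stdio.h>

struct sock_opts {
	unsigned autoflowlabel:1;	/* the user's value, if any */
	unsigned autoflowlabel_set:1;	/* was it ever set explicitly? */
};

static bool net_default_autoflowlabel = true;	/* namespace default */

static void set_autoflowlabel(struct sock_opts *o, bool val)
{
	o->autoflowlabel = val;
	o->autoflowlabel_set = 1;	/* remember the explicit choice */
}

static bool get_autoflowlabel(const struct sock_opts *o)
{
	if (!o->autoflowlabel_set)
		return net_default_autoflowlabel;	/* follow default */
	return o->autoflowlabel;			/* honor the user */
}

int main(void)
{
	struct sock_opts a = { 0 };	/* never touched */
	struct sock_opts b = { 0 };

	set_autoflowlabel(&b, true);
	net_default_autoflowlabel = false;	/* default changes later */
	printf("a=%d b=%d\n", get_autoflowlabel(&a), get_autoflowlabel(&b));
	return 0;	/* prints a=0 b=1: only a tracks the default */
}
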
2610 +diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
2611 +index 1bdc703cb966..ca8fac6e5a09 100644
2612 +--- a/net/ipv6/mcast.c
2613 ++++ b/net/ipv6/mcast.c
2614 +@@ -1682,16 +1682,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
2615 + }
2616 +
2617 + static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2618 +- int type, struct mld2_grec **ppgr)
2619 ++ int type, struct mld2_grec **ppgr, unsigned int mtu)
2620 + {
2621 +- struct net_device *dev = pmc->idev->dev;
2622 + struct mld2_report *pmr;
2623 + struct mld2_grec *pgr;
2624 +
2625 +- if (!skb)
2626 +- skb = mld_newpack(pmc->idev, dev->mtu);
2627 +- if (!skb)
2628 +- return NULL;
2629 ++ if (!skb) {
2630 ++ skb = mld_newpack(pmc->idev, mtu);
2631 ++ if (!skb)
2632 ++ return NULL;
2633 ++ }
2634 + pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));
2635 + pgr->grec_type = type;
2636 + pgr->grec_auxwords = 0;
2637 +@@ -1714,10 +1714,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2638 + struct mld2_grec *pgr = NULL;
2639 + struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
2640 + int scount, stotal, first, isquery, truncate;
2641 ++ unsigned int mtu;
2642 +
2643 + if (pmc->mca_flags & MAF_NOREPORT)
2644 + return skb;
2645 +
2646 ++ mtu = READ_ONCE(dev->mtu);
2647 ++ if (mtu < IPV6_MIN_MTU)
2648 ++ return skb;
2649 ++
2650 + isquery = type == MLD2_MODE_IS_INCLUDE ||
2651 + type == MLD2_MODE_IS_EXCLUDE;
2652 + truncate = type == MLD2_MODE_IS_EXCLUDE ||
2653 +@@ -1738,7 +1743,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2654 + AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
2655 + if (skb)
2656 + mld_sendpack(skb);
2657 +- skb = mld_newpack(idev, dev->mtu);
2658 ++ skb = mld_newpack(idev, mtu);
2659 + }
2660 + }
2661 + first = 1;
2662 +@@ -1774,12 +1779,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2663 + pgr->grec_nsrcs = htons(scount);
2664 + if (skb)
2665 + mld_sendpack(skb);
2666 +- skb = mld_newpack(idev, dev->mtu);
2667 ++ skb = mld_newpack(idev, mtu);
2668 + first = 1;
2669 + scount = 0;
2670 + }
2671 + if (first) {
2672 +- skb = add_grhead(skb, pmc, type, &pgr);
2673 ++ skb = add_grhead(skb, pmc, type, &pgr, mtu);
2674 + first = 0;
2675 + }
2676 + if (!skb)
2677 +@@ -1814,7 +1819,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
2678 + mld_sendpack(skb);
2679 + skb = NULL; /* add_grhead will get a new one */
2680 + }
2681 +- skb = add_grhead(skb, pmc, type, &pgr);
2682 ++ skb = add_grhead(skb, pmc, type, &pgr, mtu);
2683 + }
2684 + }
2685 + if (pgr)
2686 +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
2687 +index 7ac2365aa6fb..eb624547382f 100644
2688 +--- a/net/ipv6/tcp_ipv6.c
2689 ++++ b/net/ipv6/tcp_ipv6.c
2690 +@@ -962,7 +962,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
2691 + tcp_rsk(req)->rcv_nxt,
2692 + req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
2693 + tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
2694 +- tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
2695 ++ tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr),
2696 + 0, 0);
2697 + }
2698 +
2699 +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2700 +index 1ff497bd9c20..e1c123d4cdda 100644
2701 +--- a/net/netlink/af_netlink.c
2702 ++++ b/net/netlink/af_netlink.c
2703 +@@ -261,6 +261,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
2704 + struct sock *sk = skb->sk;
2705 + int ret = -ENOMEM;
2706 +
2707 ++ if (!net_eq(dev_net(dev), sock_net(sk)))
2708 ++ return 0;
2709 ++
2710 + dev_hold(dev);
2711 +
2712 + if (is_vmalloc_addr(skb->head))
2713 +diff --git a/net/rds/send.c b/net/rds/send.c
2714 +index ad247dc71ebb..ef53d164e146 100644
2715 +--- a/net/rds/send.c
2716 ++++ b/net/rds/send.c
2717 +@@ -1006,6 +1006,9 @@ static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
2718 + continue;
2719 +
2720 + if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
2721 ++ if (cmsg->cmsg_len <
2722 ++ CMSG_LEN(sizeof(struct rds_rdma_args)))
2723 ++ return -EINVAL;
2724 + args = CMSG_DATA(cmsg);
2725 + *rdma_bytes += args->remote_vec.bytes;
2726 + }
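
The RDS check guards a cast: CMSG_DATA() is bare pointer arithmetic with no bounds checking, so cmsg_len must be validated against CMSG_LEN(sizeof(payload)) before the payload may be dereferenced. A userspace sketch of the same validation, using the standard <sys/socket.h> macros (the payload struct is invented):

#include <sys/socket.h>

struct rdma_args {		/* illustrative payload layout */
	unsigned long remote_bytes;
};

static int sum_rdma_bytes(struct msghdr *msg, unsigned long *bytes)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg;
	     cmsg = CMSG_NXTHDR(msg, cmsg)) {
		struct rdma_args *args;

		/* Reject short control messages before touching the
		 * payload; a truncated cmsg would read past the end. */
		if (cmsg->cmsg_len < CMSG_LEN(sizeof(*args)))
			return -1;
		args = (struct rdma_args *)CMSG_DATA(cmsg);
		*bytes += args->remote_bytes;
	}
	return 0;
}
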
2727 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2728 +index c2ab864da50d..7181ce6c62bf 100644
2729 +--- a/net/sctp/socket.c
2730 ++++ b/net/sctp/socket.c
2731 +@@ -4246,7 +4246,7 @@ static int sctp_init_sock(struct sock *sk)
2732 + SCTP_DBG_OBJCNT_INC(sock);
2733 +
2734 + local_bh_disable();
2735 +- percpu_counter_inc(&sctp_sockets_allocated);
2736 ++ sk_sockets_allocated_inc(sk);
2737 + sock_prot_inuse_add(net, sk->sk_prot, 1);
2738 +
2739 + /* Nothing can fail after this block, otherwise
2740 +@@ -4290,7 +4290,7 @@ static void sctp_destroy_sock(struct sock *sk)
2741 + }
2742 + sctp_endpoint_free(sp->ep);
2743 + local_bh_disable();
2744 +- percpu_counter_dec(&sctp_sockets_allocated);
2745 ++ sk_sockets_allocated_dec(sk);
2746 + sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
2747 + local_bh_enable();
2748 + }
2749 +diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
2750 +index c9af022676c2..47c3e97c3136 100644
2751 +--- a/sound/hda/hdac_i915.c
2752 ++++ b/sound/hda/hdac_i915.c
2753 +@@ -319,7 +319,7 @@ static int hdac_component_master_match(struct device *dev, void *data)
2754 + */
2755 + int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *aops)
2756 + {
2757 +- if (WARN_ON(!hdac_acomp))
2758 ++ if (!hdac_acomp)
2759 + return -ENODEV;
2760 +
2761 + hdac_acomp->audio_ops = aops;
2762 +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
2763 +index ba40596b9d92..4ef3b0067876 100644
2764 +--- a/sound/pci/hda/patch_realtek.c
2765 ++++ b/sound/pci/hda/patch_realtek.c
2766 +@@ -5971,6 +5971,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
2767 + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2768 + {0x1b, 0x01011020},
2769 + {0x21, 0x02211010}),
2770 ++ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2771 ++ {0x12, 0x90a60130},
2772 ++ {0x14, 0x90170110},
2773 ++ {0x1b, 0x01011020},
2774 ++ {0x21, 0x0221101f}),
2775 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
2776 + {0x12, 0x90a60160},
2777 + {0x14, 0x90170120},
2778 +diff --git a/sound/soc/codecs/da7218.c b/sound/soc/codecs/da7218.c
2779 +index c69e97654fc6..f88632426c0a 100644
2780 +--- a/sound/soc/codecs/da7218.c
2781 ++++ b/sound/soc/codecs/da7218.c
2782 +@@ -2519,7 +2519,7 @@ static struct da7218_pdata *da7218_of_to_pdata(struct snd_soc_codec *codec)
2783 + }
2784 +
2785 + if (da7218->dev_id == DA7218_DEV_ID) {
2786 +- hpldet_np = of_find_node_by_name(np, "da7218_hpldet");
2787 ++ hpldet_np = of_get_child_by_name(np, "da7218_hpldet");
2788 + if (!hpldet_np)
2789 + return pdata;
2790 +
2791 +diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
2792 +index 5acd5b69fb83..f9b6c5a81b47 100644
2793 +--- a/sound/soc/codecs/tlv320aic31xx.h
2794 ++++ b/sound/soc/codecs/tlv320aic31xx.h
2795 +@@ -115,7 +115,7 @@ struct aic31xx_pdata {
2796 + /* INT2 interrupt control */
2797 + #define AIC31XX_INT2CTRL AIC31XX_REG(0, 49)
2798 + /* GPIO1 control */
2799 +-#define AIC31XX_GPIO1 AIC31XX_REG(0, 50)
2800 ++#define AIC31XX_GPIO1 AIC31XX_REG(0, 51)
2801 +
2802 + #define AIC31XX_DACPRB AIC31XX_REG(0, 60)
2803 + /* ADC Instruction Set Register */
2804 +diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
2805 +index a2104d68169d..26fd6a664b9b 100644
2806 +--- a/sound/soc/codecs/twl4030.c
2807 ++++ b/sound/soc/codecs/twl4030.c
2808 +@@ -232,7 +232,7 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
2809 + struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev);
2810 + struct device_node *twl4030_codec_node = NULL;
2811 +
2812 +- twl4030_codec_node = of_find_node_by_name(codec->dev->parent->of_node,
2813 ++ twl4030_codec_node = of_get_child_by_name(codec->dev->parent->of_node,
2814 + "codec");
2815 +
2816 + if (!pdata && twl4030_codec_node) {
2817 +@@ -241,9 +241,11 @@ static struct twl4030_codec_data *twl4030_get_pdata(struct snd_soc_codec *codec)
2818 + GFP_KERNEL);
2819 + if (!pdata) {
2820 + dev_err(codec->dev, "Can not allocate memory\n");
2821 ++ of_node_put(twl4030_codec_node);
2822 + return NULL;
2823 + }
2824 + twl4030_setup_pdata_of(pdata, twl4030_codec_node);
2825 ++ of_node_put(twl4030_codec_node);
2826 + }
2827 +
2828 + return pdata;
2829 +diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
2830 +index 757af795cebd..c03c9da076c2 100644
2831 +--- a/sound/soc/codecs/wm_adsp.c
2832 ++++ b/sound/soc/codecs/wm_adsp.c
2833 +@@ -1465,7 +1465,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
2834 + le64_to_cpu(footer->timestamp));
2835 +
2836 + while (pos < firmware->size &&
2837 +- pos - firmware->size > sizeof(*region)) {
2838 ++ sizeof(*region) < firmware->size - pos) {
2839 + region = (void *)&(firmware->data[pos]);
2840 + region_name = "Unknown";
2841 + reg = 0;
2842 +@@ -1526,8 +1526,8 @@ static int wm_adsp_load(struct wm_adsp *dsp)
2843 + regions, le32_to_cpu(region->len), offset,
2844 + region_name);
2845 +
2846 +- if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
2847 +- firmware->size) {
2848 ++ if (le32_to_cpu(region->len) >
2849 ++ firmware->size - pos - sizeof(*region)) {
2850 + adsp_err(dsp,
2851 + "%s.%d: %s region len %d bytes exceeds file length %zu\n",
2852 + file, regions, region_name,
2853 +@@ -1992,7 +1992,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
2854 +
2855 + blocks = 0;
2856 + while (pos < firmware->size &&
2857 +- pos - firmware->size > sizeof(*blk)) {
2858 ++ sizeof(*blk) < firmware->size - pos) {
2859 + blk = (void *)(&firmware->data[pos]);
2860 +
2861 + type = le16_to_cpu(blk->type);
2862 +@@ -2066,8 +2066,8 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
2863 + }
2864 +
2865 + if (reg) {
2866 +- if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
2867 +- firmware->size) {
2868 ++ if (le32_to_cpu(blk->len) >
2869 ++ firmware->size - pos - sizeof(*blk)) {
2870 + adsp_err(dsp,
2871 + "%s.%d: %s region len %d bytes exceeds file length %zu\n",
2872 + file, blocks, region_name,
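
Both wm_adsp checks are rewritten into overflow-proof form. The old pos + le32_to_cpu(len) + sizeof(*hdr) > size can wrap when len comes from an untrusted firmware file, silently passing the test; len > size - pos - sizeof(*hdr) keeps every subtraction on values the loop condition has already bounded, so nothing can overflow. A self-contained sketch of the pattern (types and names invented):

#include <stddef.h>
#include <stdint.h>

struct blk_hdr {
	uint32_t len;		/* untrusted: read from the file */
};

/* Walk length-prefixed blocks in buf[0..size); stop at the first
 * header whose claimed length does not fit the remaining bytes. */
static int count_blocks(const uint8_t *buf, size_t size)
{
	size_t pos = 0;
	int blocks = 0;

	while (pos < size && sizeof(struct blk_hdr) < size - pos) {
		const struct blk_hdr *hdr =
			(const struct blk_hdr *)(buf + pos);

		/* Same meaning as pos + sizeof(*hdr) + hdr->len > size,
		 * but cannot wrap: the loop condition guarantees
		 * size - pos - sizeof(*hdr) is positive. */
		if (hdr->len > size - pos - sizeof(*hdr))
			break;

		pos += sizeof(*hdr) + hdr->len;
		blocks++;
	}
	return blocks;
}
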
2873 +diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
2874 +index fde08660b63b..1c03490e1182 100644
2875 +--- a/sound/soc/fsl/fsl_ssi.c
2876 ++++ b/sound/soc/fsl/fsl_ssi.c
2877 +@@ -1467,12 +1467,6 @@ static int fsl_ssi_probe(struct platform_device *pdev)
2878 + sizeof(fsl_ssi_ac97_dai));
2879 +
2880 + fsl_ac97_data = ssi_private;
2881 +-
2882 +- ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
2883 +- if (ret) {
2884 +- dev_err(&pdev->dev, "could not set AC'97 ops\n");
2885 +- return ret;
2886 +- }
2887 + } else {
2888 + /* Initialize this copy of the CPU DAI driver structure */
2889 + memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
2890 +@@ -1583,6 +1577,14 @@ static int fsl_ssi_probe(struct platform_device *pdev)
2891 + return ret;
2892 + }
2893 +
2894 ++ if (fsl_ssi_is_ac97(ssi_private)) {
2895 ++ ret = snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
2896 ++ if (ret) {
2897 ++ dev_err(&pdev->dev, "could not set AC'97 ops\n");
2898 ++ goto error_ac97_ops;
2899 ++ }
2900 ++ }
2901 ++
2902 + ret = devm_snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
2903 + &ssi_private->cpu_dai_drv, 1);
2904 + if (ret) {
2905 +@@ -1666,6 +1668,10 @@ static int fsl_ssi_probe(struct platform_device *pdev)
2906 + fsl_ssi_debugfs_remove(&ssi_private->dbg_stats);
2907 +
2908 + error_asoc_register:
2909 ++ if (fsl_ssi_is_ac97(ssi_private))
2910 ++ snd_soc_set_ac97_ops(NULL);
2911 ++
2912 ++error_ac97_ops:
2913 + if (ssi_private->soc->imx)
2914 + fsl_ssi_imx_clean(pdev, ssi_private);
2915 +
2916 +diff --git a/tools/objtool/arch/x86/insn/x86-opcode-map.txt b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
2917 +index 767be7c76034..1754e094bc28 100644
2918 +--- a/tools/objtool/arch/x86/insn/x86-opcode-map.txt
2919 ++++ b/tools/objtool/arch/x86/insn/x86-opcode-map.txt
2920 +@@ -896,7 +896,7 @@ EndTable
2921 +
2922 + GrpTable: Grp3_1
2923 + 0: TEST Eb,Ib
2924 +-1:
2925 ++1: TEST Eb,Ib
2926 + 2: NOT Eb
2927 + 3: NEG Eb
2928 + 4: MUL AL,Eb
2929 +diff --git a/tools/usb/usbip/src/utils.c b/tools/usb/usbip/src/utils.c
2930 +index 2b3d6d235015..3d7b42e77299 100644
2931 +--- a/tools/usb/usbip/src/utils.c
2932 ++++ b/tools/usb/usbip/src/utils.c
2933 +@@ -30,6 +30,7 @@ int modify_match_busid(char *busid, int add)
2934 + char command[SYSFS_BUS_ID_SIZE + 4];
2935 + char match_busid_attr_path[SYSFS_PATH_MAX];
2936 + int rc;
2937 ++ int cmd_size;
2938 +
2939 + snprintf(match_busid_attr_path, sizeof(match_busid_attr_path),
2940 + "%s/%s/%s/%s/%s/%s", SYSFS_MNT_PATH, SYSFS_BUS_NAME,
2941 +@@ -37,12 +38,14 @@ int modify_match_busid(char *busid, int add)
2942 + attr_name);
2943 +
2944 + if (add)
2945 +- snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s", busid);
2946 ++ cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "add %s",
2947 ++ busid);
2948 + else
2949 +- snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s", busid);
2950 ++ cmd_size = snprintf(command, SYSFS_BUS_ID_SIZE + 4, "del %s",
2951 ++ busid);
2952 +
2953 + rc = write_sysfs_attribute(match_busid_attr_path, command,
2954 +- sizeof(command));
2955 ++ cmd_size);
2956 + if (rc < 0) {
2957 + dbg("failed to write match_busid: %s", strerror(errno));
2958 + return -1;
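
The usbip change keeps snprintf()'s return value and writes exactly that many bytes, where the old code always wrote sizeof(command), sending the formatted string plus trailing garbage to the sysfs attribute. One caveat worth keeping in mind: snprintf() returns the length it would have written, so the result must be range-checked before it is used as a write length. A small standalone sketch (names invented; stdout stands in for the sysfs file):

#include <stdio.h>

#define CMD_SIZE 32

/* Stand-in for write_sysfs_attribute(): the caller passes the exact
 * number of bytes to write. */
static int write_attr(const char *value, size_t len)
{
	return fwrite(value, 1, len, stdout) == len ? 0 : -1;
}

int main(void)
{
	char command[CMD_SIZE];
	int cmd_size;

	cmd_size = snprintf(command, sizeof(command), "add %s", "1-1.4");
	if (cmd_size < 0 || cmd_size >= (int)sizeof(command))
		return 1;	/* error or truncated: do not write junk */

	/* write only the bytes snprintf() actually produced */
	return write_attr(command, (size_t)cmd_size) ? 1 : 0;
}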