From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.4 commit in: /
Date: Wed, 31 Jan 2018 13:36:09
Message-Id: 1517405726.e87c7949f55854a73016f66184bfc84bc3830824.alicef@gentoo
1 commit: e87c7949f55854a73016f66184bfc84bc3830824
2 Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jan 31 13:35:26 2018 +0000
4 Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
5 CommitDate: Wed Jan 31 13:35:26 2018 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=e87c7949
7
8 linux kernel 4.4.114
9
10 0000_README | 4 +
11 1113_linux-4.4.114.patch | 3501 ++++++++++++++++++++++++++++++++++++++++++++++
12 2 files changed, 3505 insertions(+)
13
14 diff --git a/0000_README b/0000_README
15 index 47159cb..918bb76 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -495,6 +495,10 @@ Patch: 1112_linux-4.4.113.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.4.113
21
22 +Patch: 1113_linux-4.4.114.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.4.114
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1113_linux-4.4.114.patch b/1113_linux-4.4.114.patch
31 new file mode 100644
32 index 0000000..836d94a
33 --- /dev/null
34 +++ b/1113_linux-4.4.114.patch
35 @@ -0,0 +1,3501 @@
36 +diff --git a/Makefile b/Makefile
37 +index 39019c9d205c..153440b1bbb0 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 4
43 +-SUBLEVEL = 113
44 ++SUBLEVEL = 114
45 + EXTRAVERSION =
46 + NAME = Blurry Fish Butt
47 +
48 +diff --git a/arch/um/Makefile b/arch/um/Makefile
49 +index e3abe6f3156d..9ccf462131c4 100644
50 +--- a/arch/um/Makefile
51 ++++ b/arch/um/Makefile
52 +@@ -117,7 +117,7 @@ archheaders:
53 + archprepare: include/generated/user_constants.h
54 +
55 + LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
56 +-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib
57 ++LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
58 +
59 + CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
60 + $(call cc-option, -fno-stack-protector,) \
61 +diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
62 +index 112178b401a1..2d359991a273 100644
63 +--- a/arch/x86/entry/vsyscall/vsyscall_64.c
64 ++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
65 +@@ -46,6 +46,7 @@ static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
66 + #else
67 + EMULATE;
68 + #endif
69 ++unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;
70 +
71 + static int __init vsyscall_setup(char *str)
72 + {
73 +@@ -336,11 +337,11 @@ void __init map_vsyscall(void)
74 + extern char __vsyscall_page;
75 + unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
76 +
77 ++ if (vsyscall_mode != NATIVE)
78 ++ vsyscall_pgprot = __PAGE_KERNEL_VVAR;
79 + if (vsyscall_mode != NONE)
80 + __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
81 +- vsyscall_mode == NATIVE
82 +- ? PAGE_KERNEL_VSYSCALL
83 +- : PAGE_KERNEL_VVAR);
84 ++ __pgprot(vsyscall_pgprot));
85 +
86 + BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
87 + (unsigned long)VSYSCALL_ADDR);
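
Note on the hunk above: the hard-coded ternary is replaced by the new
vsyscall_pgprot variable so a second consumer can reuse it; the kaiser.c
hunk later in this patch maps the user-visible vsyscall page with the
same value. A condensed sketch of the resulting flow (illustrative, not
literal patch text):

	unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;

	/* map_vsyscall(): downgrade unless the mode is NATIVE */
	if (vsyscall_mode != NATIVE)
		vsyscall_pgprot = __PAGE_KERNEL_VVAR;

	/* kaiser_init(): the shadow mapping now inherits the same
	 * protection instead of always __PAGE_KERNEL_VSYSCALL */
	kaiser_add_user_map_early((void *)VSYSCALL_ADDR, PAGE_SIZE,
				  vsyscall_pgprot);
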
88 +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
89 +index 0fbc98568018..641f0f2c2982 100644
90 +--- a/arch/x86/include/asm/cpufeature.h
91 ++++ b/arch/x86/include/asm/cpufeature.h
92 +@@ -199,6 +199,7 @@
93 + #define X86_FEATURE_HWP_EPP ( 7*32+13) /* Intel HWP_EPP */
94 + #define X86_FEATURE_HWP_PKG_REQ ( 7*32+14) /* Intel HWP_PKG_REQ */
95 + #define X86_FEATURE_INTEL_PT ( 7*32+15) /* Intel Processor Trace */
96 ++#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
97 +
98 + #define X86_FEATURE_RETPOLINE ( 7*32+29) /* Generic Retpoline mitigation for Spectre variant 2 */
99 + #define X86_FEATURE_RETPOLINE_AMD ( 7*32+30) /* AMD Retpoline mitigation for Spectre variant 2 */
100 +diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
101 +new file mode 100644
102 +index 000000000000..6999f7d01a0d
103 +--- /dev/null
104 ++++ b/arch/x86/include/asm/intel-family.h
105 +@@ -0,0 +1,68 @@
106 ++#ifndef _ASM_X86_INTEL_FAMILY_H
107 ++#define _ASM_X86_INTEL_FAMILY_H
108 ++
109 ++/*
110 ++ * "Big Core" Processors (Branded as Core, Xeon, etc...)
111 ++ *
112 ++ * The "_X" parts are generally the EP and EX Xeons, or the
113 ++ * "Extreme" ones, like Broadwell-E.
114 ++ *
115 ++ * Things ending in "2" are usually because we have no better
116 ++ * name for them. There's no processor called "WESTMERE2".
117 ++ */
118 ++
119 ++#define INTEL_FAM6_CORE_YONAH 0x0E
120 ++#define INTEL_FAM6_CORE2_MEROM 0x0F
121 ++#define INTEL_FAM6_CORE2_MEROM_L 0x16
122 ++#define INTEL_FAM6_CORE2_PENRYN 0x17
123 ++#define INTEL_FAM6_CORE2_DUNNINGTON 0x1D
124 ++
125 ++#define INTEL_FAM6_NEHALEM 0x1E
126 ++#define INTEL_FAM6_NEHALEM_EP 0x1A
127 ++#define INTEL_FAM6_NEHALEM_EX 0x2E
128 ++#define INTEL_FAM6_WESTMERE 0x25
129 ++#define INTEL_FAM6_WESTMERE2 0x1F
130 ++#define INTEL_FAM6_WESTMERE_EP 0x2C
131 ++#define INTEL_FAM6_WESTMERE_EX 0x2F
132 ++
133 ++#define INTEL_FAM6_SANDYBRIDGE 0x2A
134 ++#define INTEL_FAM6_SANDYBRIDGE_X 0x2D
135 ++#define INTEL_FAM6_IVYBRIDGE 0x3A
136 ++#define INTEL_FAM6_IVYBRIDGE_X 0x3E
137 ++
138 ++#define INTEL_FAM6_HASWELL_CORE 0x3C
139 ++#define INTEL_FAM6_HASWELL_X 0x3F
140 ++#define INTEL_FAM6_HASWELL_ULT 0x45
141 ++#define INTEL_FAM6_HASWELL_GT3E 0x46
142 ++
143 ++#define INTEL_FAM6_BROADWELL_CORE 0x3D
144 ++#define INTEL_FAM6_BROADWELL_XEON_D 0x56
145 ++#define INTEL_FAM6_BROADWELL_GT3E 0x47
146 ++#define INTEL_FAM6_BROADWELL_X 0x4F
147 ++
148 ++#define INTEL_FAM6_SKYLAKE_MOBILE 0x4E
149 ++#define INTEL_FAM6_SKYLAKE_DESKTOP 0x5E
150 ++#define INTEL_FAM6_SKYLAKE_X 0x55
151 ++#define INTEL_FAM6_KABYLAKE_MOBILE 0x8E
152 ++#define INTEL_FAM6_KABYLAKE_DESKTOP 0x9E
153 ++
154 ++/* "Small Core" Processors (Atom) */
155 ++
156 ++#define INTEL_FAM6_ATOM_PINEVIEW 0x1C
157 ++#define INTEL_FAM6_ATOM_LINCROFT 0x26
158 ++#define INTEL_FAM6_ATOM_PENWELL 0x27
159 ++#define INTEL_FAM6_ATOM_CLOVERVIEW 0x35
160 ++#define INTEL_FAM6_ATOM_CEDARVIEW 0x36
161 ++#define INTEL_FAM6_ATOM_SILVERMONT1 0x37 /* BayTrail/BYT / Valleyview */
162 ++#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
163 ++#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
164 ++#define INTEL_FAM6_ATOM_MERRIFIELD1 0x4A /* Tangier */
165 ++#define INTEL_FAM6_ATOM_MERRIFIELD2 0x5A /* Annidale */
166 ++#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
167 ++#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
168 ++
169 ++/* Xeon Phi */
170 ++
171 ++#define INTEL_FAM6_XEON_PHI_KNL 0x57 /* Knights Landing */
172 ++
173 ++#endif /* _ASM_X86_INTEL_FAMILY_H */
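
This header is backported so later hunks can match CPU models by name
instead of raw numbers. A minimal sketch of a typical consumer (the
helper name is hypothetical; it mirrors the is_skylake_era() check this
patch adds to arch/x86/kernel/cpu/bugs.c below):

	#include <asm/intel-family.h>

	static bool is_skylake_client(struct cpuinfo_x86 *c)
	{
		if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 != 6)
			return false;
		return c->x86_model == INTEL_FAM6_SKYLAKE_MOBILE ||
		       c->x86_model == INTEL_FAM6_SKYLAKE_DESKTOP;
	}
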
174 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
175 +index c124d6ab4bf9..86bccb4bd4dc 100644
176 +--- a/arch/x86/include/asm/processor.h
177 ++++ b/arch/x86/include/asm/processor.h
178 +@@ -574,7 +574,7 @@ static inline void sync_core(void)
179 + {
180 + int tmp;
181 +
182 +-#ifdef CONFIG_M486
183 ++#ifdef CONFIG_X86_32
184 + /*
185 + * Do a CPUID if available, otherwise do a jump. The jump
186 + * can conveniently enough be the jump around CPUID.
187 +diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
188 +index 751bf4b7bf11..025ecfaba9c9 100644
189 +--- a/arch/x86/include/asm/switch_to.h
190 ++++ b/arch/x86/include/asm/switch_to.h
191 +@@ -1,6 +1,8 @@
192 + #ifndef _ASM_X86_SWITCH_TO_H
193 + #define _ASM_X86_SWITCH_TO_H
194 +
195 ++#include <asm/nospec-branch.h>
196 ++
197 + struct task_struct; /* one of the stranger aspects of C forward declarations */
198 + __visible struct task_struct *__switch_to(struct task_struct *prev,
199 + struct task_struct *next);
200 +@@ -24,6 +26,23 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
201 + #define __switch_canary_iparam
202 + #endif /* CC_STACKPROTECTOR */
203 +
204 ++#ifdef CONFIG_RETPOLINE
205 ++ /*
206 ++ * When switching from a shallower to a deeper call stack
207 ++ * the RSB may either underflow or use entries populated
208 ++ * with userspace addresses. On CPUs where those concerns
209 ++ * exist, overwrite the RSB with entries which capture
210 ++ * speculative execution to prevent attack.
211 ++ */
212 ++#define __retpoline_fill_return_buffer \
213 ++ ALTERNATIVE("jmp 910f", \
214 ++ __stringify(__FILL_RETURN_BUFFER(%%ebx, RSB_CLEAR_LOOPS, %%esp)),\
215 ++ X86_FEATURE_RSB_CTXSW) \
216 ++ "910:\n\t"
217 ++#else
218 ++#define __retpoline_fill_return_buffer
219 ++#endif
220 ++
221 + /*
222 + * Saving eflags is important. It switches not only IOPL between tasks,
223 + * it also protects other tasks from NT leaking through sysenter etc.
224 +@@ -46,6 +65,7 @@ do { \
225 + "movl $1f,%[prev_ip]\n\t" /* save EIP */ \
226 + "pushl %[next_ip]\n\t" /* restore EIP */ \
227 + __switch_canary \
228 ++ __retpoline_fill_return_buffer \
229 + "jmp __switch_to\n" /* regparm call */ \
230 + "1:\t" \
231 + "popl %%ebp\n\t" /* restore EBP */ \
232 +@@ -100,6 +120,23 @@ do { \
233 + #define __switch_canary_iparam
234 + #endif /* CC_STACKPROTECTOR */
235 +
236 ++#ifdef CONFIG_RETPOLINE
237 ++ /*
238 ++ * When switching from a shallower to a deeper call stack
239 ++ * the RSB may either underflow or use entries populated
240 ++ * with userspace addresses. On CPUs where those concerns
241 ++ * exist, overwrite the RSB with entries which capture
242 ++ * speculative execution to prevent attack.
243 ++ */
244 ++#define __retpoline_fill_return_buffer \
245 ++ ALTERNATIVE("jmp 910f", \
246 ++ __stringify(__FILL_RETURN_BUFFER(%%r12, RSB_CLEAR_LOOPS, %%rsp)),\
247 ++ X86_FEATURE_RSB_CTXSW) \
248 ++ "910:\n\t"
249 ++#else
250 ++#define __retpoline_fill_return_buffer
251 ++#endif
252 ++
253 + /*
254 + * There is no need to save or restore flags, because flags are always
255 + * clean in kernel mode, with the possible exception of IOPL. Kernel IOPL
256 +@@ -112,6 +149,7 @@ do { \
257 + "call __switch_to\n\t" \
258 + "movq "__percpu_arg([current_task])",%%rsi\n\t" \
259 + __switch_canary \
260 ++ __retpoline_fill_return_buffer \
261 + "movq %P[thread_info](%%rsi),%%r8\n\t" \
262 + "movq %%rax,%%rdi\n\t" \
263 + "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
264 +diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
265 +index 4865e10dbb55..9ee85066f407 100644
266 +--- a/arch/x86/include/asm/vsyscall.h
267 ++++ b/arch/x86/include/asm/vsyscall.h
268 +@@ -13,6 +13,7 @@ extern void map_vsyscall(void);
269 + */
270 + extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
271 + extern bool vsyscall_enabled(void);
272 ++extern unsigned long vsyscall_pgprot;
273 + #else
274 + static inline void map_vsyscall(void) {}
275 + static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
276 +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
277 +index fc91c98bee01..fd945099fc95 100644
278 +--- a/arch/x86/kernel/apic/io_apic.c
279 ++++ b/arch/x86/kernel/apic/io_apic.c
280 +@@ -2592,8 +2592,8 @@ static struct resource * __init ioapic_setup_resources(void)
281 + res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
282 + snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
283 + mem += IOAPIC_RESOURCE_NAME_SIZE;
284 ++ ioapics[i].iomem_res = &res[num];
285 + num++;
286 +- ioapics[i].iomem_res = res;
287 + }
288 +
289 + ioapic_resources = res;
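
The io_apic.c hunk above fixes a pointer bug: each entry recorded the
base of the resource array rather than its own element, so every IOAPIC
appeared to own res[0]. A stripped-down sketch of the bug class (types
and names are stand-ins, not the actual driver code):

	#include <linux/ioport.h>	/* struct resource */

	struct ioapic_slot { struct resource *iomem_res; };

	static void assign_iomem(struct ioapic_slot *s,
				 struct resource *res, int n)
	{
		int i;

		for (i = 0; i < n; i++)
			s[i].iomem_res = &res[i];  /* was "= res", i.e.
						    * &res[0] for every i */
	}
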
290 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
291 +index 49d25ddf0e9f..8cacf62ec458 100644
292 +--- a/arch/x86/kernel/cpu/bugs.c
293 ++++ b/arch/x86/kernel/cpu/bugs.c
294 +@@ -22,6 +22,7 @@
295 + #include <asm/alternative.h>
296 + #include <asm/pgtable.h>
297 + #include <asm/cacheflush.h>
298 ++#include <asm/intel-family.h>
299 +
300 + static void __init spectre_v2_select_mitigation(void);
301 +
302 +@@ -154,6 +155,23 @@ disable:
303 + return SPECTRE_V2_CMD_NONE;
304 + }
305 +
306 ++/* Check for Skylake-like CPUs (for RSB handling) */
307 ++static bool __init is_skylake_era(void)
308 ++{
309 ++ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
310 ++ boot_cpu_data.x86 == 6) {
311 ++ switch (boot_cpu_data.x86_model) {
312 ++ case INTEL_FAM6_SKYLAKE_MOBILE:
313 ++ case INTEL_FAM6_SKYLAKE_DESKTOP:
314 ++ case INTEL_FAM6_SKYLAKE_X:
315 ++ case INTEL_FAM6_KABYLAKE_MOBILE:
316 ++ case INTEL_FAM6_KABYLAKE_DESKTOP:
317 ++ return true;
318 ++ }
319 ++ }
320 ++ return false;
321 ++}
322 ++
323 + static void __init spectre_v2_select_mitigation(void)
324 + {
325 + enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
326 +@@ -212,6 +230,24 @@ retpoline_auto:
327 +
328 + spectre_v2_enabled = mode;
329 + pr_info("%s\n", spectre_v2_strings[mode]);
330 ++
331 ++ /*
332 ++ * If neither SMEP or KPTI are available, there is a risk of
333 ++ * hitting userspace addresses in the RSB after a context switch
334 ++ * from a shallow call stack to a deeper one. To prevent this fill
335 ++ * the entire RSB, even when using IBRS.
336 ++ *
337 ++ * Skylake era CPUs have a separate issue with *underflow* of the
338 ++ * RSB, when they will predict 'ret' targets from the generic BTB.
339 ++ * The proper mitigation for this is IBRS. If IBRS is not supported
340 ++ * or deactivated in favour of retpolines the RSB fill on context
341 ++ * switch is required.
342 ++ */
343 ++ if ((!boot_cpu_has(X86_FEATURE_KAISER) &&
344 ++ !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
345 ++ setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
346 ++ pr_info("Filling RSB on context switch\n");
347 ++ }
348 + }
349 +
350 + #undef pr_fmt
351 +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
352 +index e38d338a6447..b4ca91cf55b0 100644
353 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
354 ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
355 +@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
356 + ci_leaf_init(this_leaf++, &id4_regs);
357 + __cache_cpumap_setup(cpu, idx, &id4_regs);
358 + }
359 ++ this_cpu_ci->cpu_map_populated = true;
360 ++
361 + return 0;
362 + }
363 +
364 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
365 +index b428a8174be1..2c76a1801393 100644
366 +--- a/arch/x86/kernel/cpu/microcode/intel.c
367 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
368 +@@ -39,6 +39,9 @@
369 + #include <asm/setup.h>
370 + #include <asm/msr.h>
371 +
372 ++/* last level cache size per core */
373 ++static int llc_size_per_core;
374 ++
375 + static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
376 + static struct mc_saved_data {
377 + unsigned int mc_saved_count;
378 +@@ -996,15 +999,18 @@ static bool is_blacklisted(unsigned int cpu)
379 +
380 + /*
381 + * Late loading on model 79 with microcode revision less than 0x0b000021
382 +- * may result in a system hang. This behavior is documented in item
383 +- * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
384 ++ * and LLC size per core bigger than 2.5MB may result in a system hang.
385 ++ * This behavior is documented in item BDF90, #334165 (Intel Xeon
386 ++ * Processor E7-8800/4800 v4 Product Family).
387 + */
388 + if (c->x86 == 6 &&
389 + c->x86_model == 79 &&
390 + c->x86_mask == 0x01 &&
391 ++ llc_size_per_core > 2621440 &&
392 + c->microcode < 0x0b000021) {
393 + pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
394 + pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
395 ++ return true;
396 + }
397 +
398 + return false;
399 +@@ -1067,6 +1073,15 @@ static struct microcode_ops microcode_intel_ops = {
400 + .microcode_fini_cpu = microcode_fini_cpu,
401 + };
402 +
403 ++static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
404 ++{
405 ++ u64 llc_size = c->x86_cache_size * 1024;
406 ++
407 ++ do_div(llc_size, c->x86_max_cores);
408 ++
409 ++ return (int)llc_size;
410 ++}
411 ++
412 + struct microcode_ops * __init init_intel_microcode(void)
413 + {
414 + struct cpuinfo_x86 *c = &boot_cpu_data;
415 +@@ -1077,6 +1092,8 @@ struct microcode_ops * __init init_intel_microcode(void)
416 + return NULL;
417 + }
418 +
419 ++ llc_size_per_core = calc_llc_size_per_core(c);
420 ++
421 + return &microcode_intel_ops;
422 + }
423 +
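
Worked example for the new LLC gate (numbers hypothetical):
c->x86_cache_size is reported in KB, so a 16-core part with a 45 MB
shared L3 gives

	u64 llc_size = 46080ULL * 1024;	/* 45 MB -> 47185920 bytes */
	do_div(llc_size, 16);		/* 2949120 bytes per core */

and 2949120 > 2621440 (2.5 MB), so on a model 79 (Xeon E7 v4) stepping
0x01 with microcode older than 0x0b000021 late loading is refused;
parts whose per-core LLC share is at or below 2.5 MB are no longer
blacklisted.
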
424 +diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
425 +index e912b2f6d36e..45772560aceb 100644
426 +--- a/arch/x86/lib/delay.c
427 ++++ b/arch/x86/lib/delay.c
428 +@@ -93,6 +93,13 @@ static void delay_mwaitx(unsigned long __loops)
429 + {
430 + u64 start, end, delay, loops = __loops;
431 +
432 ++ /*
433 ++ * Timer value of 0 causes MWAITX to wait indefinitely, unless there
434 ++ * is a store on the memory monitored by MONITORX.
435 ++ */
436 ++ if (loops == 0)
437 ++ return;
438 ++
439 + start = rdtsc_ordered();
440 +
441 + for (;;) {
442 +diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
443 +index 8af98513d36c..2298434f7bdb 100644
444 +--- a/arch/x86/mm/kaiser.c
445 ++++ b/arch/x86/mm/kaiser.c
446 +@@ -345,7 +345,7 @@ void __init kaiser_init(void)
447 + if (vsyscall_enabled())
448 + kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
449 + PAGE_SIZE,
450 +- __PAGE_KERNEL_VSYSCALL);
451 ++ vsyscall_pgprot);
452 +
453 + for_each_possible_cpu(cpu) {
454 + void *percpu_vaddr = __per_cpu_user_mapped_start +
455 +diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
456 +index 9f77943653fb..b63a173786d5 100644
457 +--- a/drivers/acpi/acpi_processor.c
458 ++++ b/drivers/acpi/acpi_processor.c
459 +@@ -331,15 +331,6 @@ static int acpi_processor_get_info(struct acpi_device *device)
460 + pr->throttling.duty_width = acpi_gbl_FADT.duty_width;
461 +
462 + pr->pblk = object.processor.pblk_address;
463 +-
464 +- /*
465 +- * We don't care about error returns - we just try to mark
466 +- * these reserved so that nobody else is confused into thinking
467 +- * that this region might be unused..
468 +- *
469 +- * (In particular, allocating the IO range for Cardbus)
470 +- */
471 +- request_region(pr->throttling.address, 6, "ACPI CPU throttle");
472 + }
473 +
474 + /*
475 +diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
476 +index de325ae04ce1..3b3c5b90bd20 100644
477 +--- a/drivers/acpi/acpica/nsutils.c
478 ++++ b/drivers/acpi/acpica/nsutils.c
479 +@@ -593,25 +593,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
480 + void acpi_ns_terminate(void)
481 + {
482 + acpi_status status;
483 ++ union acpi_operand_object *prev;
484 ++ union acpi_operand_object *next;
485 +
486 + ACPI_FUNCTION_TRACE(ns_terminate);
487 +
488 +-#ifdef ACPI_EXEC_APP
489 +- {
490 +- union acpi_operand_object *prev;
491 +- union acpi_operand_object *next;
492 ++ /* Delete any module-level code blocks */
493 +
494 +- /* Delete any module-level code blocks */
495 +-
496 +- next = acpi_gbl_module_code_list;
497 +- while (next) {
498 +- prev = next;
499 +- next = next->method.mutex;
500 +- prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
501 +- acpi_ut_remove_reference(prev);
502 +- }
503 ++ next = acpi_gbl_module_code_list;
504 ++ while (next) {
505 ++ prev = next;
506 ++ next = next->method.mutex;
507 ++ prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
508 ++ acpi_ut_remove_reference(prev);
509 + }
510 +-#endif
511 +
512 + /*
513 + * Free the entire namespace -- all nodes and all objects
514 +diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
515 +index 73c9c7fa9001..f06317d6fc38 100644
516 +--- a/drivers/acpi/glue.c
517 ++++ b/drivers/acpi/glue.c
518 +@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
519 + return -ENODEV;
520 +
521 + /*
522 +- * If the device has a _HID (or _CID) returning a valid ACPI/PNP
523 +- * device ID, it is better to make it look less attractive here, so that
524 +- * the other device with the same _ADR value (that may not have a valid
525 +- * device ID) can be matched going forward. [This means a second spec
526 +- * violation in a row, so whatever we do here is best effort anyway.]
527 ++ * If the device has a _HID returning a valid ACPI/PNP device ID, it is
528 ++ * better to make it look less attractive here, so that the other device
529 ++ * with the same _ADR value (that may not have a valid device ID) can be
530 ++ * matched going forward. [This means a second spec violation in a row,
531 ++ * so whatever we do here is best effort anyway.]
532 + */
533 +- return sta_present && list_empty(&adev->pnp.ids) ?
534 ++ return sta_present && !adev->pnp.type.platform_id ?
535 + FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
536 + }
537 +
538 +diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
539 +index f170d746336d..c72e64893d03 100644
540 +--- a/drivers/acpi/processor_throttling.c
541 ++++ b/drivers/acpi/processor_throttling.c
542 +@@ -676,6 +676,15 @@ static int acpi_processor_get_throttling_fadt(struct acpi_processor *pr)
543 + if (!pr->flags.throttling)
544 + return -ENODEV;
545 +
546 ++ /*
547 ++ * We don't care about error returns - we just try to mark
548 ++ * these reserved so that nobody else is confused into thinking
549 ++ * that this region might be unused..
550 ++ *
551 ++ * (In particular, allocating the IO range for Cardbus)
552 ++ */
553 ++ request_region(pr->throttling.address, 6, "ACPI CPU throttle");
554 ++
555 + pr->throttling.state = 0;
556 +
557 + duty_mask = pr->throttling.state_count - 1;
558 +diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
559 +index e9fd32e91668..70e13cf06ed0 100644
560 +--- a/drivers/base/cacheinfo.c
561 ++++ b/drivers/base/cacheinfo.c
562 +@@ -16,6 +16,7 @@
563 + * You should have received a copy of the GNU General Public License
564 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
565 + */
566 ++#include <linux/acpi.h>
567 + #include <linux/bitops.h>
568 + #include <linux/cacheinfo.h>
569 + #include <linux/compiler.h>
570 +@@ -104,9 +105,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
571 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
572 + struct cacheinfo *this_leaf, *sib_leaf;
573 + unsigned int index;
574 +- int ret;
575 ++ int ret = 0;
576 ++
577 ++ if (this_cpu_ci->cpu_map_populated)
578 ++ return 0;
579 +
580 +- ret = cache_setup_of_node(cpu);
581 ++ if (of_have_populated_dt())
582 ++ ret = cache_setup_of_node(cpu);
583 ++ else if (!acpi_disabled)
584 ++ /* No cache property/hierarchy support yet in ACPI */
585 ++ ret = -ENOTSUPP;
586 + if (ret)
587 + return ret;
588 +
589 +@@ -203,8 +211,7 @@ static int detect_cache_attributes(unsigned int cpu)
590 + */
591 + ret = cache_shared_cpu_map_setup(cpu);
592 + if (ret) {
593 +- pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
594 +- cpu);
595 ++ pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
596 + goto free_ci;
597 + }
598 + return 0;
599 +diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
600 +index a311cfa4c5bd..a6975795e7f3 100644
601 +--- a/drivers/base/power/trace.c
602 ++++ b/drivers/base/power/trace.c
603 +@@ -166,14 +166,14 @@ void generate_pm_trace(const void *tracedata, unsigned int user)
604 + }
605 + EXPORT_SYMBOL(generate_pm_trace);
606 +
607 +-extern char __tracedata_start, __tracedata_end;
608 ++extern char __tracedata_start[], __tracedata_end[];
609 + static int show_file_hash(unsigned int value)
610 + {
611 + int match;
612 + char *tracedata;
613 +
614 + match = 0;
615 +- for (tracedata = &__tracedata_start ; tracedata < &__tracedata_end ;
616 ++ for (tracedata = __tracedata_start ; tracedata < __tracedata_end ;
617 + tracedata += 2 + sizeof(unsigned long)) {
618 + unsigned short lineno = *(unsigned short *)tracedata;
619 + const char *file = *(const char **)(tracedata + 2);
620 +diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
621 +index 7e2dc5e56632..0b49f29bf0da 100644
622 +--- a/drivers/input/mouse/trackpoint.c
623 ++++ b/drivers/input/mouse/trackpoint.c
624 +@@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
625 + if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
626 + psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
627 + button_info = 0x33;
628 ++ } else if (!button_info) {
629 ++ psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n");
630 ++ button_info = 0x33;
631 + }
632 +
633 + psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
634 +diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
635 +index 90e94a028a49..83b1226471c1 100644
636 +--- a/drivers/mmc/host/sdhci-of-esdhc.c
637 ++++ b/drivers/mmc/host/sdhci-of-esdhc.c
638 +@@ -584,6 +584,8 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
639 + {
640 + struct sdhci_host *host;
641 + struct device_node *np;
642 ++ struct sdhci_pltfm_host *pltfm_host;
643 ++ struct sdhci_esdhc *esdhc;
644 + int ret;
645 +
646 + np = pdev->dev.of_node;
647 +@@ -600,6 +602,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
648 +
649 + sdhci_get_of_property(pdev);
650 +
651 ++ pltfm_host = sdhci_priv(host);
652 ++ esdhc = pltfm_host->priv;
653 ++ if (esdhc->vendor_ver == VENDOR_V_22)
654 ++ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
655 ++
656 ++ if (esdhc->vendor_ver > VENDOR_V_22)
657 ++ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
658 ++
659 + if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
660 + of_device_is_compatible(np, "fsl,p5020-esdhc") ||
661 + of_device_is_compatible(np, "fsl,p4080-esdhc") ||
662 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
663 +index c5ea1018cb47..24155380e43c 100644
664 +--- a/drivers/net/ethernet/realtek/r8169.c
665 ++++ b/drivers/net/ethernet/realtek/r8169.c
666 +@@ -2205,19 +2205,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
667 + void __iomem *ioaddr = tp->mmio_addr;
668 + dma_addr_t paddr = tp->counters_phys_addr;
669 + u32 cmd;
670 +- bool ret;
671 +
672 + RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
673 ++ RTL_R32(CounterAddrHigh);
674 + cmd = (u64)paddr & DMA_BIT_MASK(32);
675 + RTL_W32(CounterAddrLow, cmd);
676 + RTL_W32(CounterAddrLow, cmd | counter_cmd);
677 +
678 +- ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
679 +-
680 +- RTL_W32(CounterAddrLow, 0);
681 +- RTL_W32(CounterAddrHigh, 0);
682 +-
683 +- return ret;
684 ++ return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
685 + }
686 +
687 + static bool rtl8169_reset_counters(struct net_device *dev)
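
The added RTL_R32(CounterAddrHigh) above is the standard flush for a
posted MMIO write: without the read-back, the high half of the DMA
address may still sit in a write buffer when the low half and the
command bit are issued. The general pattern, sketched with placeholder
register names (REG_HI/REG_LO are not driver API):

	writel(upper_32_bits(paddr), ioaddr + REG_HI);
	readl(ioaddr + REG_HI);		/* flush the posted write */
	writel(lower_32_bits(paddr) | CMD, ioaddr + REG_LO);

Dropping the trailing zero-writes also leaves the counter address
programmed between commands instead of clearing it after every dump.
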
688 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
689 +index 4e0068e775f9..b7b859c3a0c7 100644
690 +--- a/drivers/net/ppp/pppoe.c
691 ++++ b/drivers/net/ppp/pppoe.c
692 +@@ -860,6 +860,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
693 + struct pppoe_hdr *ph;
694 + struct net_device *dev;
695 + char *start;
696 ++ int hlen;
697 +
698 + lock_sock(sk);
699 + if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
700 +@@ -878,16 +879,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
701 + if (total_len > (dev->mtu + dev->hard_header_len))
702 + goto end;
703 +
704 +-
705 +- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
706 +- 0, GFP_KERNEL);
707 ++ hlen = LL_RESERVED_SPACE(dev);
708 ++ skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
709 ++ dev->needed_tailroom, 0, GFP_KERNEL);
710 + if (!skb) {
711 + error = -ENOMEM;
712 + goto end;
713 + }
714 +
715 + /* Reserve space for headers. */
716 +- skb_reserve(skb, dev->hard_header_len);
717 ++ skb_reserve(skb, hlen);
718 + skb_reset_network_header(skb);
719 +
720 + skb->dev = dev;
721 +@@ -948,7 +949,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
722 + /* Copy the data if there is no space for the header or if it's
723 + * read-only.
724 + */
725 +- if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
726 ++ if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
727 + goto abort;
728 +
729 + __skb_push(skb, sizeof(*ph));
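
Worked headroom arithmetic for the hunks above, assuming the usual
LL_RESERVED_SPACE() definition (it rounds hard_header_len +
needed_headroom up to the 16-byte HH_DATA_MOD boundary): for plain
Ethernet with hard_header_len = 14 and no extra needed_headroom,

	hlen = LL_RESERVED_SPACE(dev);	/* (14 & ~15) + 16 = 16 bytes */

so the send path allocates hlen + sizeof(struct pppoe_hdr) + total_len
+ dev->needed_tailroom. Unlike the old fixed "+ 32" guess, this also
covers devices that declare large needed_headroom or needed_tailroom,
and __pppoe_xmit() now asks skb_cow_head() for the same bound.
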
730 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
731 +index 41e9ebd7d0a6..ebdee8f01f65 100644
732 +--- a/drivers/net/usb/lan78xx.c
733 ++++ b/drivers/net/usb/lan78xx.c
734 +@@ -1859,6 +1859,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
735 + buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
736 + dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
737 + dev->rx_qlen = 4;
738 ++ dev->tx_qlen = 4;
739 + }
740 +
741 + ret = lan78xx_write_reg(dev, BURST_CAP, buf);
742 +diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
743 +index 0cbf520cea77..82bf85ae5d08 100644
744 +--- a/drivers/net/vmxnet3/vmxnet3_drv.c
745 ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
746 +@@ -1563,7 +1563,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
747 + rq->rx_ring[i].basePA);
748 + rq->rx_ring[i].base = NULL;
749 + }
750 +- rq->buf_info[i] = NULL;
751 + }
752 +
753 + if (rq->comp_ring.base) {
754 +@@ -1578,6 +1577,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
755 + (rq->rx_ring[0].size + rq->rx_ring[1].size);
756 + dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
757 + rq->buf_info_pa);
758 ++ rq->buf_info[0] = rq->buf_info[1] = NULL;
759 + }
760 + }
761 +
762 +diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
763 +index 3923bed93c7e..a21e229d95e0 100644
764 +--- a/drivers/pci/host/pci-layerscape.c
765 ++++ b/drivers/pci/host/pci-layerscape.c
766 +@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
767 + iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
768 + }
769 +
770 ++/* Drop MSG TLP except for Vendor MSG */
771 ++static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
772 ++{
773 ++ u32 val;
774 ++
775 ++ val = ioread32(pcie->dbi + PCIE_STRFMR1);
776 ++ val &= 0xDFFFFFFF;
777 ++ iowrite32(val, pcie->dbi + PCIE_STRFMR1);
778 ++}
779 ++
780 + static int ls1021_pcie_link_up(struct pcie_port *pp)
781 + {
782 + u32 state;
783 +@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
784 + static void ls1021_pcie_host_init(struct pcie_port *pp)
785 + {
786 + struct ls_pcie *pcie = to_ls_pcie(pp);
787 +- u32 val, index[2];
788 ++ u32 index[2];
789 +
790 + pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
791 + "fsl,pcie-scfg");
792 +@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
793 +
794 + dw_pcie_setup_rc(pp);
795 +
796 +- /*
797 +- * LS1021A Workaround for internal TKT228622
798 +- * to fix the INTx hang issue
799 +- */
800 +- val = ioread32(pcie->dbi + PCIE_STRFMR1);
801 +- val &= 0xffff;
802 +- iowrite32(val, pcie->dbi + PCIE_STRFMR1);
803 ++ ls_pcie_drop_msg_tlp(pcie);
804 + }
805 +
806 + static int ls_pcie_link_up(struct pcie_port *pp)
807 +@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
808 + iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
809 + ls_pcie_fix_class(pcie);
810 + ls_pcie_clear_multifunction(pcie);
811 ++ ls_pcie_drop_msg_tlp(pcie);
812 + iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
813 + }
814 +
815 +@@ -203,6 +208,7 @@ static const struct of_device_id ls_pcie_of_match[] = {
816 + { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
817 + { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
818 + { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
819 ++ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
820 + { },
821 + };
822 + MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
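
Bit arithmetic for ls_pcie_drop_msg_tlp() above: ~0xDFFFFFFF ==
0x20000000 == BIT(29), so the helper clears exactly one control bit in
PCIE_STRFMR1:

	val &= ~BIT(29);	/* equivalent to val &= 0xDFFFFFFF */

The old TKT228622 workaround did val &= 0xffff, wiping the whole upper
half of the register as a side effect. The narrower helper is also safe
to call from the generic ls_pcie_host_init(), which the patch now does.
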
823 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
824 +index c1ccf1ee99ea..efce04df2109 100644
825 +--- a/drivers/scsi/libiscsi.c
826 ++++ b/drivers/scsi/libiscsi.c
827 +@@ -1727,7 +1727,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
828 +
829 + if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
830 + reason = FAILURE_SESSION_IN_RECOVERY;
831 +- sc->result = DID_REQUEUE;
832 ++ sc->result = DID_REQUEUE << 16;
833 + goto fault;
834 + }
835 +
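
The shift above matters because a SCSI command's result packs four
byte-wide fields (standard midlayer convention, shown schematically):

	/* sc->result = (driver_byte << 24) | (host_byte << 16) |
	 *              (msg_byte << 8)     | status_byte;       */
	sc->result = DID_REQUEUE << 16;	/* DID_* codes are host bytes */

Unshifted, DID_REQUEUE landed in the status-byte position, where the
midlayer does not see a transport-level requeue request.
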
836 +diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
837 +index a3ec49bdc1e6..ec38370ffcab 100644
838 +--- a/drivers/usb/usbip/stub_dev.c
839 ++++ b/drivers/usb/usbip/stub_dev.c
840 +@@ -163,8 +163,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
841 + * step 1?
842 + */
843 + if (ud->tcp_socket) {
844 +- dev_dbg(&sdev->udev->dev, "shutdown tcp_socket %p\n",
845 +- ud->tcp_socket);
846 ++ dev_dbg(&sdev->udev->dev, "shutdown sockfd %d\n", ud->sockfd);
847 + kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
848 + }
849 +
850 +diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c
851 +index 7de54a66044f..56cacb68040c 100644
852 +--- a/drivers/usb/usbip/stub_rx.c
853 ++++ b/drivers/usb/usbip/stub_rx.c
854 +@@ -338,23 +338,26 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
855 + return priv;
856 + }
857 +
858 +-static int get_pipe(struct stub_device *sdev, int epnum, int dir)
859 ++static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
860 + {
861 + struct usb_device *udev = sdev->udev;
862 + struct usb_host_endpoint *ep;
863 + struct usb_endpoint_descriptor *epd = NULL;
864 ++ int epnum = pdu->base.ep;
865 ++ int dir = pdu->base.direction;
866 ++
867 ++ if (epnum < 0 || epnum > 15)
868 ++ goto err_ret;
869 +
870 + if (dir == USBIP_DIR_IN)
871 + ep = udev->ep_in[epnum & 0x7f];
872 + else
873 + ep = udev->ep_out[epnum & 0x7f];
874 +- if (!ep) {
875 +- dev_err(&sdev->interface->dev, "no such endpoint?, %d\n",
876 +- epnum);
877 +- BUG();
878 +- }
879 ++ if (!ep)
880 ++ goto err_ret;
881 +
882 + epd = &ep->desc;
883 ++
884 + if (usb_endpoint_xfer_control(epd)) {
885 + if (dir == USBIP_DIR_OUT)
886 + return usb_sndctrlpipe(udev, epnum);
887 +@@ -377,15 +380,37 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir)
888 + }
889 +
890 + if (usb_endpoint_xfer_isoc(epd)) {
891 ++ /* validate packet size and number of packets */
892 ++ unsigned int maxp, packets, bytes;
893 ++
894 ++#define USB_EP_MAXP_MULT_SHIFT 11
895 ++#define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT)
896 ++#define USB_EP_MAXP_MULT(m) \
897 ++ (((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
898 ++
899 ++ maxp = usb_endpoint_maxp(epd);
900 ++ maxp *= (USB_EP_MAXP_MULT(
901 ++ __le16_to_cpu(epd->wMaxPacketSize)) + 1);
902 ++ bytes = pdu->u.cmd_submit.transfer_buffer_length;
903 ++ packets = DIV_ROUND_UP(bytes, maxp);
904 ++
905 ++ if (pdu->u.cmd_submit.number_of_packets < 0 ||
906 ++ pdu->u.cmd_submit.number_of_packets > packets) {
907 ++ dev_err(&sdev->udev->dev,
908 ++ "CMD_SUBMIT: isoc invalid num packets %d\n",
909 ++ pdu->u.cmd_submit.number_of_packets);
910 ++ return -1;
911 ++ }
912 + if (dir == USBIP_DIR_OUT)
913 + return usb_sndisocpipe(udev, epnum);
914 + else
915 + return usb_rcvisocpipe(udev, epnum);
916 + }
917 +
918 ++err_ret:
919 + /* NOT REACHED */
920 +- dev_err(&sdev->interface->dev, "get pipe, epnum %d\n", epnum);
921 +- return 0;
922 ++ dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
923 ++ return -1;
924 + }
925 +
926 + static void masking_bogus_flags(struct urb *urb)
927 +@@ -449,7 +474,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev,
928 + struct stub_priv *priv;
929 + struct usbip_device *ud = &sdev->ud;
930 + struct usb_device *udev = sdev->udev;
931 +- int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction);
932 ++ int pipe = get_pipe(sdev, pdu);
933 ++
934 ++ if (pipe == -1)
935 ++ return;
936 +
937 + priv = stub_priv_alloc(sdev, pdu);
938 + if (!priv)
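
Worked example for the new isochronous bounds check (values
hypothetical): an isoc endpoint with wMaxPacketSize = 1024 and no
high-bandwidth mult bits gives maxp = 1024, so for a
transfer_buffer_length of 10240 bytes

	packets = DIV_ROUND_UP(10240, 1024);	/* = 10 */

and a PDU from the peer claiming number_of_packets = 16, or a negative
count, is rejected with -1 before any URB is built, instead of letting
a malicious usbip client drive the ISO frame descriptors out of bounds.
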
939 +diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
940 +index 9752b93f754e..1838f1b2c2fa 100644
941 +--- a/drivers/usb/usbip/usbip_common.c
942 ++++ b/drivers/usb/usbip/usbip_common.c
943 +@@ -317,18 +317,14 @@ int usbip_recv(struct socket *sock, void *buf, int size)
944 + struct msghdr msg;
945 + struct kvec iov;
946 + int total = 0;
947 +-
948 + /* for blocks of if (usbip_dbg_flag_xmit) */
949 + char *bp = buf;
950 + int osize = size;
951 +
952 +- usbip_dbg_xmit("enter\n");
953 +-
954 +- if (!sock || !buf || !size) {
955 +- pr_err("invalid arg, sock %p buff %p size %d\n", sock, buf,
956 +- size);
957 ++ if (!sock || !buf || !size)
958 + return -EINVAL;
959 +- }
960 ++
961 ++ usbip_dbg_xmit("enter\n");
962 +
963 + do {
964 + sock->sk->sk_allocation = GFP_NOIO;
965 +@@ -341,11 +337,8 @@ int usbip_recv(struct socket *sock, void *buf, int size)
966 + msg.msg_flags = MSG_NOSIGNAL;
967 +
968 + result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
969 +- if (result <= 0) {
970 +- pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
971 +- sock, buf, size, result, total);
972 ++ if (result <= 0)
973 + goto err;
974 +- }
975 +
976 + size -= result;
977 + buf += result;
978 +diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
979 +index 86b08475c254..f875ccaa55f9 100644
980 +--- a/drivers/usb/usbip/usbip_common.h
981 ++++ b/drivers/usb/usbip/usbip_common.h
982 +@@ -261,6 +261,7 @@ struct usbip_device {
983 + /* lock for status */
984 + spinlock_t lock;
985 +
986 ++ int sockfd;
987 + struct socket *tcp_socket;
988 +
989 + struct task_struct *tcp_rx;
990 +diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
991 +index 64933b993d7a..2580a32bcdff 100644
992 +--- a/drivers/usb/usbip/usbip_event.c
993 ++++ b/drivers/usb/usbip/usbip_event.c
994 +@@ -117,11 +117,12 @@ EXPORT_SYMBOL_GPL(usbip_event_add);
995 + int usbip_event_happened(struct usbip_device *ud)
996 + {
997 + int happened = 0;
998 ++ unsigned long flags;
999 +
1000 +- spin_lock(&ud->lock);
1001 ++ spin_lock_irqsave(&ud->lock, flags);
1002 + if (ud->event != 0)
1003 + happened = 1;
1004 +- spin_unlock(&ud->lock);
1005 ++ spin_unlock_irqrestore(&ud->lock, flags);
1006 +
1007 + return happened;
1008 + }
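
The same conversion recurs through the remaining usbip hunks: each
plain spin_lock()/spin_unlock() on these structures becomes the irqsave
variant, the usual motivation being that the lock may now also be taken
from interrupt context, where a process-context holder using plain
spin_lock() could deadlock against the IRQ handler. The pattern, for
reference:

	unsigned long flags;

	spin_lock_irqsave(&ud->lock, flags);	/* disables local IRQs and
						 * remembers prior state */
	/* ... critical section ... */
	spin_unlock_irqrestore(&ud->lock, flags);

Unlike spin_unlock_irq(), irqrestore puts the caller's interrupt state
back exactly as it was, so the helpers stay correct whether IRQs were
on or off on entry.
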
1009 +diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
1010 +index f9af04d7f02f..00d68945548e 100644
1011 +--- a/drivers/usb/usbip/vhci_hcd.c
1012 ++++ b/drivers/usb/usbip/vhci_hcd.c
1013 +@@ -121,9 +121,11 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
1014 +
1015 + void rh_port_connect(int rhport, enum usb_device_speed speed)
1016 + {
1017 ++ unsigned long flags;
1018 ++
1019 + usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
1020 +
1021 +- spin_lock(&the_controller->lock);
1022 ++ spin_lock_irqsave(&the_controller->lock, flags);
1023 +
1024 + the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION
1025 + | (1 << USB_PORT_FEAT_C_CONNECTION);
1026 +@@ -139,22 +141,24 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
1027 + break;
1028 + }
1029 +
1030 +- spin_unlock(&the_controller->lock);
1031 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1032 +
1033 + usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
1034 + }
1035 +
1036 + static void rh_port_disconnect(int rhport)
1037 + {
1038 ++ unsigned long flags;
1039 ++
1040 + usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
1041 +
1042 +- spin_lock(&the_controller->lock);
1043 ++ spin_lock_irqsave(&the_controller->lock, flags);
1044 +
1045 + the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION;
1046 + the_controller->port_status[rhport] |=
1047 + (1 << USB_PORT_FEAT_C_CONNECTION);
1048 +
1049 +- spin_unlock(&the_controller->lock);
1050 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1051 + usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
1052 + }
1053 +
1054 +@@ -182,13 +186,14 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
1055 + int retval;
1056 + int rhport;
1057 + int changed = 0;
1058 ++ unsigned long flags;
1059 +
1060 + retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8);
1061 + memset(buf, 0, retval);
1062 +
1063 + vhci = hcd_to_vhci(hcd);
1064 +
1065 +- spin_lock(&vhci->lock);
1066 ++ spin_lock_irqsave(&vhci->lock, flags);
1067 + if (!HCD_HW_ACCESSIBLE(hcd)) {
1068 + usbip_dbg_vhci_rh("hw accessible flag not on?\n");
1069 + goto done;
1070 +@@ -209,7 +214,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
1071 + usb_hcd_resume_root_hub(hcd);
1072 +
1073 + done:
1074 +- spin_unlock(&vhci->lock);
1075 ++ spin_unlock_irqrestore(&vhci->lock, flags);
1076 + return changed ? retval : 0;
1077 + }
1078 +
1079 +@@ -236,6 +241,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1080 + struct vhci_hcd *dum;
1081 + int retval = 0;
1082 + int rhport;
1083 ++ unsigned long flags;
1084 +
1085 + u32 prev_port_status[VHCI_NPORTS];
1086 +
1087 +@@ -254,7 +260,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1088 +
1089 + dum = hcd_to_vhci(hcd);
1090 +
1091 +- spin_lock(&dum->lock);
1092 ++ spin_lock_irqsave(&dum->lock, flags);
1093 +
1094 + /* store old status and compare now and old later */
1095 + if (usbip_dbg_flag_vhci_rh) {
1096 +@@ -408,7 +414,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
1097 + }
1098 + usbip_dbg_vhci_rh(" bye\n");
1099 +
1100 +- spin_unlock(&dum->lock);
1101 ++ spin_unlock_irqrestore(&dum->lock, flags);
1102 +
1103 + return retval;
1104 + }
1105 +@@ -431,6 +437,7 @@ static void vhci_tx_urb(struct urb *urb)
1106 + {
1107 + struct vhci_device *vdev = get_vdev(urb->dev);
1108 + struct vhci_priv *priv;
1109 ++ unsigned long flags;
1110 +
1111 + if (!vdev) {
1112 + pr_err("could not get virtual device");
1113 +@@ -443,7 +450,7 @@ static void vhci_tx_urb(struct urb *urb)
1114 + return;
1115 + }
1116 +
1117 +- spin_lock(&vdev->priv_lock);
1118 ++ spin_lock_irqsave(&vdev->priv_lock, flags);
1119 +
1120 + priv->seqnum = atomic_inc_return(&the_controller->seqnum);
1121 + if (priv->seqnum == 0xffff)
1122 +@@ -457,7 +464,7 @@ static void vhci_tx_urb(struct urb *urb)
1123 + list_add_tail(&priv->list, &vdev->priv_tx);
1124 +
1125 + wake_up(&vdev->waitq_tx);
1126 +- spin_unlock(&vdev->priv_lock);
1127 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1128 + }
1129 +
1130 + static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1131 +@@ -466,15 +473,16 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1132 + struct device *dev = &urb->dev->dev;
1133 + int ret = 0;
1134 + struct vhci_device *vdev;
1135 ++ unsigned long flags;
1136 +
1137 + /* patch to usb_sg_init() is in 2.5.60 */
1138 + BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
1139 +
1140 +- spin_lock(&the_controller->lock);
1141 ++ spin_lock_irqsave(&the_controller->lock, flags);
1142 +
1143 + if (urb->status != -EINPROGRESS) {
1144 + dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
1145 +- spin_unlock(&the_controller->lock);
1146 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1147 + return urb->status;
1148 + }
1149 +
1150 +@@ -486,7 +494,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1151 + vdev->ud.status == VDEV_ST_ERROR) {
1152 + dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
1153 + spin_unlock(&vdev->ud.lock);
1154 +- spin_unlock(&the_controller->lock);
1155 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1156 + return -ENODEV;
1157 + }
1158 + spin_unlock(&vdev->ud.lock);
1159 +@@ -559,14 +567,14 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
1160 +
1161 + out:
1162 + vhci_tx_urb(urb);
1163 +- spin_unlock(&the_controller->lock);
1164 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1165 +
1166 + return 0;
1167 +
1168 + no_need_xmit:
1169 + usb_hcd_unlink_urb_from_ep(hcd, urb);
1170 + no_need_unlink:
1171 +- spin_unlock(&the_controller->lock);
1172 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1173 + if (!ret)
1174 + usb_hcd_giveback_urb(vhci_to_hcd(the_controller),
1175 + urb, urb->status);
1176 +@@ -623,14 +631,15 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1177 + {
1178 + struct vhci_priv *priv;
1179 + struct vhci_device *vdev;
1180 ++ unsigned long flags;
1181 +
1182 +- spin_lock(&the_controller->lock);
1183 ++ spin_lock_irqsave(&the_controller->lock, flags);
1184 +
1185 + priv = urb->hcpriv;
1186 + if (!priv) {
1187 + /* URB was never linked! or will be soon given back by
1188 + * vhci_rx. */
1189 +- spin_unlock(&the_controller->lock);
1190 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1191 + return -EIDRM;
1192 + }
1193 +
1194 +@@ -639,7 +648,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1195 +
1196 + ret = usb_hcd_check_unlink_urb(hcd, urb, status);
1197 + if (ret) {
1198 +- spin_unlock(&the_controller->lock);
1199 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1200 + return ret;
1201 + }
1202 + }
1203 +@@ -664,10 +673,10 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1204 + */
1205 + usb_hcd_unlink_urb_from_ep(hcd, urb);
1206 +
1207 +- spin_unlock(&the_controller->lock);
1208 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1209 + usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
1210 + urb->status);
1211 +- spin_lock(&the_controller->lock);
1212 ++ spin_lock_irqsave(&the_controller->lock, flags);
1213 +
1214 + } else {
1215 + /* tcp connection is alive */
1216 +@@ -679,7 +688,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1217 + unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
1218 + if (!unlink) {
1219 + spin_unlock(&vdev->priv_lock);
1220 +- spin_unlock(&the_controller->lock);
1221 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1222 + usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
1223 + return -ENOMEM;
1224 + }
1225 +@@ -698,7 +707,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1226 + spin_unlock(&vdev->priv_lock);
1227 + }
1228 +
1229 +- spin_unlock(&the_controller->lock);
1230 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1231 +
1232 + usbip_dbg_vhci_hc("leave\n");
1233 + return 0;
1234 +@@ -707,8 +716,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
1235 + static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
1236 + {
1237 + struct vhci_unlink *unlink, *tmp;
1238 ++ unsigned long flags;
1239 +
1240 +- spin_lock(&the_controller->lock);
1241 ++ spin_lock_irqsave(&the_controller->lock, flags);
1242 + spin_lock(&vdev->priv_lock);
1243 +
1244 + list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
1245 +@@ -742,19 +752,19 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
1246 + list_del(&unlink->list);
1247 +
1248 + spin_unlock(&vdev->priv_lock);
1249 +- spin_unlock(&the_controller->lock);
1250 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1251 +
1252 + usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
1253 + urb->status);
1254 +
1255 +- spin_lock(&the_controller->lock);
1256 ++ spin_lock_irqsave(&the_controller->lock, flags);
1257 + spin_lock(&vdev->priv_lock);
1258 +
1259 + kfree(unlink);
1260 + }
1261 +
1262 + spin_unlock(&vdev->priv_lock);
1263 +- spin_unlock(&the_controller->lock);
1264 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1265 + }
1266 +
1267 + /*
1268 +@@ -768,7 +778,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
1269 +
1270 + /* need this? see stub_dev.c */
1271 + if (ud->tcp_socket) {
1272 +- pr_debug("shutdown tcp_socket %p\n", ud->tcp_socket);
1273 ++ pr_debug("shutdown sockfd %d\n", ud->sockfd);
1274 + kernel_sock_shutdown(ud->tcp_socket, SHUT_RDWR);
1275 + }
1276 +
1277 +@@ -821,8 +831,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
1278 + static void vhci_device_reset(struct usbip_device *ud)
1279 + {
1280 + struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
1281 ++ unsigned long flags;
1282 +
1283 +- spin_lock(&ud->lock);
1284 ++ spin_lock_irqsave(&ud->lock, flags);
1285 +
1286 + vdev->speed = 0;
1287 + vdev->devid = 0;
1288 +@@ -836,14 +847,16 @@ static void vhci_device_reset(struct usbip_device *ud)
1289 + }
1290 + ud->status = VDEV_ST_NULL;
1291 +
1292 +- spin_unlock(&ud->lock);
1293 ++ spin_unlock_irqrestore(&ud->lock, flags);
1294 + }
1295 +
1296 + static void vhci_device_unusable(struct usbip_device *ud)
1297 + {
1298 +- spin_lock(&ud->lock);
1299 ++ unsigned long flags;
1300 ++
1301 ++ spin_lock_irqsave(&ud->lock, flags);
1302 + ud->status = VDEV_ST_ERROR;
1303 +- spin_unlock(&ud->lock);
1304 ++ spin_unlock_irqrestore(&ud->lock, flags);
1305 + }
1306 +
1307 + static void vhci_device_init(struct vhci_device *vdev)
1308 +@@ -933,12 +946,13 @@ static int vhci_get_frame_number(struct usb_hcd *hcd)
1309 + static int vhci_bus_suspend(struct usb_hcd *hcd)
1310 + {
1311 + struct vhci_hcd *vhci = hcd_to_vhci(hcd);
1312 ++ unsigned long flags;
1313 +
1314 + dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
1315 +
1316 +- spin_lock(&vhci->lock);
1317 ++ spin_lock_irqsave(&vhci->lock, flags);
1318 + hcd->state = HC_STATE_SUSPENDED;
1319 +- spin_unlock(&vhci->lock);
1320 ++ spin_unlock_irqrestore(&vhci->lock, flags);
1321 +
1322 + return 0;
1323 + }
1324 +@@ -947,15 +961,16 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
1325 + {
1326 + struct vhci_hcd *vhci = hcd_to_vhci(hcd);
1327 + int rc = 0;
1328 ++ unsigned long flags;
1329 +
1330 + dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
1331 +
1332 +- spin_lock(&vhci->lock);
1333 ++ spin_lock_irqsave(&vhci->lock, flags);
1334 + if (!HCD_HW_ACCESSIBLE(hcd))
1335 + rc = -ESHUTDOWN;
1336 + else
1337 + hcd->state = HC_STATE_RUNNING;
1338 +- spin_unlock(&vhci->lock);
1339 ++ spin_unlock_irqrestore(&vhci->lock, flags);
1340 +
1341 + return rc;
1342 + }
1343 +@@ -1053,17 +1068,18 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
1344 + int rhport = 0;
1345 + int connected = 0;
1346 + int ret = 0;
1347 ++ unsigned long flags;
1348 +
1349 + hcd = platform_get_drvdata(pdev);
1350 +
1351 +- spin_lock(&the_controller->lock);
1352 ++ spin_lock_irqsave(&the_controller->lock, flags);
1353 +
1354 + for (rhport = 0; rhport < VHCI_NPORTS; rhport++)
1355 + if (the_controller->port_status[rhport] &
1356 + USB_PORT_STAT_CONNECTION)
1357 + connected += 1;
1358 +
1359 +- spin_unlock(&the_controller->lock);
1360 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1361 +
1362 + if (connected > 0) {
1363 + dev_info(&pdev->dev,
1364 +diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
1365 +index bc4eb0855314..323aa7789989 100644
1366 +--- a/drivers/usb/usbip/vhci_rx.c
1367 ++++ b/drivers/usb/usbip/vhci_rx.c
1368 +@@ -71,10 +71,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
1369 + {
1370 + struct usbip_device *ud = &vdev->ud;
1371 + struct urb *urb;
1372 ++ unsigned long flags;
1373 +
1374 +- spin_lock(&vdev->priv_lock);
1375 ++ spin_lock_irqsave(&vdev->priv_lock, flags);
1376 + urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
1377 +- spin_unlock(&vdev->priv_lock);
1378 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1379 +
1380 + if (!urb) {
1381 + pr_err("cannot find a urb of seqnum %u max seqnum %d\n",
1382 +@@ -103,9 +104,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
1383 +
1384 + usbip_dbg_vhci_rx("now giveback urb %u\n", pdu->base.seqnum);
1385 +
1386 +- spin_lock(&the_controller->lock);
1387 ++ spin_lock_irqsave(&the_controller->lock, flags);
1388 + usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
1389 +- spin_unlock(&the_controller->lock);
1390 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1391 +
1392 + usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
1393 +
1394 +@@ -116,8 +117,9 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
1395 + struct usbip_header *pdu)
1396 + {
1397 + struct vhci_unlink *unlink, *tmp;
1398 ++ unsigned long flags;
1399 +
1400 +- spin_lock(&vdev->priv_lock);
1401 ++ spin_lock_irqsave(&vdev->priv_lock, flags);
1402 +
1403 + list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
1404 + pr_info("unlink->seqnum %lu\n", unlink->seqnum);
1405 +@@ -126,12 +128,12 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
1406 + unlink->seqnum);
1407 + list_del(&unlink->list);
1408 +
1409 +- spin_unlock(&vdev->priv_lock);
1410 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1411 + return unlink;
1412 + }
1413 + }
1414 +
1415 +- spin_unlock(&vdev->priv_lock);
1416 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1417 +
1418 + return NULL;
1419 + }
1420 +@@ -141,6 +143,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
1421 + {
1422 + struct vhci_unlink *unlink;
1423 + struct urb *urb;
1424 ++ unsigned long flags;
1425 +
1426 + usbip_dump_header(pdu);
1427 +
1428 +@@ -151,9 +154,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
1429 + return;
1430 + }
1431 +
1432 +- spin_lock(&vdev->priv_lock);
1433 ++ spin_lock_irqsave(&vdev->priv_lock, flags);
1434 + urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
1435 +- spin_unlock(&vdev->priv_lock);
1436 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1437 +
1438 + if (!urb) {
1439 + /*
1440 +@@ -170,9 +173,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
1441 + urb->status = pdu->u.ret_unlink.status;
1442 + pr_info("urb->status %d\n", urb->status);
1443 +
1444 +- spin_lock(&the_controller->lock);
1445 ++ spin_lock_irqsave(&the_controller->lock, flags);
1446 + usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
1447 +- spin_unlock(&the_controller->lock);
1448 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1449 +
1450 + usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
1451 + urb->status);
1452 +@@ -184,10 +187,11 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
1453 + static int vhci_priv_tx_empty(struct vhci_device *vdev)
1454 + {
1455 + int empty = 0;
1456 ++ unsigned long flags;
1457 +
1458 +- spin_lock(&vdev->priv_lock);
1459 ++ spin_lock_irqsave(&vdev->priv_lock, flags);
1460 + empty = list_empty(&vdev->priv_rx);
1461 +- spin_unlock(&vdev->priv_lock);
1462 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1463 +
1464 + return empty;
1465 + }
1466 +diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
1467 +index 211f43f67ea2..1c7f41a65565 100644
1468 +--- a/drivers/usb/usbip/vhci_sysfs.c
1469 ++++ b/drivers/usb/usbip/vhci_sysfs.c
1470 +@@ -32,23 +32,28 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
1471 + {
1472 + char *s = out;
1473 + int i = 0;
1474 ++ unsigned long flags;
1475 +
1476 + BUG_ON(!the_controller || !out);
1477 +
1478 +- spin_lock(&the_controller->lock);
1479 ++ spin_lock_irqsave(&the_controller->lock, flags);
1480 +
1481 + /*
1482 + * output example:
1483 +- * prt sta spd dev socket local_busid
1484 +- * 000 004 000 000 c5a7bb80 1-2.3
1485 +- * 001 004 000 000 d8cee980 2-3.4
1486 ++ * port sta spd dev sockfd local_busid
1487 ++ * 0000 004 000 00000000 000003 1-2.3
1488 ++ * 0001 004 000 00000000 000004 2-3.4
1489 + *
1490 +- * IP address can be retrieved from a socket pointer address by looking
1491 +- * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
1492 +- * port number and its peer IP address.
1493 ++ * Output includes socket fd instead of socket pointer address to
1494 ++ * avoid leaking kernel memory address in:
1495 ++ * /sys/devices/platform/vhci_hcd.0/status and in debug output.
1496 ++ * The socket pointer address is not used at the moment and it was
1497 ++ * made visible as a convenient way to find IP address from socket
1498 ++ * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens
1499 ++ * a security hole, the change is made to use sockfd instead.
1500 + */
1501 + out += sprintf(out,
1502 +- "prt sta spd bus dev socket local_busid\n");
1503 ++ "prt sta spd bus dev sockfd local_busid\n");
1504 +
1505 + for (i = 0; i < VHCI_NPORTS; i++) {
1506 + struct vhci_device *vdev = port_to_vdev(i);
1507 +@@ -60,17 +65,17 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
1508 + out += sprintf(out, "%03u %08x ",
1509 + vdev->speed, vdev->devid);
1510 + out += sprintf(out, "%16p ", vdev->ud.tcp_socket);
1511 ++ out += sprintf(out, "%06u", vdev->ud.sockfd);
1512 + out += sprintf(out, "%s", dev_name(&vdev->udev->dev));
1513 +
1514 +- } else {
1515 +- out += sprintf(out, "000 000 000 0000000000000000 0-0");
1516 +- }
1517 ++ } else
1518 ++ out += sprintf(out, "000 000 000 000000 0-0");
1519 +
1520 + out += sprintf(out, "\n");
1521 + spin_unlock(&vdev->ud.lock);
1522 + }
1523 +
1524 +- spin_unlock(&the_controller->lock);
1525 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1526 +
1527 + return out - s;
1528 + }
1529 +@@ -80,11 +85,12 @@ static DEVICE_ATTR_RO(status);
1530 + static int vhci_port_disconnect(__u32 rhport)
1531 + {
1532 + struct vhci_device *vdev;
1533 ++ unsigned long flags;
1534 +
1535 + usbip_dbg_vhci_sysfs("enter\n");
1536 +
1537 + /* lock */
1538 +- spin_lock(&the_controller->lock);
1539 ++ spin_lock_irqsave(&the_controller->lock, flags);
1540 +
1541 + vdev = port_to_vdev(rhport);
1542 +
1543 +@@ -94,14 +100,14 @@ static int vhci_port_disconnect(__u32 rhport)
1544 +
1545 + /* unlock */
1546 + spin_unlock(&vdev->ud.lock);
1547 +- spin_unlock(&the_controller->lock);
1548 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1549 +
1550 + return -EINVAL;
1551 + }
1552 +
1553 + /* unlock */
1554 + spin_unlock(&vdev->ud.lock);
1555 +- spin_unlock(&the_controller->lock);
1556 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1557 +
1558 + usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
1559 +
1560 +@@ -177,6 +183,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
1561 + int sockfd = 0;
1562 + __u32 rhport = 0, devid = 0, speed = 0;
1563 + int err;
1564 ++ unsigned long flags;
1565 +
1566 + /*
1567 + * @rhport: port number of vhci_hcd
1568 +@@ -202,14 +209,14 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
1569 + /* now need lock until setting vdev status as used */
1570 +
1571 + /* begin a lock */
1572 +- spin_lock(&the_controller->lock);
1573 ++ spin_lock_irqsave(&the_controller->lock, flags);
1574 + vdev = port_to_vdev(rhport);
1575 + spin_lock(&vdev->ud.lock);
1576 +
1577 + if (vdev->ud.status != VDEV_ST_NULL) {
1578 + /* end of the lock */
1579 + spin_unlock(&vdev->ud.lock);
1580 +- spin_unlock(&the_controller->lock);
1581 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1582 +
1583 + sockfd_put(socket);
1584 +
1585 +@@ -223,11 +230,12 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
1586 +
1587 + vdev->devid = devid;
1588 + vdev->speed = speed;
1589 ++ vdev->ud.sockfd = sockfd;
1590 + vdev->ud.tcp_socket = socket;
1591 + vdev->ud.status = VDEV_ST_NOTASSIGNED;
1592 +
1593 + spin_unlock(&vdev->ud.lock);
1594 +- spin_unlock(&the_controller->lock);
1595 ++ spin_unlock_irqrestore(&the_controller->lock, flags);
1596 + /* end the lock */
1597 +
1598 + vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
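
On the sysfs change above: the status file now reports the sockfd that userspace passed in at attach time rather than the socket pointer. A hedged sketch of the distinction:

    /* Sketch only: a %p-printed pointer reveals a kernel address, which
     * helps defeat KASLR; the descriptor number came from userspace in
     * the first place, so echoing it back leaks nothing.
     */
    out += sprintf(out, "%06u", vdev->ud.sockfd);        /* safe to expose */
    /* vs. "%16p", vdev->ud.tcp_socket  -- a kernel address */
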
1599 +diff --git a/drivers/usb/usbip/vhci_tx.c b/drivers/usb/usbip/vhci_tx.c
1600 +index 3c5796c8633a..a9a663a578b6 100644
1601 +--- a/drivers/usb/usbip/vhci_tx.c
1602 ++++ b/drivers/usb/usbip/vhci_tx.c
1603 +@@ -47,16 +47,17 @@ static void setup_cmd_submit_pdu(struct usbip_header *pdup, struct urb *urb)
1604 + static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
1605 + {
1606 + struct vhci_priv *priv, *tmp;
1607 ++ unsigned long flags;
1608 +
1609 +- spin_lock(&vdev->priv_lock);
1610 ++ spin_lock_irqsave(&vdev->priv_lock, flags);
1611 +
1612 + list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
1613 + list_move_tail(&priv->list, &vdev->priv_rx);
1614 +- spin_unlock(&vdev->priv_lock);
1615 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1616 + return priv;
1617 + }
1618 +
1619 +- spin_unlock(&vdev->priv_lock);
1620 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1621 +
1622 + return NULL;
1623 + }
1624 +@@ -137,16 +138,17 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
1625 + static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
1626 + {
1627 + struct vhci_unlink *unlink, *tmp;
1628 ++ unsigned long flags;
1629 +
1630 +- spin_lock(&vdev->priv_lock);
1631 ++ spin_lock_irqsave(&vdev->priv_lock, flags);
1632 +
1633 + list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
1634 + list_move_tail(&unlink->list, &vdev->unlink_rx);
1635 +- spin_unlock(&vdev->priv_lock);
1636 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1637 + return unlink;
1638 + }
1639 +
1640 +- spin_unlock(&vdev->priv_lock);
1641 ++ spin_unlock_irqrestore(&vdev->priv_lock, flags);
1642 +
1643 + return NULL;
1644 + }
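
In dequeue_from_priv_tx() and dequeue_from_unlink_tx() above, the loop body returns on its first iteration, so each function simply pops the head of one list onto another under the lock. An equivalent shape, as a minimal sketch with a hypothetical helper name:

    static struct vhci_priv *pop_priv_tx(struct vhci_device *vdev)
    {
            struct vhci_priv *priv;
            unsigned long flags;

            spin_lock_irqsave(&vdev->priv_lock, flags);
            priv = list_first_entry_or_null(&vdev->priv_tx,
                                            struct vhci_priv, list);
            if (priv)
                    list_move_tail(&priv->list, &vdev->priv_rx);
            spin_unlock_irqrestore(&vdev->priv_lock, flags);
            return priv;    /* NULL if priv_tx was empty */
    }
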
1645 +diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c
1646 +index d6aeb84e90b6..d882d873c5a3 100644
1647 +--- a/fs/ext2/acl.c
1648 ++++ b/fs/ext2/acl.c
1649 +@@ -178,11 +178,8 @@ ext2_get_acl(struct inode *inode, int type)
1650 + return acl;
1651 + }
1652 +
1653 +-/*
1654 +- * inode->i_mutex: down
1655 +- */
1656 +-int
1657 +-ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1658 ++static int
1659 ++__ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1660 + {
1661 + int name_index;
1662 + void *value = NULL;
1663 +@@ -192,13 +189,6 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1664 + switch(type) {
1665 + case ACL_TYPE_ACCESS:
1666 + name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS;
1667 +- if (acl) {
1668 +- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1669 +- if (error)
1670 +- return error;
1671 +- inode->i_ctime = CURRENT_TIME_SEC;
1672 +- mark_inode_dirty(inode);
1673 +- }
1674 + break;
1675 +
1676 + case ACL_TYPE_DEFAULT:
1677 +@@ -224,6 +214,24 @@ ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1678 + return error;
1679 + }
1680 +
1681 ++/*
1682 ++ * inode->i_mutex: down
1683 ++ */
1684 ++int
1685 ++ext2_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1686 ++{
1687 ++ int error;
1688 ++
1689 ++ if (type == ACL_TYPE_ACCESS && acl) {
1690 ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1691 ++ if (error)
1692 ++ return error;
1693 ++ inode->i_ctime = CURRENT_TIME_SEC;
1694 ++ mark_inode_dirty(inode);
1695 ++ }
1696 ++ return __ext2_set_acl(inode, acl, type);
1697 ++}
1698 ++
1699 + /*
1700 + * Initialize the ACLs of a new inode. Called from ext2_new_inode.
1701 + *
1702 +@@ -241,12 +249,12 @@ ext2_init_acl(struct inode *inode, struct inode *dir)
1703 + return error;
1704 +
1705 + if (default_acl) {
1706 +- error = ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
1707 ++ error = __ext2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
1708 + posix_acl_release(default_acl);
1709 + }
1710 + if (acl) {
1711 + if (!error)
1712 +- error = ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
1713 ++ error = __ext2_set_acl(inode, acl, ACL_TYPE_ACCESS);
1714 + posix_acl_release(acl);
1715 + }
1716 + return error;
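
The ext2 refactoring above splits the setter so that posix_acl_update_mode() runs only on the public ext2_set_acl() path; ext2_init_acl() now calls the raw __ext2_set_acl() and no longer rewrites i_mode when inheriting a default ACL. Worth noting: posix_acl_update_mode() may also replace the ACL pointer with NULL when the ACL is equivalent to the mode bits, which is why it takes &acl. The shape of the split, as a hedged sketch with generic names:

    int set_acl(struct inode *inode, struct posix_acl *acl, int type)
    {
            int err;

            if (type == ACL_TYPE_ACCESS && acl) {
                    /* may update i_mode and may NULL out acl */
                    err = posix_acl_update_mode(inode, &inode->i_mode, &acl);
                    if (err)
                            return err;
            }
            return __set_acl(inode, acl, type);   /* raw xattr write only */
    }
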
1717 +diff --git a/fs/fcntl.c b/fs/fcntl.c
1718 +index 62376451bbce..5df914943d96 100644
1719 +--- a/fs/fcntl.c
1720 ++++ b/fs/fcntl.c
1721 +@@ -113,6 +113,10 @@ void f_setown(struct file *filp, unsigned long arg, int force)
1722 + int who = arg;
1723 + type = PIDTYPE_PID;
1724 + if (who < 0) {
1725 ++ /* avoid overflow below */
1726 ++ if (who == INT_MIN)
1727 ++ return;
1728 ++
1729 + type = PIDTYPE_PGID;
1730 + who = -who;
1731 + }
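
The f_setown() guard above exists because negating INT_MIN is undefined behaviour in C: the two's-complement value -INT_MIN does not fit in an int. The same hazard is closed for LONG_MIN in the ipc/msg.c hunk further down. A small userspace demonstration of the guard:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            int who = INT_MIN;

            if (who == INT_MIN)                 /* bail before negating */
                    puts("reject: -INT_MIN is undefined");
            else
                    printf("pgid %d\n", -who);
            return 0;
    }
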
1732 +diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
1733 +index a260060042ad..67eb154af881 100644
1734 +--- a/fs/nfsd/auth.c
1735 ++++ b/fs/nfsd/auth.c
1736 +@@ -60,9 +60,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
1737 + else
1738 + GROUP_AT(gi, i) = GROUP_AT(rqgi, i);
1739 +
1740 +- /* Each thread allocates its own gi, no race */
1741 +- groups_sort(gi);
1742 + }
1743 ++
1744 ++ /* Each thread allocates its own gi, no race */
1745 ++ groups_sort(gi);
1746 + } else {
1747 + gi = get_group_info(rqgi);
1748 + }
1749 +diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
1750 +index dc198bc64c61..edc8ef78b63f 100644
1751 +--- a/fs/reiserfs/bitmap.c
1752 ++++ b/fs/reiserfs/bitmap.c
1753 +@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
1754 + "inode has negative prealloc blocks count.");
1755 + #endif
1756 + while (ei->i_prealloc_count > 0) {
1757 +- reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
1758 +- ei->i_prealloc_block++;
1759 ++ b_blocknr_t block_to_free;
1760 ++
1761 ++ /*
1762 ++ * reiserfs_free_prealloc_block can drop the write lock,
1763 ++ * which could allow another caller to free the same block.
1764 ++ * We can protect against it by modifying the prealloc
1765 ++ * state before calling it.
1766 ++ */
1767 ++ block_to_free = ei->i_prealloc_block++;
1768 + ei->i_prealloc_count--;
1769 ++ reiserfs_free_prealloc_block(th, inode, block_to_free);
1770 + dirty = 1;
1771 + }
1772 + if (dirty)
1773 +@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
1774 + hint->prealloc_size = 0;
1775 +
1776 + if (!hint->formatted_node && hint->preallocate) {
1777 +- if (S_ISREG(hint->inode->i_mode)
1778 ++ if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode)
1779 + && hint->inode->i_size >=
1780 + REISERFS_SB(hint->th->t_super)->s_alloc_options.
1781 + preallocmin * hint->inode->i_sb->s_blocksize)
1782 +diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
1783 +index 9b1824f35501..91b036902a17 100644
1784 +--- a/fs/reiserfs/xattr_acl.c
1785 ++++ b/fs/reiserfs/xattr_acl.c
1786 +@@ -37,7 +37,14 @@ reiserfs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
1787 + error = journal_begin(&th, inode->i_sb, jcreate_blocks);
1788 + reiserfs_write_unlock(inode->i_sb);
1789 + if (error == 0) {
1790 ++ if (type == ACL_TYPE_ACCESS && acl) {
1791 ++ error = posix_acl_update_mode(inode, &inode->i_mode,
1792 ++ &acl);
1793 ++ if (error)
1794 ++ goto unlock;
1795 ++ }
1796 + error = __reiserfs_set_acl(&th, inode, type, acl);
1797 ++unlock:
1798 + reiserfs_write_lock(inode->i_sb);
1799 + error2 = journal_end(&th);
1800 + reiserfs_write_unlock(inode->i_sb);
1801 +@@ -245,11 +252,6 @@ __reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
1802 + switch (type) {
1803 + case ACL_TYPE_ACCESS:
1804 + name = POSIX_ACL_XATTR_ACCESS;
1805 +- if (acl) {
1806 +- error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
1807 +- if (error)
1808 +- return error;
1809 +- }
1810 + break;
1811 + case ACL_TYPE_DEFAULT:
1812 + name = POSIX_ACL_XATTR_DEFAULT;
1813 +diff --git a/fs/select.c b/fs/select.c
1814 +index 015547330e88..f4dd55fc638c 100644
1815 +--- a/fs/select.c
1816 ++++ b/fs/select.c
1817 +@@ -29,6 +29,7 @@
1818 + #include <linux/sched/rt.h>
1819 + #include <linux/freezer.h>
1820 + #include <net/busy_poll.h>
1821 ++#include <linux/vmalloc.h>
1822 +
1823 + #include <asm/uaccess.h>
1824 +
1825 +@@ -550,7 +551,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
1826 + fd_set_bits fds;
1827 + void *bits;
1828 + int ret, max_fds;
1829 +- unsigned int size;
1830 ++ size_t size, alloc_size;
1831 + struct fdtable *fdt;
1832 + /* Allocate small arguments on the stack to save memory and be faster */
1833 + long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
1834 +@@ -577,7 +578,14 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
1835 + if (size > sizeof(stack_fds) / 6) {
1836 + /* Not enough space in on-stack array; must use kmalloc */
1837 + ret = -ENOMEM;
1838 +- bits = kmalloc(6 * size, GFP_KERNEL);
1839 ++ if (size > (SIZE_MAX / 6))
1840 ++ goto out_nofds;
1841 ++
1842 ++ alloc_size = 6 * size;
1843 ++ bits = kmalloc(alloc_size, GFP_KERNEL|__GFP_NOWARN);
1844 ++ if (!bits && alloc_size > PAGE_SIZE)
1845 ++ bits = vmalloc(alloc_size);
1846 ++
1847 + if (!bits)
1848 + goto out_nofds;
1849 + }
1850 +@@ -614,7 +622,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
1851 +
1852 + out:
1853 + if (bits != stack_fds)
1854 +- kfree(bits);
1855 ++ kvfree(bits);
1856 + out_nofds:
1857 + return ret;
1858 + }
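
The core_sys_select() change above does three things: it rejects sizes that would overflow the 6 * size multiplication, it retries a failed kmalloc() with vmalloc() for large requests, and it frees with kvfree(), which handles either allocator. This is essentially the pattern later kernels expose as kvmalloc(). A minimal sketch of the fallback:

    /* Minimal sketch of the allocation pattern: try the slab allocator
     * first, quietly, then fall back to vmalloc for requests larger
     * than a page.
     */
    static void *kvmalloc_compat(size_t size)
    {
            void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

            if (!p && size > PAGE_SIZE)
                    p = vmalloc(size);
            return p;           /* release with kvfree(p) */
    }
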
1859 +diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
1860 +index 2189935075b4..a951fd10aaaa 100644
1861 +--- a/include/linux/cacheinfo.h
1862 ++++ b/include/linux/cacheinfo.h
1863 +@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
1864 + struct cacheinfo *info_list;
1865 + unsigned int num_levels;
1866 + unsigned int num_leaves;
1867 ++ bool cpu_map_populated;
1868 + };
1869 +
1870 + /*
1871 +diff --git a/include/linux/ktime.h b/include/linux/ktime.h
1872 +index 2b6a204bd8d4..3ffc69ebe967 100644
1873 +--- a/include/linux/ktime.h
1874 ++++ b/include/linux/ktime.h
1875 +@@ -63,6 +63,13 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
1876 + #define ktime_add(lhs, rhs) \
1877 + ({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
1878 +
1879 ++/*
1880 ++ * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
1881 ++ * this means that you must check the result for overflow yourself.
1882 ++ */
1883 ++#define ktime_add_unsafe(lhs, rhs) \
1884 ++ ({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
1885 ++
1886 + /*
1887 + * Add a ktime_t variable and a scalar nanosecond value.
1888 + * res = kt + nsval:
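
ktime_add_unsafe() above performs the addition in unsigned arithmetic, which is defined to wrap, whereas signed overflow is undefined behaviour; ktime_add_safe() then range-checks the result itself. A userspace sketch of the same trick (the final unsigned-to-signed conversion is implementation-defined rather than undefined, and behaves as expected on the kernel's targets):

    #include <stdint.h>
    #include <stdio.h>

    static int64_t add_unsafe(int64_t a, int64_t b)
    {
            return (int64_t)((uint64_t)a + (uint64_t)b); /* wraps, no UB */
    }

    int main(void)
    {
            /* wraps to INT64_MIN; the caller must detect the overflow */
            printf("%lld\n", (long long)add_unsafe(INT64_MAX, 1));
            return 0;
    }
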
1889 +diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
1890 +index 04078e8a4803..d6c53fce006b 100644
1891 +--- a/include/linux/netfilter/x_tables.h
1892 ++++ b/include/linux/netfilter/x_tables.h
1893 +@@ -243,6 +243,10 @@ int xt_check_entry_offsets(const void *base, const char *elems,
1894 + unsigned int target_offset,
1895 + unsigned int next_offset);
1896 +
1897 ++unsigned int *xt_alloc_entry_offsets(unsigned int size);
1898 ++bool xt_find_jump_offset(const unsigned int *offsets,
1899 ++ unsigned int target, unsigned int size);
1900 ++
1901 + int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
1902 + bool inv_proto);
1903 + int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
1904 +@@ -377,16 +381,16 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
1905 + * allows us to return 0 for single core systems without forcing
1906 + * callers to deal with SMP vs. NONSMP issues.
1907 + */
1908 +-static inline u64 xt_percpu_counter_alloc(void)
1909 ++static inline unsigned long xt_percpu_counter_alloc(void)
1910 + {
1911 + if (nr_cpu_ids > 1) {
1912 + void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
1913 + sizeof(struct xt_counters));
1914 +
1915 + if (res == NULL)
1916 +- return (u64) -ENOMEM;
1917 ++ return -ENOMEM;
1918 +
1919 +- return (u64) (__force unsigned long) res;
1920 ++ return (__force unsigned long) res;
1921 + }
1922 +
1923 + return 0;
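
The x_tables.h change above makes xt_percpu_counter_alloc() return unsigned long instead of u64. IS_ERR_VALUE() is defined in terms of unsigned long, so mixing it with a u64 is fragile on 32-bit builds; the find_check_entry() hunks later in this patch therefore test the value at native word size before widening it into the u64 counters field:

    /* the calling pattern used by the ip/ip6/arp_tables hunks below */
    unsigned long pcnt = xt_percpu_counter_alloc();

    if (IS_ERR_VALUE(pcnt))         /* error test at unsigned long width */
            return -ENOMEM;
    e->counters.pcnt = pcnt;        /* only then store into the u64 field */
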
1924 +diff --git a/include/linux/sched.h b/include/linux/sched.h
1925 +index e887c8d6f395..90bea398e5e0 100644
1926 +--- a/include/linux/sched.h
1927 ++++ b/include/linux/sched.h
1928 +@@ -1313,6 +1313,7 @@ struct sched_dl_entity {
1929 + u64 dl_deadline; /* relative deadline of each instance */
1930 + u64 dl_period; /* separation of two instances (period) */
1931 + u64 dl_bw; /* dl_runtime / dl_deadline */
1932 ++ u64 dl_density; /* dl_runtime / dl_deadline */
1933 +
1934 + /*
1935 + * Actual scheduling parameters. Initialized with the values above,
1936 +diff --git a/include/linux/tcp.h b/include/linux/tcp.h
1937 +index 318c24612458..2260f92f1492 100644
1938 +--- a/include/linux/tcp.h
1939 ++++ b/include/linux/tcp.h
1940 +@@ -29,9 +29,14 @@ static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
1941 + return (struct tcphdr *)skb_transport_header(skb);
1942 + }
1943 +
1944 ++static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
1945 ++{
1946 ++ return th->doff * 4;
1947 ++}
1948 ++
1949 + static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
1950 + {
1951 +- return tcp_hdr(skb)->doff * 4;
1952 ++ return __tcp_hdrlen(tcp_hdr(skb));
1953 + }
1954 +
1955 + static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
1956 +diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
1957 +index a3d04934aa96..6f8fbcf10dfb 100644
1958 +--- a/include/linux/vermagic.h
1959 ++++ b/include/linux/vermagic.h
1960 +@@ -24,16 +24,10 @@
1961 + #ifndef MODULE_ARCH_VERMAGIC
1962 + #define MODULE_ARCH_VERMAGIC ""
1963 + #endif
1964 +-#ifdef RETPOLINE
1965 +-#define MODULE_VERMAGIC_RETPOLINE "retpoline "
1966 +-#else
1967 +-#define MODULE_VERMAGIC_RETPOLINE ""
1968 +-#endif
1969 +
1970 + #define VERMAGIC_STRING \
1971 + UTS_RELEASE " " \
1972 + MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
1973 + MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
1974 +- MODULE_ARCH_VERMAGIC \
1975 +- MODULE_VERMAGIC_RETPOLINE
1976 ++ MODULE_ARCH_VERMAGIC
1977 +
1978 +diff --git a/include/net/arp.h b/include/net/arp.h
1979 +index 5e0f891d476c..1b3f86981757 100644
1980 +--- a/include/net/arp.h
1981 ++++ b/include/net/arp.h
1982 +@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
1983 +
1984 + static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
1985 + {
1986 ++ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1987 ++ key = INADDR_ANY;
1988 ++
1989 + return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
1990 + }
1991 +
1992 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
1993 +index 7a8066b90289..84f0d0602433 100644
1994 +--- a/include/net/ipv6.h
1995 ++++ b/include/net/ipv6.h
1996 +@@ -281,6 +281,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
1997 + int flags);
1998 + int ip6_flowlabel_init(void);
1999 + void ip6_flowlabel_cleanup(void);
2000 ++bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
2001 +
2002 + static inline void fl6_sock_release(struct ip6_flowlabel *fl)
2003 + {
2004 +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
2005 +index 2dcea635ecce..93328c61934a 100644
2006 +--- a/include/net/net_namespace.h
2007 ++++ b/include/net/net_namespace.h
2008 +@@ -209,6 +209,11 @@ int net_eq(const struct net *net1, const struct net *net2)
2009 + return net1 == net2;
2010 + }
2011 +
2012 ++static inline int check_net(const struct net *net)
2013 ++{
2014 ++ return atomic_read(&net->count) != 0;
2015 ++}
2016 ++
2017 + void net_drop_ns(void *);
2018 +
2019 + #else
2020 +@@ -233,6 +238,11 @@ int net_eq(const struct net *net1, const struct net *net2)
2021 + return 1;
2022 + }
2023 +
2024 ++static inline int check_net(const struct net *net)
2025 ++{
2026 ++ return 1;
2027 ++}
2028 ++
2029 + #define net_drop_ns NULL
2030 + #endif
2031 +
2032 +diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
2033 +index bc81fb2e1f0e..6f04cb419115 100644
2034 +--- a/include/uapi/linux/eventpoll.h
2035 ++++ b/include/uapi/linux/eventpoll.h
2036 +@@ -26,6 +26,19 @@
2037 + #define EPOLL_CTL_DEL 2
2038 + #define EPOLL_CTL_MOD 3
2039 +
2040 ++/* Epoll event masks */
2041 ++#define EPOLLIN 0x00000001
2042 ++#define EPOLLPRI 0x00000002
2043 ++#define EPOLLOUT 0x00000004
2044 ++#define EPOLLERR 0x00000008
2045 ++#define EPOLLHUP 0x00000010
2046 ++#define EPOLLRDNORM 0x00000040
2047 ++#define EPOLLRDBAND 0x00000080
2048 ++#define EPOLLWRNORM 0x00000100
2049 ++#define EPOLLWRBAND 0x00000200
2050 ++#define EPOLLMSG 0x00000400
2051 ++#define EPOLLRDHUP 0x00002000
2052 ++
2053 + /*
2054 + * Request the handling of system wakeup events so as to prevent system suspends
2055 + * from happening while those events are being processed.
2056 +diff --git a/ipc/msg.c b/ipc/msg.c
2057 +index c6521c205cb4..f993f441f852 100644
2058 +--- a/ipc/msg.c
2059 ++++ b/ipc/msg.c
2060 +@@ -742,7 +742,10 @@ static inline int convert_mode(long *msgtyp, int msgflg)
2061 + if (*msgtyp == 0)
2062 + return SEARCH_ANY;
2063 + if (*msgtyp < 0) {
2064 +- *msgtyp = -*msgtyp;
2065 ++ if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */
2066 ++ *msgtyp = LONG_MAX;
2067 ++ else
2068 ++ *msgtyp = -*msgtyp;
2069 + return SEARCH_LESSEQUAL;
2070 + }
2071 + if (msgflg & MSG_EXCEPT)
2072 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
2073 +index 9d6b3d869592..e6d1173a2046 100644
2074 +--- a/kernel/sched/core.c
2075 ++++ b/kernel/sched/core.c
2076 +@@ -2109,6 +2109,7 @@ void __dl_clear_params(struct task_struct *p)
2077 + dl_se->dl_period = 0;
2078 + dl_se->flags = 0;
2079 + dl_se->dl_bw = 0;
2080 ++ dl_se->dl_density = 0;
2081 +
2082 + dl_se->dl_throttled = 0;
2083 + dl_se->dl_new = 1;
2084 +@@ -3647,6 +3648,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2085 + dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2086 + dl_se->flags = attr->sched_flags;
2087 + dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2088 ++ dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2089 +
2090 + /*
2091 + * Changing the parameters of a task is 'tricky' and we're not doing
2092 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
2093 +index 6be2afd9bfd6..e12b0a4df891 100644
2094 +--- a/kernel/sched/deadline.c
2095 ++++ b/kernel/sched/deadline.c
2096 +@@ -480,13 +480,84 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
2097 + }
2098 +
2099 + /*
2100 +- * When a -deadline entity is queued back on the runqueue, its runtime and
2101 +- * deadline might need updating.
2102 ++ * Revised wakeup rule [1]: For self-suspending tasks, rather than
2103 ++ * re-initializing the task's runtime and deadline, the revised wakeup
2104 ++ * rule adjusts the task's runtime so that the task cannot overrun its
2105 ++ * density.
2106 + *
2107 +- * The policy here is that we update the deadline of the entity only if:
2108 +- * - the current deadline is in the past,
2109 +- * - using the remaining runtime with the current deadline would make
2110 +- * the entity exceed its bandwidth.
2111 ++ * Reasoning: a task may overrun the density if:
2112 ++ * runtime / (deadline - t) > dl_runtime / dl_deadline
2113 ++ *
2114 ++ * Therefore, runtime can be adjusted to:
2115 ++ * runtime = (dl_runtime / dl_deadline) * (deadline - t)
2116 ++ *
2117 ++ * This way, the runtime will be equal to the maximum density
2118 ++ * the task can use without breaking any rule.
2119 ++ *
2120 ++ * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
2121 ++ * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
2122 ++ */
2123 ++static void
2124 ++update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
2125 ++{
2126 ++ u64 laxity = dl_se->deadline - rq_clock(rq);
2127 ++
2128 ++ /*
2129 ++ * If the task has deadline < period, and the deadline is in the past,
2130 ++ * it should already be throttled before this check.
2131 ++ *
2132 ++ * See update_dl_entity() comments for further details.
2133 ++ */
2134 ++ WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
2135 ++
2136 ++ dl_se->runtime = (dl_se->dl_density * laxity) >> 20;
2137 ++}
2138 ++
2139 ++/*
2140 ++ * Regarding the deadline, a task with implicit deadline has a relative
2141 ++ * deadline == relative period. A task with constrained deadline has a
2142 ++ * relative deadline <= relative period.
2143 ++ *
2144 ++ * We support constrained deadline tasks. However, some restrictions are
2145 ++ * applied only to tasks which do not have an implicit deadline. See
2146 ++ * update_dl_entity() to learn more about such restrictions.
2147 ++ *
2148 ++ * dl_is_implicit() returns true if the task has an implicit deadline.
2149 ++ */
2150 ++static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
2151 ++{
2152 ++ return dl_se->dl_deadline == dl_se->dl_period;
2153 ++}
2154 ++
2155 ++/*
2156 ++ * When a deadline entity is placed in the runqueue, its runtime and deadline
2157 ++ * might need to be updated. This is done by a CBS wake up rule. There are two
2158 ++ * different rules: 1) the original CBS; and 2) the Revised CBS.
2159 ++ *
2160 ++ * When the task is starting a new period, the Original CBS is used. In this
2161 ++ * case, the runtime is replenished and a new absolute deadline is set.
2162 ++ *
2163 ++ * When a task is queued before the beginning of the next period, using
2164 ++ * the remaining runtime and deadline could make the entity overflow; see
2165 ++ * dl_entity_overflow() for more about runtime overflow. When such a case
2166 ++ * is detected, the runtime and deadline need to be updated.
2167 ++ *
2168 ++ * If the task has an implicit deadline, i.e., deadline == period, the Original
2169 ++ * CBS is applied: the runtime is replenished and a new absolute deadline is
2170 ++ * set, as in the previous cases.
2171 ++ *
2172 ++ * However, the Original CBS does not work properly for tasks with
2173 ++ * deadline < period, which are said to have a constrained deadline. By
2174 ++ * applying the Original CBS, a constrained deadline task would be able to run
2175 ++ * runtime/deadline in a period. With deadline < period, the task would
2176 ++ * overrun the runtime/period allowed bandwidth, breaking the admission test.
2177 ++ *
2178 ++ * In order to prevent this misbehavior, the Revised CBS is used for
2179 ++ * constrained deadline tasks when a runtime overflow is detected. In the
2180 ++ * Revised CBS, rather than replenishing & setting a new absolute deadline,
2181 ++ * the remaining runtime of the task is reduced to avoid runtime overflow.
2182 ++ * Please refer to the comments in update_dl_revised_wakeup() to find
2183 ++ * more about the Revised CBS rule.
2184 + */
2185 + static void update_dl_entity(struct sched_dl_entity *dl_se,
2186 + struct sched_dl_entity *pi_se)
2187 +@@ -505,6 +576,14 @@ static void update_dl_entity(struct sched_dl_entity *dl_se,
2188 +
2189 + if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
2190 + dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
2191 ++
2192 ++ if (unlikely(!dl_is_implicit(dl_se) &&
2193 ++ !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
2194 ++ !dl_se->dl_boosted)) {
2195 ++ update_dl_revised_wakeup(dl_se, rq);
2196 ++ return;
2197 ++ }
2198 ++
2199 + dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
2200 + dl_se->runtime = pi_se->dl_runtime;
2201 + }
2202 +@@ -991,11 +1070,6 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
2203 + __dequeue_dl_entity(dl_se);
2204 + }
2205 +
2206 +-static inline bool dl_is_constrained(struct sched_dl_entity *dl_se)
2207 +-{
2208 +- return dl_se->dl_deadline < dl_se->dl_period;
2209 +-}
2210 +-
2211 + static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2212 + {
2213 + struct task_struct *pi_task = rt_mutex_get_top_task(p);
2214 +@@ -1027,7 +1101,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
2215 + * If that is the case, the task will be throttled and
2216 + * the replenishment timer will be set to the next period.
2217 + */
2218 +- if (!p->dl.dl_throttled && dl_is_constrained(&p->dl))
2219 ++ if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
2220 + dl_check_constrained_dl(&p->dl);
2221 +
2222 + /*
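
On the arithmetic in update_dl_revised_wakeup(): to_ratio() encodes a ratio in 20-bit fixed point, so dl_density is roughly (dl_runtime << 20) / dl_deadline, and (dl_density * laxity) >> 20 yields runtime = (dl_runtime / dl_deadline) * laxity without floating point. (Note that dl_bw, despite its similar comment in sched.h, is scaled by dl_period, per the core.c hunk above.) A small userspace check with made-up numbers:

    #include <stdint.h>
    #include <stdio.h>

    /* simplified to_ratio(); the kernel version also special-cases
     * RUNTIME_INF and period == 0 */
    static uint64_t to_ratio(uint64_t period, uint64_t runtime)
    {
            return (runtime << 20) / period;
    }

    int main(void)
    {
            uint64_t dl_runtime  = 10000000;   /* 10 ms, in ns   */
            uint64_t dl_deadline = 30000000;   /* 30 ms          */
            uint64_t laxity      = 15000000;   /* deadline - now */
            uint64_t density = to_ratio(dl_deadline, dl_runtime);
            uint64_t runtime = (density * laxity) >> 20;

            /* ~5000000 ns: one third of the remaining 15 ms window */
            printf("%llu\n", (unsigned long long)runtime);
            return 0;
    }
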
2223 +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
2224 +index 17f7bcff1e02..323282e63865 100644
2225 +--- a/kernel/time/hrtimer.c
2226 ++++ b/kernel/time/hrtimer.c
2227 +@@ -312,7 +312,7 @@ EXPORT_SYMBOL_GPL(__ktime_divns);
2228 + */
2229 + ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
2230 + {
2231 +- ktime_t res = ktime_add(lhs, rhs);
2232 ++ ktime_t res = ktime_add_unsafe(lhs, rhs);
2233 +
2234 + /*
2235 + * We use KTIME_SEC_MAX here, the maximum timeout which we can
2236 +@@ -669,7 +669,9 @@ static void hrtimer_reprogram(struct hrtimer *timer,
2237 + static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
2238 + {
2239 + base->expires_next.tv64 = KTIME_MAX;
2240 ++ base->hang_detected = 0;
2241 + base->hres_active = 0;
2242 ++ base->next_timer = NULL;
2243 + }
2244 +
2245 + /*
2246 +@@ -1615,6 +1617,7 @@ static void init_hrtimers_cpu(int cpu)
2247 + timerqueue_init_head(&cpu_base->clock_base[i].active);
2248 + }
2249 +
2250 ++ cpu_base->active_bases = 0;
2251 + cpu_base->cpu = cpu;
2252 + hrtimer_init_hres(cpu_base);
2253 + }
2254 +diff --git a/kernel/time/timer.c b/kernel/time/timer.c
2255 +index 125407144c01..3d7588a2e97c 100644
2256 +--- a/kernel/time/timer.c
2257 ++++ b/kernel/time/timer.c
2258 +@@ -764,8 +764,15 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
2259 + __acquires(timer->base->lock)
2260 + {
2261 + for (;;) {
2262 +- u32 tf = timer->flags;
2263 + struct tvec_base *base;
2264 ++ u32 tf;
2265 ++
2266 ++ /*
2267 ++ * We need to use READ_ONCE() here, otherwise the compiler
2268 ++ * might re-read @tf between the check for TIMER_MIGRATING
2269 ++ * and spin_lock().
2270 ++ */
2271 ++ tf = READ_ONCE(timer->flags);
2272 +
2273 + if (!(tf & TIMER_MIGRATING)) {
2274 + base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
2275 +diff --git a/mm/cma.c b/mm/cma.c
2276 +index bd0e1412475e..43f4a122e969 100644
2277 +--- a/mm/cma.c
2278 ++++ b/mm/cma.c
2279 +@@ -54,7 +54,7 @@ unsigned long cma_get_size(const struct cma *cma)
2280 + }
2281 +
2282 + static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
2283 +- int align_order)
2284 ++ unsigned int align_order)
2285 + {
2286 + if (align_order <= cma->order_per_bit)
2287 + return 0;
2288 +@@ -62,17 +62,14 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
2289 + }
2290 +
2291 + /*
2292 +- * Find a PFN aligned to the specified order and return an offset represented in
2293 +- * order_per_bits.
2294 ++ * Find the offset of the base PFN from the specified align_order.
2295 ++ * The value returned is represented in order_per_bits.
2296 + */
2297 + static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
2298 +- int align_order)
2299 ++ unsigned int align_order)
2300 + {
2301 +- if (align_order <= cma->order_per_bit)
2302 +- return 0;
2303 +-
2304 +- return (ALIGN(cma->base_pfn, (1UL << align_order))
2305 +- - cma->base_pfn) >> cma->order_per_bit;
2306 ++ return (cma->base_pfn & ((1UL << align_order) - 1))
2307 ++ >> cma->order_per_bit;
2308 + }
2309 +
2310 + static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
2311 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2312 +index e25b93a4267d..55a9facb8e8d 100644
2313 +--- a/mm/memcontrol.c
2314 ++++ b/mm/memcontrol.c
2315 +@@ -5576,7 +5576,7 @@ static void uncharge_list(struct list_head *page_list)
2316 + next = page->lru.next;
2317 +
2318 + VM_BUG_ON_PAGE(PageLRU(page), page);
2319 +- VM_BUG_ON_PAGE(page_count(page), page);
2320 ++ VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
2321 +
2322 + if (!page->mem_cgroup)
2323 + continue;
2324 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
2325 +index 091fe9b06663..92a647957f91 100644
2326 +--- a/mm/memory-failure.c
2327 ++++ b/mm/memory-failure.c
2328 +@@ -539,6 +539,13 @@ static int delete_from_lru_cache(struct page *p)
2329 + */
2330 + ClearPageActive(p);
2331 + ClearPageUnevictable(p);
2332 ++
2333 ++ /*
2334 ++ * Poisoned page might never drop its ref count to 0 so we have
2335 ++ * to uncharge it manually from its memcg.
2336 ++ */
2337 ++ mem_cgroup_uncharge(p);
2338 ++
2339 + /*
2340 + * drop the page count elevated by isolate_lru_page()
2341 + */
2342 +diff --git a/mm/mmap.c b/mm/mmap.c
2343 +index eaa460ddcaf9..cc84b97ca250 100644
2344 +--- a/mm/mmap.c
2345 ++++ b/mm/mmap.c
2346 +@@ -2188,7 +2188,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2347 + gap_addr = TASK_SIZE;
2348 +
2349 + next = vma->vm_next;
2350 +- if (next && next->vm_start < gap_addr) {
2351 ++ if (next && next->vm_start < gap_addr &&
2352 ++ (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
2353 + if (!(next->vm_flags & VM_GROWSUP))
2354 + return -ENOMEM;
2355 + /* Check that both stack segments have the same anon_vma? */
2356 +@@ -2273,7 +2274,8 @@ int expand_downwards(struct vm_area_struct *vma,
2357 + if (gap_addr > address)
2358 + return -ENOMEM;
2359 + prev = vma->vm_prev;
2360 +- if (prev && prev->vm_end > gap_addr) {
2361 ++ if (prev && prev->vm_end > gap_addr &&
2362 ++ (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
2363 + if (!(prev->vm_flags & VM_GROWSDOWN))
2364 + return -ENOMEM;
2365 + /* Check that both stack segments have the same anon_vma? */
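
The expand_upwards()/expand_downwards() change above relaxes the stack-gap enforcement: a neighbouring VMA now only blocks stack growth if it is an accessible mapping. This keeps PROT_NONE guard regions, as placed by some runtimes next to thread stacks, from spuriously failing expansion. A userspace illustration of such a guard mapping:

    #include <sys/mman.h>

    int main(void)
    {
            /* inaccessible guard region; under the relaxed check it no
             * longer counts as a blocking neighbour for stack growth */
            void *guard = mmap(NULL, 4096, PROT_NONE,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            return guard == MAP_FAILED;
    }
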
2366 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2367 +index 3c70f03d91ec..a4c9cd80c7b6 100644
2368 +--- a/mm/page_alloc.c
2369 ++++ b/mm/page_alloc.c
2370 +@@ -2468,9 +2468,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2371 + if (!area->nr_free)
2372 + continue;
2373 +
2374 +- if (alloc_harder)
2375 +- return true;
2376 +-
2377 + for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
2378 + if (!list_empty(&area->free_list[mt]))
2379 + return true;
2380 +@@ -2482,6 +2479,9 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
2381 + return true;
2382 + }
2383 + #endif
2384 ++ if (alloc_harder &&
2385 ++ !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
2386 ++ return true;
2387 + }
2388 + return false;
2389 + }
2390 +diff --git a/net/can/af_can.c b/net/can/af_can.c
2391 +index 928f58064098..c866e761651a 100644
2392 +--- a/net/can/af_can.c
2393 ++++ b/net/can/af_can.c
2394 +@@ -722,13 +722,12 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
2395 + if (unlikely(!net_eq(dev_net(dev), &init_net)))
2396 + goto drop;
2397 +
2398 +- if (WARN_ONCE(dev->type != ARPHRD_CAN ||
2399 +- skb->len != CAN_MTU ||
2400 +- cfd->len > CAN_MAX_DLEN,
2401 +- "PF_CAN: dropped non conform CAN skbuf: "
2402 +- "dev type %d, len %d, datalen %d\n",
2403 +- dev->type, skb->len, cfd->len))
2404 ++ if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
2405 ++ cfd->len > CAN_MAX_DLEN)) {
2406 ++ pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
2407 ++ dev->type, skb->len, cfd->len);
2408 + goto drop;
2409 ++ }
2410 +
2411 + can_receive(skb, dev);
2412 + return NET_RX_SUCCESS;
2413 +@@ -746,13 +745,12 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
2414 + if (unlikely(!net_eq(dev_net(dev), &init_net)))
2415 + goto drop;
2416 +
2417 +- if (WARN_ONCE(dev->type != ARPHRD_CAN ||
2418 +- skb->len != CANFD_MTU ||
2419 +- cfd->len > CANFD_MAX_DLEN,
2420 +- "PF_CAN: dropped non conform CAN FD skbuf: "
2421 +- "dev type %d, len %d, datalen %d\n",
2422 +- dev->type, skb->len, cfd->len))
2423 ++ if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
2424 ++ cfd->len > CANFD_MAX_DLEN)) {
2425 ++ pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
2426 ++ dev->type, skb->len, cfd->len);
2427 + goto drop;
2428 ++ }
2429 +
2430 + can_receive(skb, dev);
2431 + return NET_RX_SUCCESS;
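
In af_can.c above, WARN_ONCE() is downgraded to pr_warn_once() because the condition is reachable with crafted frames from untrusted interfaces: a WARN produces a backtrace and, with panic_on_warn set, brings the machine down. The shape of the rewrite:

    /* log-and-drop instead of WARN for input-triggerable conditions */
    if (unlikely(bad_frame)) {
            pr_warn_once("PF_CAN: dropped non conform CAN skbuf\n");
            goto drop;
    }
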
2432 +diff --git a/net/core/dev.c b/net/core/dev.c
2433 +index 3b67c1e5756f..cb58ba15d51e 100644
2434 +--- a/net/core/dev.c
2435 ++++ b/net/core/dev.c
2436 +@@ -2889,10 +2889,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
2437 + hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2438 +
2439 + /* + transport layer */
2440 +- if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2441 +- hdr_len += tcp_hdrlen(skb);
2442 +- else
2443 +- hdr_len += sizeof(struct udphdr);
2444 ++ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
2445 ++ const struct tcphdr *th;
2446 ++ struct tcphdr _tcphdr;
2447 ++
2448 ++ th = skb_header_pointer(skb, skb_transport_offset(skb),
2449 ++ sizeof(_tcphdr), &_tcphdr);
2450 ++ if (likely(th))
2451 ++ hdr_len += __tcp_hdrlen(th);
2452 ++ } else {
2453 ++ struct udphdr _udphdr;
2454 ++
2455 ++ if (skb_header_pointer(skb, skb_transport_offset(skb),
2456 ++ sizeof(_udphdr), &_udphdr))
2457 ++ hdr_len += sizeof(struct udphdr);
2458 ++ }
2459 +
2460 + if (shinfo->gso_type & SKB_GSO_DODGY)
2461 + gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
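
The qdisc_pkt_len_init() change above hardens against GSO packets from untrusted sources (e.g. virtio or packet sockets) that claim TCP/UDP segmentation but do not carry a complete transport header in the buffer. skb_header_pointer() copies the requested bytes into a stack buffer when they are not linear, and returns NULL if the packet is too short, so the header is never read out of bounds:

    /* defensive-read idiom: never trust the claimed header location */
    struct tcphdr _tcphdr;
    const struct tcphdr *th;

    th = skb_header_pointer(skb, skb_transport_offset(skb),
                            sizeof(_tcphdr), &_tcphdr);
    if (th)                                 /* NULL => truncated packet */
            hdr_len += __tcp_hdrlen(th);    /* th->doff * 4 */
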
2462 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
2463 +index ee9082792530..4d14908afaec 100644
2464 +--- a/net/core/flow_dissector.c
2465 ++++ b/net/core/flow_dissector.c
2466 +@@ -492,8 +492,8 @@ ip_proto_again:
2467 + out_good:
2468 + ret = true;
2469 +
2470 +- key_control->thoff = (u16)nhoff;
2471 + out:
2472 ++ key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
2473 + key_basic->n_proto = proto;
2474 + key_basic->ip_proto = ip_proto;
2475 +
2476 +@@ -501,7 +501,6 @@ out:
2477 +
2478 + out_bad:
2479 + ret = false;
2480 +- key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
2481 + goto out;
2482 + }
2483 + EXPORT_SYMBOL(__skb_flow_dissect);
2484 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2485 +index ae92131c4f89..253c86b78ff0 100644
2486 +--- a/net/core/neighbour.c
2487 ++++ b/net/core/neighbour.c
2488 +@@ -496,7 +496,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
2489 + if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
2490 + nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
2491 +
2492 +- hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
2493 ++ hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
2494 +
2495 + if (n->parms->dead) {
2496 + rc = ERR_PTR(-EINVAL);
2497 +@@ -508,7 +508,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
2498 + n1 != NULL;
2499 + n1 = rcu_dereference_protected(n1->next,
2500 + lockdep_is_held(&tbl->lock))) {
2501 +- if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
2502 ++ if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
2503 + if (want_ref)
2504 + neigh_hold(n1);
2505 + rc = n1;
2506 +diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
2507 +index 5e3a7302f774..7753681195c1 100644
2508 +--- a/net/dccp/ccids/ccid2.c
2509 ++++ b/net/dccp/ccids/ccid2.c
2510 +@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
2511 +
2512 + ccid2_pr_debug("RTO_EXPIRE\n");
2513 +
2514 ++ if (sk->sk_state == DCCP_CLOSED)
2515 ++ goto out;
2516 ++
2517 + /* back-off timer */
2518 + hc->tx_rto <<= 1;
2519 + if (hc->tx_rto > DCCP_RTO_MAX)
2520 +diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
2521 +index 711b4dfa17c3..cb5eb649ad5f 100644
2522 +--- a/net/ipv4/arp.c
2523 ++++ b/net/ipv4/arp.c
2524 +@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
2525 +
2526 + static int arp_constructor(struct neighbour *neigh)
2527 + {
2528 +- __be32 addr = *(__be32 *)neigh->primary_key;
2529 ++ __be32 addr;
2530 + struct net_device *dev = neigh->dev;
2531 + struct in_device *in_dev;
2532 + struct neigh_parms *parms;
2533 ++ u32 inaddr_any = INADDR_ANY;
2534 +
2535 ++ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
2536 ++ memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
2537 ++
2538 ++ addr = *(__be32 *)neigh->primary_key;
2539 + rcu_read_lock();
2540 + in_dev = __in_dev_get_rcu(dev);
2541 + if (!in_dev) {
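
The neighbour.c and arp.c hunks above work together: arp_constructor() now rewrites the primary key to INADDR_ANY for loopback and point-to-point devices, so every address on such a device collapses onto a single neighbour entry, and __neigh_create() must consequently hash and compare n->primary_key, which the constructor may have changed, rather than the caller's original pkey. A hedged sketch of the key rewrite:

    /* sketch: one shared neighbour entry per loopback/p2p device */
    u32 key = *(u32 *)pkey;

    if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
            key = INADDR_ANY;   /* collapse all addresses onto one key */
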
2542 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
2543 +index b60106d34346..8212ed80da48 100644
2544 +--- a/net/ipv4/igmp.c
2545 ++++ b/net/ipv4/igmp.c
2546 +@@ -338,7 +338,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
2547 + return htonl(INADDR_ANY);
2548 +
2549 + for_ifa(in_dev) {
2550 +- if (inet_ifa_match(fl4->saddr, ifa))
2551 ++ if (fl4->saddr == ifa->ifa_local)
2552 + return fl4->saddr;
2553 + } endfor_ifa(in_dev);
2554 +
2555 +diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
2556 +index 6e3e0e8b1ce3..4cfcc22f7430 100644
2557 +--- a/net/ipv4/netfilter/arp_tables.c
2558 ++++ b/net/ipv4/netfilter/arp_tables.c
2559 +@@ -367,23 +367,12 @@ static inline bool unconditional(const struct arpt_entry *e)
2560 + memcmp(&e->arp, &uncond, sizeof(uncond)) == 0;
2561 + }
2562 +
2563 +-static bool find_jump_target(const struct xt_table_info *t,
2564 +- const struct arpt_entry *target)
2565 +-{
2566 +- struct arpt_entry *iter;
2567 +-
2568 +- xt_entry_foreach(iter, t->entries, t->size) {
2569 +- if (iter == target)
2570 +- return true;
2571 +- }
2572 +- return false;
2573 +-}
2574 +-
2575 + /* Figures out from what hook each rule can be called: returns 0 if
2576 + * there are loops. Puts hook bitmask in comefrom.
2577 + */
2578 + static int mark_source_chains(const struct xt_table_info *newinfo,
2579 +- unsigned int valid_hooks, void *entry0)
2580 ++ unsigned int valid_hooks, void *entry0,
2581 ++ unsigned int *offsets)
2582 + {
2583 + unsigned int hook;
2584 +
2585 +@@ -472,10 +461,11 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
2586 + /* This a jump; chase it. */
2587 + duprintf("Jump rule %u -> %u\n",
2588 + pos, newpos);
2589 ++ if (!xt_find_jump_offset(offsets, newpos,
2590 ++ newinfo->number))
2591 ++ return 0;
2592 + e = (struct arpt_entry *)
2593 + (entry0 + newpos);
2594 +- if (!find_jump_target(newinfo, e))
2595 +- return 0;
2596 + } else {
2597 + /* ... this is a fallthru */
2598 + newpos = pos + e->next_offset;
2599 +@@ -521,11 +511,13 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
2600 + {
2601 + struct xt_entry_target *t;
2602 + struct xt_target *target;
2603 ++ unsigned long pcnt;
2604 + int ret;
2605 +
2606 +- e->counters.pcnt = xt_percpu_counter_alloc();
2607 +- if (IS_ERR_VALUE(e->counters.pcnt))
2608 ++ pcnt = xt_percpu_counter_alloc();
2609 ++ if (IS_ERR_VALUE(pcnt))
2610 + return -ENOMEM;
2611 ++ e->counters.pcnt = pcnt;
2612 +
2613 + t = arpt_get_target(e);
2614 + target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
2615 +@@ -642,6 +634,7 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
2616 + const struct arpt_replace *repl)
2617 + {
2618 + struct arpt_entry *iter;
2619 ++ unsigned int *offsets;
2620 + unsigned int i;
2621 + int ret = 0;
2622 +
2623 +@@ -655,6 +648,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
2624 + }
2625 +
2626 + duprintf("translate_table: size %u\n", newinfo->size);
2627 ++ offsets = xt_alloc_entry_offsets(newinfo->number);
2628 ++ if (!offsets)
2629 ++ return -ENOMEM;
2630 + i = 0;
2631 +
2632 + /* Walk through entries, checking offsets. */
2633 +@@ -665,7 +661,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
2634 + repl->underflow,
2635 + repl->valid_hooks);
2636 + if (ret != 0)
2637 +- break;
2638 ++ goto out_free;
2639 ++ if (i < repl->num_entries)
2640 ++ offsets[i] = (void *)iter - entry0;
2641 + ++i;
2642 + if (strcmp(arpt_get_target(iter)->u.user.name,
2643 + XT_ERROR_TARGET) == 0)
2644 +@@ -673,12 +671,13 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
2645 + }
2646 + duprintf("translate_table: ARPT_ENTRY_ITERATE gives %d\n", ret);
2647 + if (ret != 0)
2648 +- return ret;
2649 ++ goto out_free;
2650 +
2651 ++ ret = -EINVAL;
2652 + if (i != repl->num_entries) {
2653 + duprintf("translate_table: %u not %u entries\n",
2654 + i, repl->num_entries);
2655 +- return -EINVAL;
2656 ++ goto out_free;
2657 + }
2658 +
2659 + /* Check hooks all assigned */
2660 +@@ -689,17 +688,20 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
2661 + if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
2662 + duprintf("Invalid hook entry %u %u\n",
2663 + i, repl->hook_entry[i]);
2664 +- return -EINVAL;
2665 ++ goto out_free;
2666 + }
2667 + if (newinfo->underflow[i] == 0xFFFFFFFF) {
2668 + duprintf("Invalid underflow %u %u\n",
2669 + i, repl->underflow[i]);
2670 +- return -EINVAL;
2671 ++ goto out_free;
2672 + }
2673 + }
2674 +
2675 +- if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
2676 +- return -ELOOP;
2677 ++ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
2678 ++ ret = -ELOOP;
2679 ++ goto out_free;
2680 ++ }
2681 ++ kvfree(offsets);
2682 +
2683 + /* Finally, each sanity check must pass */
2684 + i = 0;
2685 +@@ -719,6 +721,9 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
2686 + return ret;
2687 + }
2688 +
2689 ++ return ret;
2690 ++ out_free:
2691 ++ kvfree(offsets);
2692 + return ret;
2693 + }
2694 +
2695 +@@ -1336,8 +1341,8 @@ static int translate_compat_table(struct xt_table_info **pinfo,
2696 +
2697 + newinfo->number = compatr->num_entries;
2698 + for (i = 0; i < NF_ARP_NUMHOOKS; i++) {
2699 +- newinfo->hook_entry[i] = info->hook_entry[i];
2700 +- newinfo->underflow[i] = info->underflow[i];
2701 ++ newinfo->hook_entry[i] = compatr->hook_entry[i];
2702 ++ newinfo->underflow[i] = compatr->underflow[i];
2703 + }
2704 + entry1 = newinfo->entries;
2705 + pos = entry1;
2706 +diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
2707 +index a399c5419622..a98173d1ea97 100644
2708 +--- a/net/ipv4/netfilter/ip_tables.c
2709 ++++ b/net/ipv4/netfilter/ip_tables.c
2710 +@@ -443,23 +443,12 @@ ipt_do_table(struct sk_buff *skb,
2711 + #endif
2712 + }
2713 +
2714 +-static bool find_jump_target(const struct xt_table_info *t,
2715 +- const struct ipt_entry *target)
2716 +-{
2717 +- struct ipt_entry *iter;
2718 +-
2719 +- xt_entry_foreach(iter, t->entries, t->size) {
2720 +- if (iter == target)
2721 +- return true;
2722 +- }
2723 +- return false;
2724 +-}
2725 +-
2726 + /* Figures out from what hook each rule can be called: returns 0 if
2727 + there are loops. Puts hook bitmask in comefrom. */
2728 + static int
2729 + mark_source_chains(const struct xt_table_info *newinfo,
2730 +- unsigned int valid_hooks, void *entry0)
2731 ++ unsigned int valid_hooks, void *entry0,
2732 ++ unsigned int *offsets)
2733 + {
2734 + unsigned int hook;
2735 +
2736 +@@ -552,10 +541,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
2737 + /* This a jump; chase it. */
2738 + duprintf("Jump rule %u -> %u\n",
2739 + pos, newpos);
2740 ++ if (!xt_find_jump_offset(offsets, newpos,
2741 ++ newinfo->number))
2742 ++ return 0;
2743 + e = (struct ipt_entry *)
2744 + (entry0 + newpos);
2745 +- if (!find_jump_target(newinfo, e))
2746 +- return 0;
2747 + } else {
2748 + /* ... this is a fallthru */
2749 + newpos = pos + e->next_offset;
2750 +@@ -663,10 +653,12 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
2751 + unsigned int j;
2752 + struct xt_mtchk_param mtpar;
2753 + struct xt_entry_match *ematch;
2754 ++ unsigned long pcnt;
2755 +
2756 +- e->counters.pcnt = xt_percpu_counter_alloc();
2757 +- if (IS_ERR_VALUE(e->counters.pcnt))
2758 ++ pcnt = xt_percpu_counter_alloc();
2759 ++ if (IS_ERR_VALUE(pcnt))
2760 + return -ENOMEM;
2761 ++ e->counters.pcnt = pcnt;
2762 +
2763 + j = 0;
2764 + mtpar.net = net;
2765 +@@ -811,6 +803,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
2766 + const struct ipt_replace *repl)
2767 + {
2768 + struct ipt_entry *iter;
2769 ++ unsigned int *offsets;
2770 + unsigned int i;
2771 + int ret = 0;
2772 +
2773 +@@ -824,6 +817,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
2774 + }
2775 +
2776 + duprintf("translate_table: size %u\n", newinfo->size);
2777 ++ offsets = xt_alloc_entry_offsets(newinfo->number);
2778 ++ if (!offsets)
2779 ++ return -ENOMEM;
2780 + i = 0;
2781 + /* Walk through entries, checking offsets. */
2782 + xt_entry_foreach(iter, entry0, newinfo->size) {
2783 +@@ -833,17 +829,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
2784 + repl->underflow,
2785 + repl->valid_hooks);
2786 + if (ret != 0)
2787 +- return ret;
2788 ++ goto out_free;
2789 ++ if (i < repl->num_entries)
2790 ++ offsets[i] = (void *)iter - entry0;
2791 + ++i;
2792 + if (strcmp(ipt_get_target(iter)->u.user.name,
2793 + XT_ERROR_TARGET) == 0)
2794 + ++newinfo->stacksize;
2795 + }
2796 +
2797 ++ ret = -EINVAL;
2798 + if (i != repl->num_entries) {
2799 + duprintf("translate_table: %u not %u entries\n",
2800 + i, repl->num_entries);
2801 +- return -EINVAL;
2802 ++ goto out_free;
2803 + }
2804 +
2805 + /* Check hooks all assigned */
2806 +@@ -854,17 +853,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
2807 + if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
2808 + duprintf("Invalid hook entry %u %u\n",
2809 + i, repl->hook_entry[i]);
2810 +- return -EINVAL;
2811 ++ goto out_free;
2812 + }
2813 + if (newinfo->underflow[i] == 0xFFFFFFFF) {
2814 + duprintf("Invalid underflow %u %u\n",
2815 + i, repl->underflow[i]);
2816 +- return -EINVAL;
2817 ++ goto out_free;
2818 + }
2819 + }
2820 +
2821 +- if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
2822 +- return -ELOOP;
2823 ++ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
2824 ++ ret = -ELOOP;
2825 ++ goto out_free;
2826 ++ }
2827 ++ kvfree(offsets);
2828 +
2829 + /* Finally, each sanity check must pass */
2830 + i = 0;
2831 +@@ -884,6 +886,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
2832 + return ret;
2833 + }
2834 +
2835 ++ return ret;
2836 ++ out_free:
2837 ++ kvfree(offsets);
2838 + return ret;
2839 + }
2840 +
2841 +diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
2842 +index c747b2d9eb77..d4acf38b60fd 100644
2843 +--- a/net/ipv4/netfilter/nf_reject_ipv4.c
2844 ++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
2845 +@@ -124,6 +124,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
2846 + /* ip_route_me_harder expects skb->dst to be set */
2847 + skb_dst_set_noref(nskb, skb_dst(oldskb));
2848 +
2849 ++ nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
2850 ++
2851 + skb_reserve(nskb, LL_MAX_HEADER);
2852 + niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
2853 + ip4_dst_hoplimit(skb_dst(nskb)));
2854 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
2855 +index 5597120c8ffd..37e8966a457b 100644
2856 +--- a/net/ipv4/tcp.c
2857 ++++ b/net/ipv4/tcp.c
2858 +@@ -2176,6 +2176,9 @@ adjudge_to_death:
2859 + tcp_send_active_reset(sk, GFP_ATOMIC);
2860 + NET_INC_STATS_BH(sock_net(sk),
2861 + LINUX_MIB_TCPABORTONMEMORY);
2862 ++ } else if (!check_net(sock_net(sk))) {
2863 ++ /* Not possible to send reset; just close */
2864 ++ tcp_set_state(sk, TCP_CLOSE);
2865 + }
2866 + }
2867 +
2868 +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
2869 +index 1ec12a4f327e..35f638cfc675 100644
2870 +--- a/net/ipv4/tcp_timer.c
2871 ++++ b/net/ipv4/tcp_timer.c
2872 +@@ -46,11 +46,19 @@ static void tcp_write_err(struct sock *sk)
2873 + * to prevent DoS attacks. It is called when a retransmission timeout
2874 + * or zero probe timeout occurs on orphaned socket.
2875 + *
2876 ++ * Also close if our net namespace is exiting; in that case there is no
2877 ++ * hope of ever communicating again since all netns interfaces are already
2878 ++ * down (or about to be down), and we need to release our dst references,
2879 ++ * which have been moved to the netns loopback interface, so the namespace
2880 ++ * can finish exiting. This condition is only possible if we are a kernel
2881 ++ * socket, as those do not hold references to the namespace.
2882 ++ *
2883 + * Criteria is still not confirmed experimentally and may change.
2884 + * We kill the socket, if:
2885 + * 1. If number of orphaned sockets exceeds an administratively configured
2886 + * limit.
2887 + * 2. If we have strong memory pressure.
2888 ++ * 3. If our net namespace is exiting.
2889 + */
2890 + static int tcp_out_of_resources(struct sock *sk, bool do_reset)
2891 + {
2892 +@@ -79,6 +87,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
2893 + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
2894 + return 1;
2895 + }
2896 ++
2897 ++ if (!check_net(sock_net(sk))) {
2898 ++ /* Not possible to send reset; just close */
2899 ++ tcp_done(sk);
2900 ++ return 1;
2901 ++ }
2902 ++
2903 + return 0;
2904 + }
2905 +
2906 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2907 +index b809958f7388..3ef81c387923 100644
2908 +--- a/net/ipv6/ip6_output.c
2909 ++++ b/net/ipv6/ip6_output.c
2910 +@@ -148,7 +148,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2911 + !(IP6CB(skb)->flags & IP6SKB_REROUTED));
2912 + }
2913 +
2914 +-static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
2915 ++bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
2916 + {
2917 + if (!np->autoflowlabel_set)
2918 + return ip6_default_np_autolabel(net);
2919 +@@ -1246,14 +1246,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
2920 + v6_cork->tclass = tclass;
2921 + if (rt->dst.flags & DST_XFRM_TUNNEL)
2922 + mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
2923 +- rt->dst.dev->mtu : dst_mtu(&rt->dst);
2924 ++ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
2925 + else
2926 + mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
2927 +- rt->dst.dev->mtu : dst_mtu(rt->dst.path);
2928 ++ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
2929 + if (np->frag_size < mtu) {
2930 + if (np->frag_size)
2931 + mtu = np->frag_size;
2932 + }
2933 ++ if (mtu < IPV6_MIN_MTU)
2934 ++ return -EINVAL;
2935 + cork->base.fragsize = mtu;
2936 + if (dst_allfrag(rt->dst.path))
2937 + cork->base.flags |= IPCORK_ALLFRAG;
2938 +@@ -1783,6 +1785,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
2939 + cork.base.flags = 0;
2940 + cork.base.addr = 0;
2941 + cork.base.opt = NULL;
2942 ++ cork.base.dst = NULL;
2943 + v6_cork.opt = NULL;
2944 + err = ip6_setup_cork(sk, &cork, &v6_cork, hlimit, tclass, opt, rt, fl6);
2945 + if (err) {
2946 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
2947 +index 435e26210587..9011176c8387 100644
2948 +--- a/net/ipv6/ipv6_sockglue.c
2949 ++++ b/net/ipv6/ipv6_sockglue.c
2950 +@@ -1313,7 +1313,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
2951 + break;
2952 +
2953 + case IPV6_AUTOFLOWLABEL:
2954 +- val = np->autoflowlabel;
2955 ++ val = ip6_autoflowlabel(sock_net(sk), np);
2956 + break;
2957 +
2958 + default:
2959 +diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
2960 +index 22f39e00bef3..bb1b5453a7a1 100644
2961 +--- a/net/ipv6/netfilter/ip6_tables.c
2962 ++++ b/net/ipv6/netfilter/ip6_tables.c
2963 +@@ -455,23 +455,12 @@ ip6t_do_table(struct sk_buff *skb,
2964 + #endif
2965 + }
2966 +
2967 +-static bool find_jump_target(const struct xt_table_info *t,
2968 +- const struct ip6t_entry *target)
2969 +-{
2970 +- struct ip6t_entry *iter;
2971 +-
2972 +- xt_entry_foreach(iter, t->entries, t->size) {
2973 +- if (iter == target)
2974 +- return true;
2975 +- }
2976 +- return false;
2977 +-}
2978 +-
2979 + /* Figures out from what hook each rule can be called: returns 0 if
2980 + there are loops. Puts hook bitmask in comefrom. */
2981 + static int
2982 + mark_source_chains(const struct xt_table_info *newinfo,
2983 +- unsigned int valid_hooks, void *entry0)
2984 ++ unsigned int valid_hooks, void *entry0,
2985 ++ unsigned int *offsets)
2986 + {
2987 + unsigned int hook;
2988 +
2989 +@@ -564,10 +553,11 @@ mark_source_chains(const struct xt_table_info *newinfo,
2990 + /* This a jump; chase it. */
2991 + duprintf("Jump rule %u -> %u\n",
2992 + pos, newpos);
2993 ++ if (!xt_find_jump_offset(offsets, newpos,
2994 ++ newinfo->number))
2995 ++ return 0;
2996 + e = (struct ip6t_entry *)
2997 + (entry0 + newpos);
2998 +- if (!find_jump_target(newinfo, e))
2999 +- return 0;
3000 + } else {
3001 + /* ... this is a fallthru */
3002 + newpos = pos + e->next_offset;
3003 +@@ -676,10 +666,12 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
3004 + unsigned int j;
3005 + struct xt_mtchk_param mtpar;
3006 + struct xt_entry_match *ematch;
3007 ++ unsigned long pcnt;
3008 +
3009 +- e->counters.pcnt = xt_percpu_counter_alloc();
3010 +- if (IS_ERR_VALUE(e->counters.pcnt))
3011 ++ pcnt = xt_percpu_counter_alloc();
3012 ++ if (IS_ERR_VALUE(pcnt))
3013 + return -ENOMEM;
3014 ++ e->counters.pcnt = pcnt;
3015 +
3016 + j = 0;
3017 + mtpar.net = net;
3018 +@@ -823,6 +815,7 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
3019 + const struct ip6t_replace *repl)
3020 + {
3021 + struct ip6t_entry *iter;
3022 ++ unsigned int *offsets;
3023 + unsigned int i;
3024 + int ret = 0;
3025 +
3026 +@@ -836,6 +829,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
3027 + }
3028 +
3029 + duprintf("translate_table: size %u\n", newinfo->size);
3030 ++ offsets = xt_alloc_entry_offsets(newinfo->number);
3031 ++ if (!offsets)
3032 ++ return -ENOMEM;
3033 + i = 0;
3034 + /* Walk through entries, checking offsets. */
3035 + xt_entry_foreach(iter, entry0, newinfo->size) {
3036 +@@ -845,17 +841,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
3037 + repl->underflow,
3038 + repl->valid_hooks);
3039 + if (ret != 0)
3040 +- return ret;
3041 ++ goto out_free;
3042 ++ if (i < repl->num_entries)
3043 ++ offsets[i] = (void *)iter - entry0;
3044 + ++i;
3045 + if (strcmp(ip6t_get_target(iter)->u.user.name,
3046 + XT_ERROR_TARGET) == 0)
3047 + ++newinfo->stacksize;
3048 + }
3049 +
3050 ++ ret = -EINVAL;
3051 + if (i != repl->num_entries) {
3052 + duprintf("translate_table: %u not %u entries\n",
3053 + i, repl->num_entries);
3054 +- return -EINVAL;
3055 ++ goto out_free;
3056 + }
3057 +
3058 + /* Check hooks all assigned */
3059 +@@ -866,17 +865,20 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
3060 + if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
3061 + duprintf("Invalid hook entry %u %u\n",
3062 + i, repl->hook_entry[i]);
3063 +- return -EINVAL;
3064 ++ goto out_free;
3065 + }
3066 + if (newinfo->underflow[i] == 0xFFFFFFFF) {
3067 + duprintf("Invalid underflow %u %u\n",
3068 + i, repl->underflow[i]);
3069 +- return -EINVAL;
3070 ++ goto out_free;
3071 + }
3072 + }
3073 +
3074 +- if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
3075 +- return -ELOOP;
3076 ++ if (!mark_source_chains(newinfo, repl->valid_hooks, entry0, offsets)) {
3077 ++ ret = -ELOOP;
3078 ++ goto out_free;
3079 ++ }
3080 ++ kvfree(offsets);
3081 +
3082 + /* Finally, each sanity check must pass */
3083 + i = 0;
3084 +@@ -896,6 +898,9 @@ translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
3085 + return ret;
3086 + }
3087 +
3088 ++ return ret;
3089 ++ out_free:
3090 ++ kvfree(offsets);
3091 + return ret;
3092 + }
3093 +
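
The ip6_tables.c hunks above replace the per-jump linear rescan of the rule blob (find_jump_target walked every entry for every jump) with a lookup in a table of rule-head offsets built once while translate_table() checks the entries. Because that table must now be freed on every failure path, the early returns become goto out_free. A minimal userspace sketch of the same allocate/check/single-exit-cleanup shape, with hypothetical names (validate_blob, nentries):

    #include <stdlib.h>

    /* Sketch only: mirrors the cleanup structure added to
     * translate_table() above, not the kernel code itself. */
    static int validate_blob(const void *blob, size_t nentries)
    {
            unsigned int *offsets;
            int ret = 0;

            offsets = calloc(nentries, sizeof(*offsets));
            if (!offsets)
                    return -1;              /* -ENOMEM in the kernel */

            /* ... record one offset per rule head, then validate ... */

            if (!blob) {                    /* stand-in for any check */
                    ret = -1;               /* -EINVAL / -ELOOP above */
                    goto out_free;
            }

    out_free:
            free(offsets);                  /* kvfree() in the kernel */
            return ret;
    }
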
3094 +diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c
3095 +index 6989c70ae29f..4a84b5ad9ecb 100644
3096 +--- a/net/ipv6/netfilter/nf_dup_ipv6.c
3097 ++++ b/net/ipv6/netfilter/nf_dup_ipv6.c
3098 +@@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb,
3099 + fl6.daddr = *gw;
3100 + fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) |
3101 + (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]);
3102 ++ fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH;
3103 + dst = ip6_route_output(net, NULL, &fl6);
3104 + if (dst->error) {
3105 + dst_release(dst);
3106 +diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
3107 +index e0f922b777e3..7117e5bef412 100644
3108 +--- a/net/ipv6/netfilter/nf_reject_ipv6.c
3109 ++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
3110 +@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
3111 + fl6.daddr = oip6h->saddr;
3112 + fl6.fl6_sport = otcph->dest;
3113 + fl6.fl6_dport = otcph->source;
3114 ++ fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
3115 + security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
3116 + dst = ip6_route_output(net, NULL, &fl6);
3117 + if (dst == NULL || dst->error) {
3118 +@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
3119 +
3120 + skb_dst_set(nskb, dst);
3121 +
3122 ++ nskb->mark = fl6.flowi6_mark;
3123 ++
3124 + skb_reserve(nskb, hh_len + dst->header_len);
3125 + ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
3126 + ip6_dst_hoplimit(dst));
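
The nf_reject_ipv6.c hunks make the generated TCP reset inherit the original packet's firewall mark, both for the route lookup (fl6.flowi6_mark) and on the reply skb itself, so policy routing of replies keeps working for rejected flows. This only happens when the fwmark_reflect sysctl is enabled: in kernels of this era the existing IP6_REPLY_MARK helper is defined approximately as

    /* from include/net/ip.h (approximate, shown for context) */
    #define IP6_REPLY_MARK(net, mark) \
            ((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)

so with the sysctl off the reply mark stays 0 and behavior is unchanged.
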
3127 +diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
3128 +index 86a3c6f0c871..5f747089024f 100644
3129 +--- a/net/netfilter/nf_conntrack_core.c
3130 ++++ b/net/netfilter/nf_conntrack_core.c
3131 +@@ -719,6 +719,7 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
3132 + * least once for the stats anyway.
3133 + */
3134 + rcu_read_lock_bh();
3135 ++ begin:
3136 + hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash], hnnode) {
3137 + ct = nf_ct_tuplehash_to_ctrack(h);
3138 + if (ct != ignored_conntrack &&
3139 +@@ -730,6 +731,12 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
3140 + }
3141 + NF_CT_STAT_INC(net, searched);
3142 + }
3143 ++
3144 ++ if (get_nulls_value(n) != hash) {
3145 ++ NF_CT_STAT_INC(net, search_restart);
3146 ++ goto begin;
3147 ++ }
3148 ++
3149 + rcu_read_unlock_bh();
3150 +
3151 + return 0;
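
The nf_conntrack_core.c hunk addresses a subtlety of lockless lookups over hlist_nulls chains: under RCU an entry can be freed and reused on another chain while a walker traverses it, silently carrying the walker into the wrong bucket and making it miss entries. Each chain's nulls terminator encodes the bucket it belongs to, so a walk that ends on a foreign nulls value must restart. The general pattern, as a kernel-style sketch (table, obj, match, key and the hnode member are placeholders):

    rcu_read_lock();
    begin:
    hlist_nulls_for_each_entry_rcu(obj, node, &table[hash], hnode) {
            if (match(obj, key))
                    break;                  /* found */
    }
    /* Ended on another bucket's terminator: the walk was migrated
     * mid-traversal, so rescan this bucket from the start. */
    if (get_nulls_value(node) != hash)
            goto begin;
    rcu_read_unlock();
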
3152 +diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
3153 +index 7f16d19d6198..a91f8bd51d05 100644
3154 +--- a/net/netfilter/nf_conntrack_expect.c
3155 ++++ b/net/netfilter/nf_conntrack_expect.c
3156 +@@ -560,7 +560,7 @@ static int exp_seq_show(struct seq_file *s, void *v)
3157 + helper = rcu_dereference(nfct_help(expect->master)->helper);
3158 + if (helper) {
3159 + seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
3160 +- if (helper->expect_policy[expect->class].name)
3161 ++ if (helper->expect_policy[expect->class].name[0])
3162 + seq_printf(s, "/%s",
3163 + helper->expect_policy[expect->class].name);
3164 + }
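
The nf_conntrack_expect.c fix is a classic array-vs-pointer slip: expect_policy[].name is an embedded char array, so its address is never NULL and the old test was always true, printing a spurious "/" for nameless policies. Testing name[0] asks the intended question. A self-contained illustration:

    #include <stdio.h>

    struct policy { char name[16]; };

    int main(void)
    {
            struct policy p = { .name = "" };

            /* `if (p.name)` would always be true: the array decays to
             * a non-NULL pointer. Test the first byte instead. */
            if (p.name[0])
                    printf("/%s\n", p.name);
            else
                    puts("(no class name)");
            return 0;
    }
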
3165 +diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
3166 +index 885b4aba3695..1665c2159e4b 100644
3167 +--- a/net/netfilter/nf_conntrack_sip.c
3168 ++++ b/net/netfilter/nf_conntrack_sip.c
3169 +@@ -1434,9 +1434,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff,
3170 + handler = &sip_handlers[i];
3171 + if (handler->request == NULL)
3172 + continue;
3173 +- if (*datalen < handler->len ||
3174 ++ if (*datalen < handler->len + 2 ||
3175 + strncasecmp(*dptr, handler->method, handler->len))
3176 + continue;
3177 ++ if ((*dptr)[handler->len] != ' ' ||
3178 ++ !isalpha((*dptr)[handler->len+1]))
3179 ++ continue;
3180 +
3181 + if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ,
3182 + &matchoff, &matchlen) <= 0) {
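
The nf_conntrack_sip.c hunk tightens request-method matching: strncasecmp alone accepts any prefix, so the "INVITE" handler would also fire on "INVITES ..." or on a buffer that ends right after the method. Requiring two extra bytes, a space terminator, and an alphabetic start for the following token (the Request-URI, e.g. "sip:...") rejects those. The check, restated with a hypothetical helper name:

    #include <ctype.h>
    #include <string.h>

    /* Illustrative only: match "METHOD " as a complete token, the way
     * the hunk above does. */
    static int method_matches(const char *buf, size_t len,
                              const char *method, size_t mlen)
    {
            if (len < mlen + 2)
                    return 0;
            if (strncasecmp(buf, method, mlen) != 0)
                    return 0;
            /* reject prefix matches and require the next token to
             * begin with a letter */
            return buf[mlen] == ' ' &&
                   isalpha((unsigned char)buf[mlen + 1]);
    }
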
3183 +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
3184 +index 8d34a488efc0..ac143ae4f7b6 100644
3185 +--- a/net/netfilter/nfnetlink_cthelper.c
3186 ++++ b/net/netfilter/nfnetlink_cthelper.c
3187 +@@ -17,6 +17,7 @@
3188 + #include <linux/types.h>
3189 + #include <linux/list.h>
3190 + #include <linux/errno.h>
3191 ++#include <linux/capability.h>
3192 + #include <net/netlink.h>
3193 + #include <net/sock.h>
3194 +
3195 +@@ -392,6 +393,9 @@ nfnl_cthelper_new(struct sock *nfnl, struct sk_buff *skb,
3196 + struct nfnl_cthelper *nlcth;
3197 + int ret = 0;
3198 +
3199 ++ if (!capable(CAP_NET_ADMIN))
3200 ++ return -EPERM;
3201 ++
3202 + if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
3203 + return -EINVAL;
3204 +
3205 +@@ -595,6 +599,9 @@ nfnl_cthelper_get(struct sock *nfnl, struct sk_buff *skb,
3206 + struct nfnl_cthelper *nlcth;
3207 + bool tuple_set = false;
3208 +
3209 ++ if (!capable(CAP_NET_ADMIN))
3210 ++ return -EPERM;
3211 ++
3212 + if (nlh->nlmsg_flags & NLM_F_DUMP) {
3213 + struct netlink_dump_control c = {
3214 + .dump = nfnl_cthelper_dump_table,
3215 +@@ -661,6 +668,9 @@ nfnl_cthelper_del(struct sock *nfnl, struct sk_buff *skb,
3216 + struct nfnl_cthelper *nlcth, *n;
3217 + int j = 0, ret;
3218 +
3219 ++ if (!capable(CAP_NET_ADMIN))
3220 ++ return -EPERM;
3221 ++
3222 + if (tb[NFCTH_NAME])
3223 + helper_name = nla_data(tb[NFCTH_NAME]);
3224 +
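
The nfnetlink_cthelper.c hunks close an unprivileged-access hole: the new/get/del handlers could previously be driven by any process able to open a netfilter netlink socket. Each handler now rejects callers lacking CAP_NET_ADMIN before touching its arguments; note that capable() checks the initial user namespace, so root inside a user namespace does not qualify. The xt_osf.c hunks further below add the identical gate to its add/remove callbacks. The shape of the guard, as a hypothetical handler:

    /* Hypothetical handler: the capability gate comes first, before
     * any attribute is parsed or any state is modified. */
    static int example_new(struct sock *nfnl, struct sk_buff *skb,
                           const struct nlmsghdr *nlh,
                           const struct nlattr * const tb[])
    {
            if (!capable(CAP_NET_ADMIN))
                    return -EPERM;

            /* ... parse tb[], create the helper ... */
            return 0;
    }
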
3225 +diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
3226 +index f6837f9b6d6c..c14d2e8eaec3 100644
3227 +--- a/net/netfilter/nfnetlink_queue.c
3228 ++++ b/net/netfilter/nfnetlink_queue.c
3229 +@@ -1053,10 +1053,8 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
3230 + struct net *net = sock_net(ctnl);
3231 + struct nfnl_queue_net *q = nfnl_queue_pernet(net);
3232 +
3233 +- queue = instance_lookup(q, queue_num);
3234 +- if (!queue)
3235 +- queue = verdict_instance_lookup(q, queue_num,
3236 +- NETLINK_CB(skb).portid);
3237 ++ queue = verdict_instance_lookup(q, queue_num,
3238 ++ NETLINK_CB(skb).portid);
3239 + if (IS_ERR(queue))
3240 + return PTR_ERR(queue);
3241 +
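
The nfnetlink_queue.c change removes an unauthenticated fast path: instance_lookup() finds a queue by number alone, while verdict_instance_lookup() additionally verifies that the requesting netlink portid owns the queue and returns an ERR_PTR otherwise. Routing every verdict through the checking variant means one peer can no longer inject verdicts into another peer's queue. The ERR_PTR convention it relies on, sketched with hypothetical names and signatures:

    /* Hypothetical lookup: encode the failure reason in the pointer,
     * as verdict_instance_lookup() does, so the caller can use
     * IS_ERR()/PTR_ERR() instead of a bare NULL test. */
    static struct queue_instance *lookup_owned(struct queue_table *t,
                                               u16 num, u32 portid)
    {
            struct queue_instance *q = table_lookup(t, num);

            if (!q)
                    return ERR_PTR(-ENODEV);
            if (q->peer_portid != portid)
                    return ERR_PTR(-EPERM);
            return q;
    }
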
3242 +diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
3243 +index 2fc6ca9d1286..7b42b0ad3f9b 100644
3244 +--- a/net/netfilter/x_tables.c
3245 ++++ b/net/netfilter/x_tables.c
3246 +@@ -701,6 +701,56 @@ int xt_check_entry_offsets(const void *base,
3247 + }
3248 + EXPORT_SYMBOL(xt_check_entry_offsets);
3249 +
3250 ++/**
3251 ++ * xt_alloc_entry_offsets - allocate array to store rule head offsets
3252 ++ *
3253 ++ * @size: number of entries
3254 ++ *
3255 ++ * Return: NULL or kmalloc'd or vmalloc'd array
3256 ++ */
3257 ++unsigned int *xt_alloc_entry_offsets(unsigned int size)
3258 ++{
3259 ++ unsigned int *off;
3260 ++
3261 ++ off = kcalloc(size, sizeof(unsigned int), GFP_KERNEL | __GFP_NOWARN);
3262 ++
3263 ++ if (off)
3264 ++ return off;
3265 ++
3266 ++ if (size < (SIZE_MAX / sizeof(unsigned int)))
3267 ++ off = vmalloc(size * sizeof(unsigned int));
3268 ++
3269 ++ return off;
3270 ++}
3271 ++EXPORT_SYMBOL(xt_alloc_entry_offsets);
3272 ++
3273 ++/**
3274 ++ * xt_find_jump_offset - check if target is a valid jump offset
3275 ++ *
3276 ++ * @offsets: array containing all valid rule start offsets of a rule blob
3277 ++ * @target: the jump target to search for
3278 ++ * @size: entries in @offsets
3279 ++ */
3280 ++bool xt_find_jump_offset(const unsigned int *offsets,
3281 ++ unsigned int target, unsigned int size)
3282 ++{
3283 ++ int m, low = 0, hi = size;
3284 ++
3285 ++ while (hi > low) {
3286 ++ m = (low + hi) / 2u;
3287 ++
3288 ++ if (offsets[m] > target)
3289 ++ hi = m;
3290 ++ else if (offsets[m] < target)
3291 ++ low = m + 1;
3292 ++ else
3293 ++ return true;
3294 ++ }
3295 ++
3296 ++ return false;
3297 ++}
3298 ++EXPORT_SYMBOL(xt_find_jump_offset);
3299 ++
3300 + int xt_check_target(struct xt_tgchk_param *par,
3301 + unsigned int size, u_int8_t proto, bool inv_proto)
3302 + {
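
Two details in the new x_tables.c helpers above are worth spelling out. xt_alloc_entry_offsets() tries kcalloc first (__GFP_NOWARN keeps huge rulesets from spamming the log) and falls back to vmalloc, guarding the size multiplication against overflow. xt_find_jump_offset() then assumes the offsets are sorted ascending, which holds because translate_table() records them in blob order, so validating a jump becomes a binary search instead of a full rescan. The search, restated as a runnable userspace program:

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace restatement of xt_find_jump_offset(): binary search
     * over rule-head offsets sorted in ascending order. */
    static bool find_offset(const unsigned int *offsets,
                            unsigned int target, unsigned int size)
    {
            unsigned int low = 0, hi = size;

            while (hi > low) {
                    unsigned int m = (low + hi) / 2;

                    if (offsets[m] > target)
                            hi = m;
                    else if (offsets[m] < target)
                            low = m + 1;
                    else
                            return true;
            }
            return false;
    }

    int main(void)
    {
            unsigned int offs[] = { 0, 152, 304, 512 };

            printf("%d %d\n",
                   find_offset(offs, 304, 4),   /* 1: a valid rule head */
                   find_offset(offs, 300, 4));  /* 0: points mid-rule   */
            return 0;
    }
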
3303 +diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
3304 +index df8801e02a32..7eae0d0af89a 100644
3305 +--- a/net/netfilter/xt_osf.c
3306 ++++ b/net/netfilter/xt_osf.c
3307 +@@ -19,6 +19,7 @@
3308 + #include <linux/module.h>
3309 + #include <linux/kernel.h>
3310 +
3311 ++#include <linux/capability.h>
3312 + #include <linux/if.h>
3313 + #include <linux/inetdevice.h>
3314 + #include <linux/ip.h>
3315 +@@ -69,6 +70,9 @@ static int xt_osf_add_callback(struct sock *ctnl, struct sk_buff *skb,
3316 + struct xt_osf_finger *kf = NULL, *sf;
3317 + int err = 0;
3318 +
3319 ++ if (!capable(CAP_NET_ADMIN))
3320 ++ return -EPERM;
3321 ++
3322 + if (!osf_attrs[OSF_ATTR_FINGER])
3323 + return -EINVAL;
3324 +
3325 +@@ -112,6 +116,9 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
3326 + struct xt_osf_finger *sf;
3327 + int err = -ENOENT;
3328 +
3329 ++ if (!capable(CAP_NET_ADMIN))
3330 ++ return -EPERM;
3331 ++
3332 + if (!osf_attrs[OSF_ATTR_FINGER])
3333 + return -EINVAL;
3334 +
3335 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3336 +index a870d27ca778..e9851198a850 100644
3337 +--- a/net/sctp/socket.c
3338 ++++ b/net/sctp/socket.c
3339 +@@ -83,7 +83,7 @@
3340 + static int sctp_writeable(struct sock *sk);
3341 + static void sctp_wfree(struct sk_buff *skb);
3342 + static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3343 +- size_t msg_len, struct sock **orig_sk);
3344 ++ size_t msg_len);
3345 + static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
3346 + static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
3347 + static int sctp_wait_for_accept(struct sock *sk, long timeo);
3348 +@@ -332,16 +332,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
3349 + if (len < sizeof (struct sockaddr))
3350 + return NULL;
3351 +
3352 ++ if (!opt->pf->af_supported(addr->sa.sa_family, opt))
3353 ++ return NULL;
3354 ++
3355 + /* V4 mapped address are really of AF_INET family */
3356 + if (addr->sa.sa_family == AF_INET6 &&
3357 +- ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
3358 +- if (!opt->pf->af_supported(AF_INET, opt))
3359 +- return NULL;
3360 +- } else {
3361 +- /* Does this PF support this AF? */
3362 +- if (!opt->pf->af_supported(addr->sa.sa_family, opt))
3363 +- return NULL;
3364 +- }
3365 ++ ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
3366 ++ !opt->pf->af_supported(AF_INET, opt))
3367 ++ return NULL;
3368 +
3369 + /* If we get this far, af is valid. */
3370 + af = sctp_get_af_specific(addr->sa.sa_family);
3371 +@@ -1954,7 +1952,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
3372 + timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
3373 + if (!sctp_wspace(asoc)) {
3374 + /* sk can be changed by peel off when waiting for buf. */
3375 +- err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
3376 ++ err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
3377 + if (err) {
3378 + if (err == -ESRCH) {
3379 + /* asoc is already dead. */
3380 +@@ -6976,12 +6974,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
3381 +
3382 + /* Helper function to wait for space in the sndbuf. */
3383 + static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3384 +- size_t msg_len, struct sock **orig_sk)
3385 ++ size_t msg_len)
3386 + {
3387 + struct sock *sk = asoc->base.sk;
3388 +- int err = 0;
3389 + long current_timeo = *timeo_p;
3390 + DEFINE_WAIT(wait);
3391 ++ int err = 0;
3392 +
3393 + pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
3394 + *timeo_p, msg_len);
3395 +@@ -7010,17 +7008,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3396 + release_sock(sk);
3397 + current_timeo = schedule_timeout(current_timeo);
3398 + lock_sock(sk);
3399 +- if (sk != asoc->base.sk) {
3400 +- release_sock(sk);
3401 +- sk = asoc->base.sk;
3402 +- lock_sock(sk);
3403 +- }
3404 ++ if (sk != asoc->base.sk)
3405 ++ goto do_error;
3406 +
3407 + *timeo_p = current_timeo;
3408 + }
3409 +
3410 + out:
3411 +- *orig_sk = sk;
3412 + finish_wait(&asoc->wait, &wait);
3413 +
3414 + /* Release the association's refcnt. */
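
The sctp/socket.c change backs out the lock-juggling that sctp_wait_for_sndbuf() performed when a peel-off migrated the association to a new socket while the sender slept: returning to the caller holding a different lock than the one it took is unsound, so the wait now simply fails (the function's do_error path, outside this hunk, turns the goto into an error return). A userspace analogue of the hazard being removed:

    #include <pthread.h>

    pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
    pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;

    /* Analogue of the removed code: returns holding 'b' although the
     * caller locked 'a', so the caller's later unlock of 'a' is
     * undefined behavior. */
    static void buggy_wait(void)
    {
            pthread_mutex_unlock(&a);
            /* ... sleep; the object migrates from 'a' to 'b' ... */
            pthread_mutex_lock(&b);
    }

    int main(void)
    {
            pthread_mutex_lock(&a);
            buggy_wait();
            pthread_mutex_unlock(&a);   /* 'a' is not held here */
            return 0;
    }
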
3415 +diff --git a/tools/usb/usbip/libsrc/usbip_common.c b/tools/usb/usbip/libsrc/usbip_common.c
3416 +index ac73710473de..8000445ff884 100644
3417 +--- a/tools/usb/usbip/libsrc/usbip_common.c
3418 ++++ b/tools/usb/usbip/libsrc/usbip_common.c
3419 +@@ -215,9 +215,16 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
3420 + struct usbip_usb_interface *uinf)
3421 + {
3422 + char busid[SYSFS_BUS_ID_SIZE];
3423 ++ int size;
3424 + struct udev_device *sif;
3425 +
3426 +- sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i);
3427 ++ size = snprintf(busid, sizeof(busid), "%s:%d.%d",
3428 ++ udev->busid, udev->bConfigurationValue, i);
3429 ++ if (size < 0 || (unsigned int)size >= sizeof(busid)) {
3430 ++ err("busid length %i >= %lu or < 0", size,
3431 ++ (unsigned long)sizeof(busid));
3432 ++ return -1;
3433 ++ }
3434 +
3435 + sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
3436 + if (!sif) {
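
The usbip_common.c hunk above, like the usbip_host_driver.c hunks that follow, swaps a bare sprintf for snprintf plus a result check. snprintf returns the length the full output would have had, so a return value >= the buffer size signals truncation and a negative return signals an output error; in either case the buffer contents must not be used. The check, factored into a hypothetical helper:

    #include <stdarg.h>
    #include <stdio.h>

    /* Hypothetical helper capturing the pattern added above:
     * returns 0 on success, -1 on output error or truncation. */
    static int format_checked(char *buf, size_t bufsz,
                              const char *fmt, ...)
    {
            va_list ap;
            int n;

            va_start(ap, fmt);
            n = vsnprintf(buf, bufsz, fmt, ap);
            va_end(ap);

            /* vsnprintf reports the untruncated length, so
             * n >= bufsz means the result did not fit */
            if (n < 0 || (size_t)n >= bufsz)
                    return -1;
            return 0;
    }
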
3437 +diff --git a/tools/usb/usbip/libsrc/usbip_host_driver.c b/tools/usb/usbip/libsrc/usbip_host_driver.c
3438 +index bef08d5c44e8..071b9ce99420 100644
3439 +--- a/tools/usb/usbip/libsrc/usbip_host_driver.c
3440 ++++ b/tools/usb/usbip/libsrc/usbip_host_driver.c
3441 +@@ -39,13 +39,19 @@ struct udev *udev_context;
3442 + static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
3443 + {
3444 + char status_attr_path[SYSFS_PATH_MAX];
3445 ++ int size;
3446 + int fd;
3447 + int length;
3448 + char status;
3449 + int value = 0;
3450 +
3451 +- snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
3452 +- udev->path);
3453 ++ size = snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
3454 ++ udev->path);
3455 ++ if (size < 0 || (unsigned int)size >= sizeof(status_attr_path)) {
3456 ++ err("usbip_status path length %i >= %lu or < 0", size,
3457 ++ (unsigned long)sizeof(status_attr_path));
3458 ++ return -1;
3459 ++ }
3460 +
3461 + fd = open(status_attr_path, O_RDONLY);
3462 + if (fd < 0) {
3463 +@@ -225,6 +231,7 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
3464 + {
3465 + char attr_name[] = "usbip_sockfd";
3466 + char sockfd_attr_path[SYSFS_PATH_MAX];
3467 ++ int size;
3468 + char sockfd_buff[30];
3469 + int ret;
3470 +
3471 +@@ -244,10 +251,20 @@ int usbip_host_export_device(struct usbip_exported_device *edev, int sockfd)
3472 + }
3473 +
3474 + /* only the first interface is true */
3475 +- snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
3476 +- edev->udev.path, attr_name);
3477 ++ size = snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
3478 ++ edev->udev.path, attr_name);
3479 ++ if (size < 0 || (unsigned int)size >= sizeof(sockfd_attr_path)) {
3480 ++ err("exported device path length %i >= %lu or < 0", size,
3481 ++ (unsigned long)sizeof(sockfd_attr_path));
3482 ++ return -1;
3483 ++ }
3484 +
3485 +- snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
3486 ++ size = snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
3487 ++ if (size < 0 || (unsigned int)size >= sizeof(sockfd_buff)) {
3488 ++ err("socket length %i >= %lu or < 0", size,
3489 ++ (unsigned long)sizeof(sockfd_buff));
3490 ++ return -1;
3491 ++ }
3492 +
3493 + ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
3494 + strlen(sockfd_buff));
3495 +diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
3496 +index ad9204773533..1274f326242c 100644
3497 +--- a/tools/usb/usbip/libsrc/vhci_driver.c
3498 ++++ b/tools/usb/usbip/libsrc/vhci_driver.c
3499 +@@ -55,12 +55,12 @@ static int parse_status(const char *value)
3500 +
3501 + while (*c != '\0') {
3502 + int port, status, speed, devid;
3503 +- unsigned long socket;
3504 ++ int sockfd;
3505 + char lbusid[SYSFS_BUS_ID_SIZE];
3506 +
3507 +- ret = sscanf(c, "%d %d %d %x %lx %31s\n",
3508 ++ ret = sscanf(c, "%d %d %d %x %u %31s\n",
3509 + &port, &status, &speed,
3510 +- &devid, &socket, lbusid);
3511 ++ &devid, &sockfd, lbusid);
3512 +
3513 + if (ret < 5) {
3514 + dbg("sscanf failed: %d", ret);
3515 +@@ -69,7 +69,7 @@ static int parse_status(const char *value)
3516 +
3517 + dbg("port %d status %d speed %d devid %x",
3518 + port, status, speed, devid);
3519 +- dbg("socket %lx lbusid %s", socket, lbusid);
3520 ++ dbg("sockfd %u lbusid %s", sockfd, lbusid);
3521 +
3522 +
3523 + /* if a device is connected, look at it */
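
The vhci_driver.c hunk tracks a kernel-side format change: the vhci status file now exports the attached socket as a plain file-descriptor number rather than a kernel pointer (kernel addresses are no longer leaked to userspace), so the %lx/unsigned long pair becomes %u with an int. With sscanf, conversion specifiers and argument types must move in lockstep. A standalone parse of one status line in the new format (sample values invented for illustration):

    #include <stdio.h>

    int main(void)
    {
            const char *line = "0 4 2 10002 5 1-1";  /* invented sample */
            int port, status, speed;
            unsigned int devid, sockfd;
            char busid[32];

            if (sscanf(line, "%d %d %d %x %u %31s",
                       &port, &status, &speed,
                       &devid, &sockfd, busid) == 6)
                    printf("port %d sockfd %u busid %s\n",
                           port, sockfd, busid);
            return 0;
    }
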
3524 +diff --git a/tools/usb/usbip/src/usbip.c b/tools/usb/usbip/src/usbip.c
3525 +index d7599d943529..73d8eee8130b 100644
3526 +--- a/tools/usb/usbip/src/usbip.c
3527 ++++ b/tools/usb/usbip/src/usbip.c
3528 +@@ -176,6 +176,8 @@ int main(int argc, char *argv[])
3529 + break;
3530 + case '?':
3531 + printf("usbip: invalid option\n");
3532 ++ /* Terminate after printing error */
3533 ++ /* FALLTHRU */
3534 + default:
3535 + usbip_usage();
3536 + goto out;
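
Finally, the usbip.c hunk only adds comments, but they matter: the '?' case deliberately falls through into default so an invalid option also prints the usage text, and a /* FALLTHRU */ annotation is the conventional way to tell static analyzers (and gcc's -Wimplicit-fallthrough, in compilers that support it) that the missing break is intentional. In miniature:

    #include <stdio.h>

    static void demo(int opt)
    {
            switch (opt) {
            case '?':
                    puts("invalid option");
                    /* FALLTHRU */  /* deliberately also print usage */
            default:
                    puts("usage: demo [options]");
            }
    }

    int main(void)
    {
            demo('?');
            return 0;
    }
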