Gentoo Archives: gentoo-commits

From: Alice Ferrazzi <alicef@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.9 commit in: /
Date: Wed, 31 Jan 2018 13:31:39
Message-Id: 1517405259.69c2259e3678793f2aa4beddaa1453a039d9ac43.alicef@gentoo
commit: 69c2259e3678793f2aa4beddaa1453a039d9ac43
Author: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
AuthorDate: Wed Jan 31 13:27:39 2018 +0000
Commit: Alice Ferrazzi <alicef <AT> gentoo <DOT> org>
CommitDate: Wed Jan 31 13:27:39 2018 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=69c2259e

linux kernel 4.9.79

0000_README | 4 +
1078_linux-4.9.79.patch | 2352 +++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 2356 insertions(+)

14 diff --git a/0000_README b/0000_README
15 index 9048086..d0865d5 100644
16 --- a/0000_README
17 +++ b/0000_README
18 @@ -355,6 +355,10 @@ Patch: 1077_linux-4.9.78.patch
19 From: http://www.kernel.org
20 Desc: Linux 4.9.78
21
22 +Patch: 1078_linux-4.9.79.patch
23 +From: http://www.kernel.org
24 +Desc: Linux 4.9.79
25 +
26 Patch: 1500_XATTR_USER_PREFIX.patch
27 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
28 Desc: Support for namespace user.pax.* on tmpfs.
29
30 diff --git a/1078_linux-4.9.79.patch b/1078_linux-4.9.79.patch
31 new file mode 100644
32 index 0000000..debc1a0
33 --- /dev/null
34 +++ b/1078_linux-4.9.79.patch
35 @@ -0,0 +1,2352 @@
36 +diff --git a/Makefile b/Makefile
37 +index 8a6f158a1176..4a7e6dff1c2e 100644
38 +--- a/Makefile
39 ++++ b/Makefile
40 +@@ -1,6 +1,6 @@
41 + VERSION = 4
42 + PATCHLEVEL = 9
43 +-SUBLEVEL = 78
44 ++SUBLEVEL = 79
45 + EXTRAVERSION =
46 + NAME = Roaring Lionus
47 +
48 +diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
49 +index 2206e0e00934..2a35c1963f6d 100644
50 +--- a/arch/arm/kvm/mmu.c
51 ++++ b/arch/arm/kvm/mmu.c
52 +@@ -1284,7 +1284,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
53 + return -EFAULT;
54 + }
55 +
56 +- if (is_vm_hugetlb_page(vma) && !logging_active) {
57 ++ if (vma_kernel_pagesize(vma) && !logging_active) {
58 + hugetlb = true;
59 + gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
60 + } else {
61 +diff --git a/arch/um/Makefile b/arch/um/Makefile
62 +index 0ca46ededfc7..9c150ccb35d2 100644
63 +--- a/arch/um/Makefile
64 ++++ b/arch/um/Makefile
65 +@@ -117,7 +117,7 @@ archheaders:
66 + archprepare: include/generated/user_constants.h
67 +
68 + LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
69 +-LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib
70 ++LINK-$(CONFIG_LD_SCRIPT_DYN) += -Wl,-rpath,/lib $(call cc-option, -no-pie)
71 +
72 + CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \
73 + $(call cc-option, -fno-stack-protector,) \
74 +diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
75 +index 6bb7e92c6d50..0174290b2857 100644
76 +--- a/arch/x86/entry/vsyscall/vsyscall_64.c
77 ++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
78 +@@ -46,6 +46,7 @@ static enum { EMULATE, NATIVE, NONE } vsyscall_mode =
79 + #else
80 + EMULATE;
81 + #endif
82 ++unsigned long vsyscall_pgprot = __PAGE_KERNEL_VSYSCALL;
83 +
84 + static int __init vsyscall_setup(char *str)
85 + {
86 +@@ -336,11 +337,11 @@ void __init map_vsyscall(void)
87 + extern char __vsyscall_page;
88 + unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
89 +
90 ++ if (vsyscall_mode != NATIVE)
91 ++ vsyscall_pgprot = __PAGE_KERNEL_VVAR;
92 + if (vsyscall_mode != NONE)
93 + __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
94 +- vsyscall_mode == NATIVE
95 +- ? PAGE_KERNEL_VSYSCALL
96 +- : PAGE_KERNEL_VVAR);
97 ++ __pgprot(vsyscall_pgprot));
98 +
99 + BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
100 + (unsigned long)VSYSCALL_ADDR);
101 +diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
102 +index 9842270ed2f2..21a4e4127f43 100644
103 +--- a/arch/x86/events/amd/power.c
104 ++++ b/arch/x86/events/amd/power.c
105 +@@ -277,7 +277,7 @@ static int __init amd_power_pmu_init(void)
106 + int ret;
107 +
108 + if (!x86_match_cpu(cpu_match))
109 +- return 0;
110 ++ return -ENODEV;
111 +
112 + if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
113 + return -ENODEV;
114 +diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
115 +index e40b19ca486e..353f038ec645 100644
116 +--- a/arch/x86/include/asm/processor.h
117 ++++ b/arch/x86/include/asm/processor.h
118 +@@ -596,7 +596,7 @@ static inline void sync_core(void)
119 + {
120 + int tmp;
121 +
122 +-#ifdef CONFIG_M486
123 ++#ifdef CONFIG_X86_32
124 + /*
125 + * Do a CPUID if available, otherwise do a jump. The jump
126 + * can conveniently enough be the jump around CPUID.
127 +diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
128 +index 4865e10dbb55..9ee85066f407 100644
129 +--- a/arch/x86/include/asm/vsyscall.h
130 ++++ b/arch/x86/include/asm/vsyscall.h
131 +@@ -13,6 +13,7 @@ extern void map_vsyscall(void);
132 + */
133 + extern bool emulate_vsyscall(struct pt_regs *regs, unsigned long address);
134 + extern bool vsyscall_enabled(void);
135 ++extern unsigned long vsyscall_pgprot;
136 + #else
137 + static inline void map_vsyscall(void) {}
138 + static inline bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
139 +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
140 +index de6626c18e42..be6337156502 100644
141 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
142 ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
143 +@@ -934,6 +934,8 @@ static int __populate_cache_leaves(unsigned int cpu)
144 + ci_leaf_init(this_leaf++, &id4_regs);
145 + __cache_cpumap_setup(cpu, idx, &id4_regs);
146 + }
147 ++ this_cpu_ci->cpu_map_populated = true;
148 ++
149 + return 0;
150 + }
151 +
152 +diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
153 +index ac3e636ad586..f90f17610f62 100644
154 +--- a/arch/x86/kernel/cpu/microcode/intel.c
155 ++++ b/arch/x86/kernel/cpu/microcode/intel.c
156 +@@ -40,6 +40,9 @@
157 + #include <asm/setup.h>
158 + #include <asm/msr.h>
159 +
160 ++/* last level cache size per core */
161 ++static int llc_size_per_core;
162 ++
163 + /*
164 + * Temporary microcode blobs pointers storage. We note here during early load
165 + * the pointers to microcode blobs we've got from whatever storage (detached
166 +@@ -1053,12 +1056,14 @@ static bool is_blacklisted(unsigned int cpu)
167 +
168 + /*
169 + * Late loading on model 79 with microcode revision less than 0x0b000021
170 +- * may result in a system hang. This behavior is documented in item
171 +- * BDF90, #334165 (Intel Xeon Processor E7-8800/4800 v4 Product Family).
172 ++ * and LLC size per core bigger than 2.5MB may result in a system hang.
173 ++ * This behavior is documented in item BDF90, #334165 (Intel Xeon
174 ++ * Processor E7-8800/4800 v4 Product Family).
175 + */
176 + if (c->x86 == 6 &&
177 + c->x86_model == INTEL_FAM6_BROADWELL_X &&
178 + c->x86_mask == 0x01 &&
179 ++ llc_size_per_core > 2621440 &&
180 + c->microcode < 0x0b000021) {
181 + pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
182 + pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
183 +@@ -1125,6 +1130,15 @@ static struct microcode_ops microcode_intel_ops = {
184 + .microcode_fini_cpu = microcode_fini_cpu,
185 + };
186 +
187 ++static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
188 ++{
189 ++ u64 llc_size = c->x86_cache_size * 1024;
190 ++
191 ++ do_div(llc_size, c->x86_max_cores);
192 ++
193 ++ return (int)llc_size;
194 ++}
195 ++
196 + struct microcode_ops * __init init_intel_microcode(void)
197 + {
198 + struct cpuinfo_x86 *c = &boot_cpu_data;
199 +@@ -1135,6 +1149,8 @@ struct microcode_ops * __init init_intel_microcode(void)
200 + return NULL;
201 + }
202 +
203 ++ llc_size_per_core = calc_llc_size_per_core(c);
204 ++
205 + return &microcode_intel_ops;
206 + }
207 +
208 +diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
209 +index 073d1f1a620b..9758524ee99f 100644
210 +--- a/arch/x86/lib/delay.c
211 ++++ b/arch/x86/lib/delay.c
212 +@@ -93,6 +93,13 @@ static void delay_mwaitx(unsigned long __loops)
213 + {
214 + u64 start, end, delay, loops = __loops;
215 +
216 ++ /*
217 ++ * Timer value of 0 causes MWAITX to wait indefinitely, unless there
218 ++ * is a store on the memory monitored by MONITORX.
219 ++ */
220 ++ if (loops == 0)
221 ++ return;
222 ++
223 + start = rdtsc_ordered();
224 +
225 + for (;;) {
226 +diff --git a/arch/x86/mm/kaiser.c b/arch/x86/mm/kaiser.c
227 +index a8ade08a9bf5..ec678aafa3f8 100644
228 +--- a/arch/x86/mm/kaiser.c
229 ++++ b/arch/x86/mm/kaiser.c
230 +@@ -344,7 +344,7 @@ void __init kaiser_init(void)
231 + if (vsyscall_enabled())
232 + kaiser_add_user_map_early((void *)VSYSCALL_ADDR,
233 + PAGE_SIZE,
234 +- __PAGE_KERNEL_VSYSCALL);
235 ++ vsyscall_pgprot);
236 +
237 + for_each_possible_cpu(cpu) {
238 + void *percpu_vaddr = __per_cpu_user_mapped_start +
239 +diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
240 +index 15f743615923..7840331d3056 100644
241 +--- a/arch/x86/net/bpf_jit_comp.c
242 ++++ b/arch/x86/net/bpf_jit_comp.c
243 +@@ -278,10 +278,10 @@ static void emit_bpf_tail_call(u8 **pprog)
244 + /* if (index >= array->map.max_entries)
245 + * goto out;
246 + */
247 +- EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
248 ++ EMIT2(0x89, 0xD2); /* mov edx, edx */
249 ++ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
250 + offsetof(struct bpf_array, map.max_entries));
251 +- EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
252 +-#define OFFSET1 47 /* number of bytes to jump */
253 ++#define OFFSET1 43 /* number of bytes to jump */
254 + EMIT2(X86_JBE, OFFSET1); /* jbe out */
255 + label1 = cnt;
256 +
257 +@@ -290,21 +290,20 @@ static void emit_bpf_tail_call(u8 **pprog)
258 + */
259 + EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
260 + EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
261 +-#define OFFSET2 36
262 ++#define OFFSET2 32
263 + EMIT2(X86_JA, OFFSET2); /* ja out */
264 + label2 = cnt;
265 + EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
266 + EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
267 +
268 + /* prog = array->ptrs[index]; */
269 +- EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
270 ++ EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
271 + offsetof(struct bpf_array, ptrs));
272 +- EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
273 +
274 + /* if (prog == NULL)
275 + * goto out;
276 + */
277 +- EMIT4(0x48, 0x83, 0xF8, 0x00); /* cmp rax, 0 */
278 ++ EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
279 + #define OFFSET3 10
280 + EMIT2(X86_JE, OFFSET3); /* je out */
281 + label3 = cnt;
282 +diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
283 +index 691814dfed31..943702dd9517 100644
284 +--- a/drivers/acpi/acpica/nsutils.c
285 ++++ b/drivers/acpi/acpica/nsutils.c
286 +@@ -594,25 +594,20 @@ struct acpi_namespace_node *acpi_ns_validate_handle(acpi_handle handle)
287 + void acpi_ns_terminate(void)
288 + {
289 + acpi_status status;
290 ++ union acpi_operand_object *prev;
291 ++ union acpi_operand_object *next;
292 +
293 + ACPI_FUNCTION_TRACE(ns_terminate);
294 +
295 +-#ifdef ACPI_EXEC_APP
296 +- {
297 +- union acpi_operand_object *prev;
298 +- union acpi_operand_object *next;
299 ++ /* Delete any module-level code blocks */
300 +
301 +- /* Delete any module-level code blocks */
302 +-
303 +- next = acpi_gbl_module_code_list;
304 +- while (next) {
305 +- prev = next;
306 +- next = next->method.mutex;
307 +- prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
308 +- acpi_ut_remove_reference(prev);
309 +- }
310 ++ next = acpi_gbl_module_code_list;
311 ++ while (next) {
312 ++ prev = next;
313 ++ next = next->method.mutex;
314 ++ prev->method.mutex = NULL; /* Clear the Mutex (cheated) field */
315 ++ acpi_ut_remove_reference(prev);
316 + }
317 +-#endif
318 +
319 + /*
320 + * Free the entire namespace -- all nodes and all objects
321 +diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
322 +index 73c9c7fa9001..f06317d6fc38 100644
323 +--- a/drivers/acpi/glue.c
324 ++++ b/drivers/acpi/glue.c
325 +@@ -99,13 +99,13 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
326 + return -ENODEV;
327 +
328 + /*
329 +- * If the device has a _HID (or _CID) returning a valid ACPI/PNP
330 +- * device ID, it is better to make it look less attractive here, so that
331 +- * the other device with the same _ADR value (that may not have a valid
332 +- * device ID) can be matched going forward. [This means a second spec
333 +- * violation in a row, so whatever we do here is best effort anyway.]
334 ++ * If the device has a _HID returning a valid ACPI/PNP device ID, it is
335 ++ * better to make it look less attractive here, so that the other device
336 ++ * with the same _ADR value (that may not have a valid device ID) can be
337 ++ * matched going forward. [This means a second spec violation in a row,
338 ++ * so whatever we do here is best effort anyway.]
339 + */
340 +- return sta_present && list_empty(&adev->pnp.ids) ?
341 ++ return sta_present && !adev->pnp.type.platform_id ?
342 + FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
343 + }
344 +
345 +diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
346 +index e9fd32e91668..70e13cf06ed0 100644
347 +--- a/drivers/base/cacheinfo.c
348 ++++ b/drivers/base/cacheinfo.c
349 +@@ -16,6 +16,7 @@
350 + * You should have received a copy of the GNU General Public License
351 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
352 + */
353 ++#include <linux/acpi.h>
354 + #include <linux/bitops.h>
355 + #include <linux/cacheinfo.h>
356 + #include <linux/compiler.h>
357 +@@ -104,9 +105,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
358 + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
359 + struct cacheinfo *this_leaf, *sib_leaf;
360 + unsigned int index;
361 +- int ret;
362 ++ int ret = 0;
363 ++
364 ++ if (this_cpu_ci->cpu_map_populated)
365 ++ return 0;
366 +
367 +- ret = cache_setup_of_node(cpu);
368 ++ if (of_have_populated_dt())
369 ++ ret = cache_setup_of_node(cpu);
370 ++ else if (!acpi_disabled)
371 ++ /* No cache property/hierarchy support yet in ACPI */
372 ++ ret = -ENOTSUPP;
373 + if (ret)
374 + return ret;
375 +
376 +@@ -203,8 +211,7 @@ static int detect_cache_attributes(unsigned int cpu)
377 + */
378 + ret = cache_shared_cpu_map_setup(cpu);
379 + if (ret) {
380 +- pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n",
381 +- cpu);
382 ++ pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
383 + goto free_ci;
384 + }
385 + return 0;
386 +diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
387 +index 7e2dc5e56632..0b49f29bf0da 100644
388 +--- a/drivers/input/mouse/trackpoint.c
389 ++++ b/drivers/input/mouse/trackpoint.c
390 +@@ -383,6 +383,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
391 + if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
392 + psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
393 + button_info = 0x33;
394 ++ } else if (!button_info) {
395 ++ psmouse_warn(psmouse, "got 0 in extended button data, assuming 3 buttons\n");
396 ++ button_info = 0x33;
397 + }
398 +
399 + psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
400 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
401 +index 1644896568c4..b2eeecb26939 100644
402 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
403 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
404 +@@ -4733,6 +4733,15 @@ int be_update_queues(struct be_adapter *adapter)
405 +
406 + be_schedule_worker(adapter);
407 +
408 ++ /*
409 ++ * The IF was destroyed and re-created. We need to clear
410 ++ * all promiscuous flags valid for the destroyed IF.
411 ++ * Without this promisc mode is not restored during
412 ++ * be_open() because the driver thinks that it is
413 ++ * already enabled in HW.
414 ++ */
415 ++ adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
416 ++
417 + if (netif_running(netdev))
418 + status = be_open(netdev);
419 +
420 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
421 +index 8aa91ddff287..16556011d571 100644
422 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
423 ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
424 +@@ -765,11 +765,8 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
425 + dipn = htonl(dip);
426 + dev = mlxsw_sp->rifs[rif]->dev;
427 + n = neigh_lookup(&arp_tbl, &dipn, dev);
428 +- if (!n) {
429 +- netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
430 +- &dip);
431 ++ if (!n)
432 + return;
433 +- }
434 +
435 + netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
436 + neigh_event_send(n, NULL);
437 +diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
438 +index 2c4350a1c629..298b74ebc1e9 100644
439 +--- a/drivers/net/ethernet/realtek/r8169.c
440 ++++ b/drivers/net/ethernet/realtek/r8169.c
441 +@@ -2222,19 +2222,14 @@ static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
442 + void __iomem *ioaddr = tp->mmio_addr;
443 + dma_addr_t paddr = tp->counters_phys_addr;
444 + u32 cmd;
445 +- bool ret;
446 +
447 + RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
448 ++ RTL_R32(CounterAddrHigh);
449 + cmd = (u64)paddr & DMA_BIT_MASK(32);
450 + RTL_W32(CounterAddrLow, cmd);
451 + RTL_W32(CounterAddrLow, cmd | counter_cmd);
452 +
453 +- ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
454 +-
455 +- RTL_W32(CounterAddrLow, 0);
456 +- RTL_W32(CounterAddrHigh, 0);
457 +-
458 +- return ret;
459 ++ return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
460 + }
461 +
462 + static bool rtl8169_reset_counters(struct net_device *dev)
463 +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
464 +index b883af93929c..fc4c2ccc3d22 100644
465 +--- a/drivers/net/ppp/ppp_generic.c
466 ++++ b/drivers/net/ppp/ppp_generic.c
467 +@@ -1002,17 +1002,18 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
468 + if (!ifname_is_set)
469 + snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
470 +
471 ++ mutex_unlock(&pn->all_ppp_mutex);
472 ++
473 + ret = register_netdevice(ppp->dev);
474 + if (ret < 0)
475 + goto err_unit;
476 +
477 + atomic_inc(&ppp_unit_count);
478 +
479 +- mutex_unlock(&pn->all_ppp_mutex);
480 +-
481 + return 0;
482 +
483 + err_unit:
484 ++ mutex_lock(&pn->all_ppp_mutex);
485 + unit_put(&pn->units_idr, ppp->file.index);
486 + err:
487 + mutex_unlock(&pn->all_ppp_mutex);
488 +diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
489 +index 4ddae8118c85..dc36c2ec1d10 100644
490 +--- a/drivers/net/ppp/pppoe.c
491 ++++ b/drivers/net/ppp/pppoe.c
492 +@@ -842,6 +842,7 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
493 + struct pppoe_hdr *ph;
494 + struct net_device *dev;
495 + char *start;
496 ++ int hlen;
497 +
498 + lock_sock(sk);
499 + if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED)) {
500 +@@ -860,16 +861,16 @@ static int pppoe_sendmsg(struct socket *sock, struct msghdr *m,
501 + if (total_len > (dev->mtu + dev->hard_header_len))
502 + goto end;
503 +
504 +-
505 +- skb = sock_wmalloc(sk, total_len + dev->hard_header_len + 32,
506 +- 0, GFP_KERNEL);
507 ++ hlen = LL_RESERVED_SPACE(dev);
508 ++ skb = sock_wmalloc(sk, hlen + sizeof(*ph) + total_len +
509 ++ dev->needed_tailroom, 0, GFP_KERNEL);
510 + if (!skb) {
511 + error = -ENOMEM;
512 + goto end;
513 + }
514 +
515 + /* Reserve space for headers. */
516 +- skb_reserve(skb, dev->hard_header_len);
517 ++ skb_reserve(skb, hlen);
518 + skb_reset_network_header(skb);
519 +
520 + skb->dev = dev;
521 +@@ -930,7 +931,7 @@ static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb)
522 + /* Copy the data if there is no space for the header or if it's
523 + * read-only.
524 + */
525 +- if (skb_cow_head(skb, sizeof(*ph) + dev->hard_header_len))
526 ++ if (skb_cow_head(skb, LL_RESERVED_SPACE(dev) + sizeof(*ph)))
527 + goto abort;
528 +
529 + __skb_push(skb, sizeof(*ph));
530 +diff --git a/drivers/net/tun.c b/drivers/net/tun.c
531 +index 518cbfbc8b65..eb6dc28e5e52 100644
532 +--- a/drivers/net/tun.c
533 ++++ b/drivers/net/tun.c
534 +@@ -525,6 +525,14 @@ static void tun_queue_purge(struct tun_file *tfile)
535 + skb_queue_purge(&tfile->sk.sk_error_queue);
536 + }
537 +
538 ++static void tun_cleanup_tx_array(struct tun_file *tfile)
539 ++{
540 ++ if (tfile->tx_array.ring.queue) {
541 ++ skb_array_cleanup(&tfile->tx_array);
542 ++ memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
543 ++ }
544 ++}
545 ++
546 + static void __tun_detach(struct tun_file *tfile, bool clean)
547 + {
548 + struct tun_file *ntfile;
549 +@@ -566,8 +574,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
550 + tun->dev->reg_state == NETREG_REGISTERED)
551 + unregister_netdevice(tun->dev);
552 + }
553 +- if (tun)
554 +- skb_array_cleanup(&tfile->tx_array);
555 ++ tun_cleanup_tx_array(tfile);
556 + sock_put(&tfile->sk);
557 + }
558 + }
559 +@@ -606,11 +613,13 @@ static void tun_detach_all(struct net_device *dev)
560 + /* Drop read queue */
561 + tun_queue_purge(tfile);
562 + sock_put(&tfile->sk);
563 ++ tun_cleanup_tx_array(tfile);
564 + }
565 + list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
566 + tun_enable_queue(tfile);
567 + tun_queue_purge(tfile);
568 + sock_put(&tfile->sk);
569 ++ tun_cleanup_tx_array(tfile);
570 + }
571 + BUG_ON(tun->numdisabled != 0);
572 +
573 +@@ -2363,6 +2372,8 @@ static int tun_chr_open(struct inode *inode, struct file * file)
574 +
575 + sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
576 +
577 ++ memset(&tfile->tx_array, 0, sizeof(tfile->tx_array));
578 ++
579 + return 0;
580 + }
581 +
582 +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
583 +index 9c257ffedb15..c53385a0052f 100644
584 +--- a/drivers/net/usb/lan78xx.c
585 ++++ b/drivers/net/usb/lan78xx.c
586 +@@ -2197,6 +2197,7 @@ static int lan78xx_reset(struct lan78xx_net *dev)
587 + buf = DEFAULT_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
588 + dev->rx_urb_size = DEFAULT_BURST_CAP_SIZE;
589 + dev->rx_qlen = 4;
590 ++ dev->tx_qlen = 4;
591 + }
592 +
593 + ret = lan78xx_write_reg(dev, BURST_CAP, buf);
594 +diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
595 +index ef83ae3b0a44..4afba17e2403 100644
596 +--- a/drivers/net/vmxnet3/vmxnet3_drv.c
597 ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
598 +@@ -1616,7 +1616,6 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
599 + rq->rx_ring[i].basePA);
600 + rq->rx_ring[i].base = NULL;
601 + }
602 +- rq->buf_info[i] = NULL;
603 + }
604 +
605 + if (rq->data_ring.base) {
606 +@@ -1638,6 +1637,7 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
607 + (rq->rx_ring[0].size + rq->rx_ring[1].size);
608 + dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
609 + rq->buf_info_pa);
610 ++ rq->buf_info[0] = rq->buf_info[1] = NULL;
611 + }
612 + }
613 +
614 +diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
615 +index a530f08592cd..4abd3fce5ab6 100644
616 +--- a/drivers/scsi/libiscsi.c
617 ++++ b/drivers/scsi/libiscsi.c
618 +@@ -1727,7 +1727,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
619 +
620 + if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
621 + reason = FAILURE_SESSION_IN_RECOVERY;
622 +- sc->result = DID_REQUEUE;
623 ++ sc->result = DID_REQUEUE << 16;
624 + goto fault;
625 + }
626 +
627 +diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
628 +index 9f490375ac92..f0b955f8504e 100644
629 +--- a/drivers/usb/usbip/usbip_common.h
630 ++++ b/drivers/usb/usbip/usbip_common.h
631 +@@ -271,6 +271,7 @@ struct usbip_device {
632 + /* lock for status */
633 + spinlock_t lock;
634 +
635 ++ int sockfd;
636 + struct socket *tcp_socket;
637 +
638 + struct task_struct *tcp_rx;
639 +diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
640 +index b96e5b189269..c287ccc78fde 100644
641 +--- a/drivers/usb/usbip/vhci_sysfs.c
642 ++++ b/drivers/usb/usbip/vhci_sysfs.c
643 +@@ -49,13 +49,17 @@ static ssize_t status_show_vhci(int pdev_nr, char *out)
644 +
645 + /*
646 + * output example:
647 +- * port sta spd dev socket local_busid
648 +- * 0000 004 000 00000000 c5a7bb80 1-2.3
649 +- * 0001 004 000 00000000 d8cee980 2-3.4
650 ++ * port sta spd dev sockfd local_busid
651 ++ * 0000 004 000 00000000 000003 1-2.3
652 ++ * 0001 004 000 00000000 000004 2-3.4
653 + *
654 +- * IP address can be retrieved from a socket pointer address by looking
655 +- * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a
656 +- * port number and its peer IP address.
657 ++ * Output includes socket fd instead of socket pointer address to
658 ++ * avoid leaking kernel memory address in:
659 ++ * /sys/devices/platform/vhci_hcd.0/status and in debug output.
660 ++ * The socket pointer address is not used at the moment and it was
661 ++ * made visible as a convenient way to find IP address from socket
662 ++ * pointer address by looking up /proc/net/{tcp,tcp6}. As this opens
663 ++ * a security hole, the change is made to use sockfd instead.
664 + */
665 + for (i = 0; i < VHCI_HC_PORTS; i++) {
666 + struct vhci_device *vdev = &vhci->vdev[i];
667 +@@ -68,13 +72,13 @@ static ssize_t status_show_vhci(int pdev_nr, char *out)
668 + if (vdev->ud.status == VDEV_ST_USED) {
669 + out += sprintf(out, "%03u %08x ",
670 + vdev->speed, vdev->devid);
671 +- out += sprintf(out, "%16p %s",
672 +- vdev->ud.tcp_socket,
673 ++ out += sprintf(out, "%06u %s",
674 ++ vdev->ud.sockfd,
675 + dev_name(&vdev->udev->dev));
676 +
677 + } else {
678 + out += sprintf(out, "000 00000000 ");
679 +- out += sprintf(out, "0000000000000000 0-0");
680 ++ out += sprintf(out, "000000 0-0");
681 + }
682 +
683 + out += sprintf(out, "\n");
684 +@@ -125,7 +129,7 @@ static ssize_t status_show(struct device *dev,
685 + int pdev_nr;
686 +
687 + out += sprintf(out,
688 +- "port sta spd dev socket local_busid\n");
689 ++ "port sta spd dev sockfd local_busid\n");
690 +
691 + pdev_nr = status_name_to_id(attr->attr.name);
692 + if (pdev_nr < 0)
693 +@@ -324,6 +328,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
694 +
695 + vdev->devid = devid;
696 + vdev->speed = speed;
697 ++ vdev->ud.sockfd = sockfd;
698 + vdev->ud.tcp_socket = socket;
699 + vdev->ud.status = VDEV_ST_NOTASSIGNED;
700 +
701 +diff --git a/fs/fcntl.c b/fs/fcntl.c
702 +index 1493ceb0477d..ec03cf620fd7 100644
703 +--- a/fs/fcntl.c
704 ++++ b/fs/fcntl.c
705 +@@ -114,6 +114,10 @@ void f_setown(struct file *filp, unsigned long arg, int force)
706 + int who = arg;
707 + type = PIDTYPE_PID;
708 + if (who < 0) {
709 ++ /* avoid overflow below */
710 ++ if (who == INT_MIN)
711 ++ return;
712 ++
713 + type = PIDTYPE_PGID;
714 + who = -who;
715 + }
716 +diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
717 +index 75f942ae5176..81c018e5c31e 100644
718 +--- a/fs/nfsd/auth.c
719 ++++ b/fs/nfsd/auth.c
720 +@@ -59,10 +59,10 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
721 + gi->gid[i] = exp->ex_anon_gid;
722 + else
723 + gi->gid[i] = rqgi->gid[i];
724 +-
725 +- /* Each thread allocates its own gi, no race */
726 +- groups_sort(gi);
727 + }
728 ++
729 ++ /* Each thread allocates its own gi, no race */
730 ++ groups_sort(gi);
731 + } else {
732 + gi = get_group_info(rqgi);
733 + }
734 +diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
735 +index fe2cbeb90772..939aa066e1ca 100644
736 +--- a/fs/orangefs/devorangefs-req.c
737 ++++ b/fs/orangefs/devorangefs-req.c
738 +@@ -161,7 +161,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
739 + struct orangefs_kernel_op_s *op, *temp;
740 + __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
741 + static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
742 +- struct orangefs_kernel_op_s *cur_op = NULL;
743 ++ struct orangefs_kernel_op_s *cur_op;
744 + unsigned long ret;
745 +
746 + /* We do not support blocking IO. */
747 +@@ -181,6 +181,7 @@ static ssize_t orangefs_devreq_read(struct file *file,
748 + }
749 +
750 + restart:
751 ++ cur_op = NULL;
752 + /* Get next op (if any) from top of list. */
753 + spin_lock(&orangefs_request_list_lock);
754 + list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
755 +diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
756 +index 02cc6139ec90..5b2cbe567365 100644
757 +--- a/fs/orangefs/file.c
758 ++++ b/fs/orangefs/file.c
759 +@@ -446,7 +446,7 @@ ssize_t orangefs_inode_read(struct inode *inode,
760 + static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
761 + {
762 + struct file *file = iocb->ki_filp;
763 +- loff_t pos = *(&iocb->ki_pos);
764 ++ loff_t pos = iocb->ki_pos;
765 + ssize_t rc = 0;
766 +
767 + BUG_ON(iocb->private);
768 +@@ -485,9 +485,6 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
769 + }
770 + }
771 +
772 +- if (file->f_pos > i_size_read(file->f_mapping->host))
773 +- orangefs_i_size_write(file->f_mapping->host, file->f_pos);
774 +-
775 + rc = generic_write_checks(iocb, iter);
776 +
777 + if (rc <= 0) {
778 +@@ -501,7 +498,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite
779 + * pos to the end of the file, so we will wait till now to set
780 + * pos...
781 + */
782 +- pos = *(&iocb->ki_pos);
783 ++ pos = iocb->ki_pos;
784 +
785 + rc = do_readv_writev(ORANGEFS_IO_WRITE,
786 + file,
787 +diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
788 +index 45dd8f27b2ac..f28381a7cd12 100644
789 +--- a/fs/orangefs/orangefs-kernel.h
790 ++++ b/fs/orangefs/orangefs-kernel.h
791 +@@ -570,17 +570,6 @@ do { \
792 + sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE; \
793 + } while (0)
794 +
795 +-static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
796 +-{
797 +-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
798 +- inode_lock(inode);
799 +-#endif
800 +- i_size_write(inode, i_size);
801 +-#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
802 +- inode_unlock(inode);
803 +-#endif
804 +-}
805 +-
806 + static inline void orangefs_set_timeout(struct dentry *dentry)
807 + {
808 + unsigned long time = jiffies + orangefs_dcache_timeout_msecs*HZ/1000;
809 +diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
810 +index abcfa3fa9992..f61b00887481 100644
811 +--- a/fs/orangefs/waitqueue.c
812 ++++ b/fs/orangefs/waitqueue.c
813 +@@ -28,10 +28,10 @@ static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s
814 + */
815 + void purge_waiting_ops(void)
816 + {
817 +- struct orangefs_kernel_op_s *op;
818 ++ struct orangefs_kernel_op_s *op, *tmp;
819 +
820 + spin_lock(&orangefs_request_list_lock);
821 +- list_for_each_entry(op, &orangefs_request_list, list) {
822 ++ list_for_each_entry_safe(op, tmp, &orangefs_request_list, list) {
823 + gossip_debug(GOSSIP_WAIT_DEBUG,
824 + "pvfs2-client-core: purging op tag %llu %s\n",
825 + llu(op->tag),
826 +diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
827 +index dc198bc64c61..edc8ef78b63f 100644
828 +--- a/fs/reiserfs/bitmap.c
829 ++++ b/fs/reiserfs/bitmap.c
830 +@@ -513,9 +513,17 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
831 + "inode has negative prealloc blocks count.");
832 + #endif
833 + while (ei->i_prealloc_count > 0) {
834 +- reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
835 +- ei->i_prealloc_block++;
836 ++ b_blocknr_t block_to_free;
837 ++
838 ++ /*
839 ++ * reiserfs_free_prealloc_block can drop the write lock,
840 ++ * which could allow another caller to free the same block.
841 ++ * We can protect against it by modifying the prealloc
842 ++ * state before calling it.
843 ++ */
844 ++ block_to_free = ei->i_prealloc_block++;
845 + ei->i_prealloc_count--;
846 ++ reiserfs_free_prealloc_block(th, inode, block_to_free);
847 + dirty = 1;
848 + }
849 + if (dirty)
850 +@@ -1128,7 +1136,7 @@ static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
851 + hint->prealloc_size = 0;
852 +
853 + if (!hint->formatted_node && hint->preallocate) {
854 +- if (S_ISREG(hint->inode->i_mode)
855 ++ if (S_ISREG(hint->inode->i_mode) && !IS_PRIVATE(hint->inode)
856 + && hint->inode->i_size >=
857 + REISERFS_SB(hint->th->t_super)->s_alloc_options.
858 + preallocmin * hint->inode->i_sb->s_blocksize)
859 +diff --git a/include/linux/bpf.h b/include/linux/bpf.h
860 +index 75ffd3b2149e..7995940d4187 100644
861 +--- a/include/linux/bpf.h
862 ++++ b/include/linux/bpf.h
863 +@@ -36,7 +36,10 @@ struct bpf_map_ops {
864 + };
865 +
866 + struct bpf_map {
867 +- atomic_t refcnt;
868 ++ /* 1st cacheline with read-mostly members of which some
869 ++ * are also accessed in fast-path (e.g. ops, max_entries).
870 ++ */
871 ++ const struct bpf_map_ops *ops ____cacheline_aligned;
872 + enum bpf_map_type map_type;
873 + u32 key_size;
874 + u32 value_size;
875 +@@ -44,10 +47,15 @@ struct bpf_map {
876 + u32 map_flags;
877 + u32 pages;
878 + bool unpriv_array;
879 +- struct user_struct *user;
880 +- const struct bpf_map_ops *ops;
881 +- struct work_struct work;
882 ++ /* 7 bytes hole */
883 ++
884 ++ /* 2nd cacheline with misc members to avoid false sharing
885 ++ * particularly with refcounting.
886 ++ */
887 ++ struct user_struct *user ____cacheline_aligned;
888 ++ atomic_t refcnt;
889 + atomic_t usercnt;
890 ++ struct work_struct work;
891 + };
892 +
893 + struct bpf_map_type_list {
894 +diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
895 +index 2189935075b4..a951fd10aaaa 100644
896 +--- a/include/linux/cacheinfo.h
897 ++++ b/include/linux/cacheinfo.h
898 +@@ -71,6 +71,7 @@ struct cpu_cacheinfo {
899 + struct cacheinfo *info_list;
900 + unsigned int num_levels;
901 + unsigned int num_leaves;
902 ++ bool cpu_map_populated;
903 + };
904 +
905 + /*
906 +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
907 +index 490f5a83f947..e3d7754f25f0 100644
908 +--- a/include/linux/mmzone.h
909 ++++ b/include/linux/mmzone.h
910 +@@ -633,6 +633,8 @@ typedef struct pglist_data {
911 + int kswapd_order;
912 + enum zone_type kswapd_classzone_idx;
913 +
914 ++ int kswapd_failures; /* Number of 'reclaimed == 0' runs */
915 ++
916 + #ifdef CONFIG_COMPACTION
917 + int kcompactd_max_order;
918 + enum zone_type kcompactd_classzone_idx;
919 +diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
920 +index a3d04934aa96..6f8fbcf10dfb 100644
921 +--- a/include/linux/vermagic.h
922 ++++ b/include/linux/vermagic.h
923 +@@ -24,16 +24,10 @@
924 + #ifndef MODULE_ARCH_VERMAGIC
925 + #define MODULE_ARCH_VERMAGIC ""
926 + #endif
927 +-#ifdef RETPOLINE
928 +-#define MODULE_VERMAGIC_RETPOLINE "retpoline "
929 +-#else
930 +-#define MODULE_VERMAGIC_RETPOLINE ""
931 +-#endif
932 +
933 + #define VERMAGIC_STRING \
934 + UTS_RELEASE " " \
935 + MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
936 + MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
937 +- MODULE_ARCH_VERMAGIC \
938 +- MODULE_VERMAGIC_RETPOLINE
939 ++ MODULE_ARCH_VERMAGIC
940 +
941 +diff --git a/include/net/arp.h b/include/net/arp.h
942 +index 5e0f891d476c..1b3f86981757 100644
943 +--- a/include/net/arp.h
944 ++++ b/include/net/arp.h
945 +@@ -19,6 +19,9 @@ static inline u32 arp_hashfn(const void *pkey, const struct net_device *dev, u32
946 +
947 + static inline struct neighbour *__ipv4_neigh_lookup_noref(struct net_device *dev, u32 key)
948 + {
949 ++ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
950 ++ key = INADDR_ANY;
951 ++
952 + return ___neigh_lookup_noref(&arp_tbl, neigh_key_eq32, arp_hashfn, &key, dev);
953 + }
954 +
955 +diff --git a/include/net/ipv6.h b/include/net/ipv6.h
956 +index 615ce0abba9c..e64210c98c2b 100644
957 +--- a/include/net/ipv6.h
958 ++++ b/include/net/ipv6.h
959 +@@ -290,6 +290,7 @@ int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
960 + int flags);
961 + int ip6_flowlabel_init(void);
962 + void ip6_flowlabel_cleanup(void);
963 ++bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);
964 +
965 + static inline void fl6_sock_release(struct ip6_flowlabel *fl)
966 + {
967 +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
968 +index 0940598c002f..23102da24dd9 100644
969 +--- a/include/net/net_namespace.h
970 ++++ b/include/net/net_namespace.h
971 +@@ -213,6 +213,11 @@ int net_eq(const struct net *net1, const struct net *net2)
972 + return net1 == net2;
973 + }
974 +
975 ++static inline int check_net(const struct net *net)
976 ++{
977 ++ return atomic_read(&net->count) != 0;
978 ++}
979 ++
980 + void net_drop_ns(void *);
981 +
982 + #else
983 +@@ -237,6 +242,11 @@ int net_eq(const struct net *net1, const struct net *net2)
984 + return 1;
985 + }
986 +
987 ++static inline int check_net(const struct net *net)
988 ++{
989 ++ return 1;
990 ++}
991 ++
992 + #define net_drop_ns NULL
993 + #endif
994 +
995 +diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
996 +index 1c3154913a39..bc96b14dfb2c 100644
997 +--- a/include/uapi/linux/eventpoll.h
998 ++++ b/include/uapi/linux/eventpoll.h
999 +@@ -26,6 +26,19 @@
1000 + #define EPOLL_CTL_DEL 2
1001 + #define EPOLL_CTL_MOD 3
1002 +
1003 ++/* Epoll event masks */
1004 ++#define EPOLLIN 0x00000001
1005 ++#define EPOLLPRI 0x00000002
1006 ++#define EPOLLOUT 0x00000004
1007 ++#define EPOLLERR 0x00000008
1008 ++#define EPOLLHUP 0x00000010
1009 ++#define EPOLLRDNORM 0x00000040
1010 ++#define EPOLLRDBAND 0x00000080
1011 ++#define EPOLLWRNORM 0x00000100
1012 ++#define EPOLLWRBAND 0x00000200
1013 ++#define EPOLLMSG 0x00000400
1014 ++#define EPOLLRDHUP 0x00002000
1015 ++
1016 + /* Set exclusive wakeup mode for the target file descriptor */
1017 + #define EPOLLEXCLUSIVE (1 << 28)
1018 +
1019 +diff --git a/init/Kconfig b/init/Kconfig
1020 +index 34407f15e6d3..b331feeabda4 100644
1021 +--- a/init/Kconfig
1022 ++++ b/init/Kconfig
1023 +@@ -1609,6 +1609,13 @@ config BPF_SYSCALL
1024 + Enable the bpf() system call that allows to manipulate eBPF
1025 + programs and maps via file descriptors.
1026 +
1027 ++config BPF_JIT_ALWAYS_ON
1028 ++ bool "Permanently enable BPF JIT and remove BPF interpreter"
1029 ++ depends on BPF_SYSCALL && HAVE_EBPF_JIT && BPF_JIT
1030 ++ help
1031 ++ Enables BPF JIT and removes BPF interpreter to avoid
1032 ++ speculative execution of BPF instructions by the interpreter
1033 ++
1034 + config SHMEM
1035 + bool "Use full shmem filesystem" if EXPERT
1036 + default y
1037 +diff --git a/ipc/msg.c b/ipc/msg.c
1038 +index e12307d0c920..ff10d43b5184 100644
1039 +--- a/ipc/msg.c
1040 ++++ b/ipc/msg.c
1041 +@@ -763,7 +763,10 @@ static inline int convert_mode(long *msgtyp, int msgflg)
1042 + if (*msgtyp == 0)
1043 + return SEARCH_ANY;
1044 + if (*msgtyp < 0) {
1045 +- *msgtyp = -*msgtyp;
1046 ++ if (*msgtyp == LONG_MIN) /* -LONG_MIN is undefined */
1047 ++ *msgtyp = LONG_MAX;
1048 ++ else
1049 ++ *msgtyp = -*msgtyp;
1050 + return SEARCH_LESSEQUAL;
1051 + }
1052 + if (msgflg & MSG_EXCEPT)
1053 +diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
1054 +index aa6d98154106..879ca844ba1d 100644
1055 +--- a/kernel/bpf/core.c
1056 ++++ b/kernel/bpf/core.c
1057 +@@ -458,6 +458,7 @@ noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
1058 + }
1059 + EXPORT_SYMBOL_GPL(__bpf_call_base);
1060 +
1061 ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1062 + /**
1063 + * __bpf_prog_run - run eBPF program on a given context
1064 + * @ctx: is the data we are operating on
1065 +@@ -641,7 +642,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
1066 + DST = tmp;
1067 + CONT;
1068 + ALU_MOD_X:
1069 +- if (unlikely(SRC == 0))
1070 ++ if (unlikely((u32)SRC == 0))
1071 + return 0;
1072 + tmp = (u32) DST;
1073 + DST = do_div(tmp, (u32) SRC);
1074 +@@ -660,7 +661,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
1075 + DST = div64_u64(DST, SRC);
1076 + CONT;
1077 + ALU_DIV_X:
1078 +- if (unlikely(SRC == 0))
1079 ++ if (unlikely((u32)SRC == 0))
1080 + return 0;
1081 + tmp = (u32) DST;
1082 + do_div(tmp, (u32) SRC);
1083 +@@ -715,7 +716,7 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
1084 + struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2;
1085 + struct bpf_array *array = container_of(map, struct bpf_array, map);
1086 + struct bpf_prog *prog;
1087 +- u64 index = BPF_R3;
1088 ++ u32 index = BPF_R3;
1089 +
1090 + if (unlikely(index >= array->map.max_entries))
1091 + goto out;
1092 +@@ -923,6 +924,13 @@ static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
1093 + }
1094 + STACK_FRAME_NON_STANDARD(__bpf_prog_run); /* jump table */
1095 +
1096 ++#else
1097 ++static unsigned int __bpf_prog_ret0(void *ctx, const struct bpf_insn *insn)
1098 ++{
1099 ++ return 0;
1100 ++}
1101 ++#endif
1102 ++
1103 + bool bpf_prog_array_compatible(struct bpf_array *array,
1104 + const struct bpf_prog *fp)
1105 + {
1106 +@@ -970,7 +978,11 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
1107 + */
1108 + struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1109 + {
1110 ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1111 + fp->bpf_func = (void *) __bpf_prog_run;
1112 ++#else
1113 ++ fp->bpf_func = (void *) __bpf_prog_ret0;
1114 ++#endif
1115 +
1116 + /* eBPF JITs can rewrite the program in case constant
1117 + * blinding is active. However, in case of error during
1118 +@@ -979,6 +991,12 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
1119 + * be JITed, but falls back to the interpreter.
1120 + */
1121 + fp = bpf_int_jit_compile(fp);
1122 ++#ifdef CONFIG_BPF_JIT_ALWAYS_ON
1123 ++ if (!fp->jited) {
1124 ++ *err = -ENOTSUPP;
1125 ++ return fp;
1126 ++ }
1127 ++#endif
1128 + bpf_prog_lock_ro(fp);
1129 +
1130 + /* The tail call compatibility check can only be done at
1131 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1132 +index 19c44cf59bb2..076e4a0ff95e 100644
1133 +--- a/kernel/bpf/verifier.c
1134 ++++ b/kernel/bpf/verifier.c
1135 +@@ -702,6 +702,13 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1136 + return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
1137 + }
1138 +
1139 ++static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
1140 ++{
1141 ++ const struct bpf_reg_state *reg = &env->cur_state.regs[regno];
1142 ++
1143 ++ return reg->type == PTR_TO_CTX;
1144 ++}
1145 ++
1146 + static int check_ptr_alignment(struct bpf_verifier_env *env,
1147 + struct bpf_reg_state *reg, int off, int size)
1148 + {
1149 +@@ -896,6 +903,12 @@ static int check_xadd(struct bpf_verifier_env *env, struct bpf_insn *insn)
1150 + return -EACCES;
1151 + }
1152 +
1153 ++ if (is_ctx_reg(env, insn->dst_reg)) {
1154 ++ verbose("BPF_XADD stores into R%d context is not allowed\n",
1155 ++ insn->dst_reg);
1156 ++ return -EACCES;
1157 ++ }
1158 ++
1159 + /* check whether atomic_add can read the memory */
1160 + err = check_mem_access(env, insn->dst_reg, insn->off,
1161 + BPF_SIZE(insn->code), BPF_READ, -1);
1162 +@@ -1843,6 +1856,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
1163 + return -EINVAL;
1164 + }
1165 +
1166 ++ if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
1167 ++ verbose("BPF_ARSH not supported for 32 bit ALU\n");
1168 ++ return -EINVAL;
1169 ++ }
1170 ++
1171 + if ((opcode == BPF_LSH || opcode == BPF_RSH ||
1172 + opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
1173 + int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
1174 +@@ -3007,6 +3025,12 @@ static int do_check(struct bpf_verifier_env *env)
1175 + if (err)
1176 + return err;
1177 +
1178 ++ if (is_ctx_reg(env, insn->dst_reg)) {
1179 ++ verbose("BPF_ST stores into R%d context is not allowed\n",
1180 ++ insn->dst_reg);
1181 ++ return -EACCES;
1182 ++ }
1183 ++
1184 + /* check that memory (dst_reg + off) is writeable */
1185 + err = check_mem_access(env, insn->dst_reg, insn->off,
1186 + BPF_SIZE(insn->code), BPF_WRITE,
1187 +@@ -3386,6 +3410,24 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
1188 +
1189 +
1190 + for (i = 0; i < insn_cnt; i++, insn++) {
1191 ++ if (insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
1192 ++ insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
1193 ++ /* due to JIT bugs clear upper 32-bits of src register
1194 ++ * before div/mod operation
1195 ++ */
1196 ++ insn_buf[0] = BPF_MOV32_REG(insn->src_reg, insn->src_reg);
1197 ++ insn_buf[1] = *insn;
1198 ++ cnt = 2;
1199 ++ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
1200 ++ if (!new_prog)
1201 ++ return -ENOMEM;
1202 ++
1203 ++ delta += cnt - 1;
1204 ++ env->prog = prog = new_prog;
1205 ++ insn = new_prog->insnsi + i + delta;
1206 ++ continue;
1207 ++ }
1208 ++
1209 + if (insn->code != (BPF_JMP | BPF_CALL))
1210 + continue;
1211 +
1212 +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
1213 +index eeb7f2f5698d..54fd2fed36e9 100644
1214 +--- a/kernel/time/hrtimer.c
1215 ++++ b/kernel/time/hrtimer.c
1216 +@@ -652,7 +652,9 @@ static void hrtimer_reprogram(struct hrtimer *timer,
1217 + static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
1218 + {
1219 + base->expires_next.tv64 = KTIME_MAX;
1220 ++ base->hang_detected = 0;
1221 + base->hres_active = 0;
1222 ++ base->next_timer = NULL;
1223 + }
1224 +
1225 + /*
1226 +@@ -1610,6 +1612,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
1227 + timerqueue_init_head(&cpu_base->clock_base[i].active);
1228 + }
1229 +
1230 ++ cpu_base->active_bases = 0;
1231 + cpu_base->cpu = cpu;
1232 + hrtimer_init_hres(cpu_base);
1233 + return 0;
1234 +diff --git a/lib/test_bpf.c b/lib/test_bpf.c
1235 +index 2e385026915c..98da7520a6aa 100644
1236 +--- a/lib/test_bpf.c
1237 ++++ b/lib/test_bpf.c
1238 +@@ -5646,9 +5646,8 @@ static struct bpf_prog *generate_filter(int which, int *err)
1239 + return NULL;
1240 + }
1241 + }
1242 +- /* We don't expect to fail. */
1243 + if (*err) {
1244 +- pr_cont("FAIL to attach err=%d len=%d\n",
1245 ++ pr_cont("FAIL to prog_create err=%d len=%d\n",
1246 + *err, fprog.len);
1247 + return NULL;
1248 + }
1249 +@@ -5671,6 +5670,10 @@ static struct bpf_prog *generate_filter(int which, int *err)
1250 + * checks.
1251 + */
1252 + fp = bpf_prog_select_runtime(fp, err);
1253 ++ if (*err) {
1254 ++ pr_cont("FAIL to select_runtime err=%d\n", *err);
1255 ++ return NULL;
1256 ++ }
1257 + break;
1258 + }
1259 +
1260 +@@ -5856,8 +5859,8 @@ static __init int test_bpf(void)
1261 + pass_cnt++;
1262 + continue;
1263 + }
1264 +-
1265 +- return err;
1266 ++ err_cnt++;
1267 ++ continue;
1268 + }
1269 +
1270 + pr_cont("jited:%u ", fp->jited);
1271 +diff --git a/mm/cma.c b/mm/cma.c
1272 +index c960459eda7e..397687fc51f9 100644
1273 +--- a/mm/cma.c
1274 ++++ b/mm/cma.c
1275 +@@ -54,7 +54,7 @@ unsigned long cma_get_size(const struct cma *cma)
1276 + }
1277 +
1278 + static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
1279 +- int align_order)
1280 ++ unsigned int align_order)
1281 + {
1282 + if (align_order <= cma->order_per_bit)
1283 + return 0;
1284 +@@ -62,17 +62,14 @@ static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
1285 + }
1286 +
1287 + /*
1288 +- * Find a PFN aligned to the specified order and return an offset represented in
1289 +- * order_per_bits.
1290 ++ * Find the offset of the base PFN from the specified align_order.
1291 ++ * The value returned is represented in order_per_bits.
1292 + */
1293 + static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
1294 +- int align_order)
1295 ++ unsigned int align_order)
1296 + {
1297 +- if (align_order <= cma->order_per_bit)
1298 +- return 0;
1299 +-
1300 +- return (ALIGN(cma->base_pfn, (1UL << align_order))
1301 +- - cma->base_pfn) >> cma->order_per_bit;
1302 ++ return (cma->base_pfn & ((1UL << align_order) - 1))
1303 ++ >> cma->order_per_bit;
1304 + }
1305 +
1306 + static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
1307 +diff --git a/mm/internal.h b/mm/internal.h
1308 +index 34a5459e5989..3e2d01694747 100644
1309 +--- a/mm/internal.h
1310 ++++ b/mm/internal.h
1311 +@@ -73,6 +73,12 @@ static inline void set_page_refcounted(struct page *page)
1312 +
1313 + extern unsigned long highest_memmap_pfn;
1314 +
1315 ++/*
1316 ++ * Maximum number of reclaim retries without progress before the OOM
1317 ++ * killer is consider the only way forward.
1318 ++ */
1319 ++#define MAX_RECLAIM_RETRIES 16
1320 ++
1321 + /*
1322 + * in mm/vmscan.c:
1323 + */
1324 +diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1325 +index 2a800c4a39bd..50088150fc17 100644
1326 +--- a/mm/memcontrol.c
1327 ++++ b/mm/memcontrol.c
1328 +@@ -5531,7 +5531,7 @@ static void uncharge_list(struct list_head *page_list)
1329 + next = page->lru.next;
1330 +
1331 + VM_BUG_ON_PAGE(PageLRU(page), page);
1332 +- VM_BUG_ON_PAGE(page_count(page), page);
1333 ++ VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page);
1334 +
1335 + if (!page->mem_cgroup)
1336 + continue;
1337 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
1338 +index ce7d416edab7..5aa71a82ca73 100644
1339 +--- a/mm/memory-failure.c
1340 ++++ b/mm/memory-failure.c
1341 +@@ -535,6 +535,13 @@ static int delete_from_lru_cache(struct page *p)
1342 + */
1343 + ClearPageActive(p);
1344 + ClearPageUnevictable(p);
1345 ++
1346 ++ /*
1347 ++ * Poisoned page might never drop its ref count to 0 so we have
1348 ++ * to uncharge it manually from its memcg.
1349 ++ */
1350 ++ mem_cgroup_uncharge(p);
1351 ++
1352 + /*
1353 + * drop the page count elevated by isolate_lru_page()
1354 + */
1355 +diff --git a/mm/mmap.c b/mm/mmap.c
1356 +index 5b48adb4aa56..45ac5b973459 100644
1357 +--- a/mm/mmap.c
1358 ++++ b/mm/mmap.c
1359 +@@ -2240,7 +2240,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1360 + gap_addr = TASK_SIZE;
1361 +
1362 + next = vma->vm_next;
1363 +- if (next && next->vm_start < gap_addr) {
1364 ++ if (next && next->vm_start < gap_addr &&
1365 ++ (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
1366 + if (!(next->vm_flags & VM_GROWSUP))
1367 + return -ENOMEM;
1368 + /* Check that both stack segments have the same anon_vma? */
1369 +@@ -2324,7 +2325,8 @@ int expand_downwards(struct vm_area_struct *vma,
1370 + if (gap_addr > address)
1371 + return -ENOMEM;
1372 + prev = vma->vm_prev;
1373 +- if (prev && prev->vm_end > gap_addr) {
1374 ++ if (prev && prev->vm_end > gap_addr &&
1375 ++ (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
1376 + if (!(prev->vm_flags & VM_GROWSDOWN))
1377 + return -ENOMEM;
1378 + /* Check that both stack segments have the same anon_vma? */
1379 +diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1380 +index fbc38888252b..94018ea5f935 100644
1381 +--- a/mm/page_alloc.c
1382 ++++ b/mm/page_alloc.c
1383 +@@ -2821,9 +2821,6 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
1384 + if (!area->nr_free)
1385 + continue;
1386 +
1387 +- if (alloc_harder)
1388 +- return true;
1389 +-
1390 + for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
1391 + if (!list_empty(&area->free_list[mt]))
1392 + return true;
1393 +@@ -2835,6 +2832,9 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
1394 + return true;
1395 + }
1396 + #endif
1397 ++ if (alloc_harder &&
1398 ++ !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
1399 ++ return true;
1400 + }
1401 + return false;
1402 + }
1403 +@@ -3421,12 +3421,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
1404 + return false;
1405 + }
1406 +
1407 +-/*
1408 +- * Maximum number of reclaim retries without any progress before OOM killer
1409 +- * is consider as the only way to move forward.
1410 +- */
1411 +-#define MAX_RECLAIM_RETRIES 16
1412 +-
1413 + /*
1414 + * Checks whether it makes sense to retry the reclaim to make a forward progress
1415 + * for the given allocation request.
1416 +@@ -4385,7 +4379,8 @@ void show_free_areas(unsigned int filter)
1417 + K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
1418 + K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
1419 + node_page_state(pgdat, NR_PAGES_SCANNED),
1420 +- !pgdat_reclaimable(pgdat) ? "yes" : "no");
1421 ++ pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
1422 ++ "yes" : "no");
1423 + }
1424 +
1425 + for_each_populated_zone(zone) {
1426 +diff --git a/mm/vmscan.c b/mm/vmscan.c
1427 +index 30a88b945a44..f118dc23f662 100644
1428 +--- a/mm/vmscan.c
1429 ++++ b/mm/vmscan.c
1430 +@@ -2606,6 +2606,15 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
1431 + } while (should_continue_reclaim(pgdat, sc->nr_reclaimed - nr_reclaimed,
1432 + sc->nr_scanned - nr_scanned, sc));
1433 +
1434 ++ /*
1435 ++ * Kswapd gives up on balancing particular nodes after too
1436 ++ * many failures to reclaim anything from them and goes to
1437 ++ * sleep. On reclaim progress, reset the failure counter. A
1438 ++ * successful direct reclaim run will revive a dormant kswapd.
1439 ++ */
1440 ++ if (reclaimable)
1441 ++ pgdat->kswapd_failures = 0;
1442 ++
1443 + return reclaimable;
1444 + }
1445 +
1446 +@@ -2680,10 +2689,6 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
1447 + GFP_KERNEL | __GFP_HARDWALL))
1448 + continue;
1449 +
1450 +- if (sc->priority != DEF_PRIORITY &&
1451 +- !pgdat_reclaimable(zone->zone_pgdat))
1452 +- continue; /* Let kswapd poll it */
1453 +-
1454 + /*
1455 + * If we already have plenty of memory free for
1456 + * compaction in this zone, don't free any more.
1457 +@@ -2820,7 +2825,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
1458 + return 0;
1459 + }
1460 +
1461 +-static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
1462 ++static bool allow_direct_reclaim(pg_data_t *pgdat)
1463 + {
1464 + struct zone *zone;
1465 + unsigned long pfmemalloc_reserve = 0;
1466 +@@ -2828,6 +2833,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
1467 + int i;
1468 + bool wmark_ok;
1469 +
1470 ++ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
1471 ++ return true;
1472 ++
1473 + for (i = 0; i <= ZONE_NORMAL; i++) {
1474 + zone = &pgdat->node_zones[i];
1475 + if (!managed_zone(zone) ||
1476 +@@ -2908,7 +2916,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
1477 +
1478 + /* Throttle based on the first usable node */
1479 + pgdat = zone->zone_pgdat;
1480 +- if (pfmemalloc_watermark_ok(pgdat))
1481 ++ if (allow_direct_reclaim(pgdat))
1482 + goto out;
1483 + break;
1484 + }
1485 +@@ -2930,14 +2938,14 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
1486 + */
1487 + if (!(gfp_mask & __GFP_FS)) {
1488 + wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
1489 +- pfmemalloc_watermark_ok(pgdat), HZ);
1490 ++ allow_direct_reclaim(pgdat), HZ);
1491 +
1492 + goto check_pending;
1493 + }
1494 +
1495 + /* Throttle until kswapd wakes the process */
1496 + wait_event_killable(zone->zone_pgdat->pfmemalloc_wait,
1497 +- pfmemalloc_watermark_ok(pgdat));
1498 ++ allow_direct_reclaim(pgdat));
1499 +
1500 + check_pending:
1501 + if (fatal_signal_pending(current))
1502 +@@ -3116,7 +3124,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
1503 +
1504 + /*
1505 + * The throttled processes are normally woken up in balance_pgdat() as
1506 +- * soon as pfmemalloc_watermark_ok() is true. But there is a potential
1507 ++ * soon as allow_direct_reclaim() is true. But there is a potential
1508 + * race between when kswapd checks the watermarks and a process gets
1509 + * throttled. There is also a potential race if processes get
1510 + * throttled, kswapd wakes, a large process exits thereby balancing the
1511 +@@ -3130,6 +3138,10 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, int classzone_idx)
1512 + if (waitqueue_active(&pgdat->pfmemalloc_wait))
1513 + wake_up_all(&pgdat->pfmemalloc_wait);
1514 +
1515 ++ /* Hopeless node, leave it to direct reclaim */
1516 ++ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
1517 ++ return true;
1518 ++
1519 + for (i = 0; i <= classzone_idx; i++) {
1520 + struct zone *zone = pgdat->node_zones + i;
1521 +
1522 +@@ -3216,9 +3228,9 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
1523 + count_vm_event(PAGEOUTRUN);
1524 +
1525 + do {
1526 ++ unsigned long nr_reclaimed = sc.nr_reclaimed;
1527 + bool raise_priority = true;
1528 +
1529 +- sc.nr_reclaimed = 0;
1530 + sc.reclaim_idx = classzone_idx;
1531 +
1532 + /*
1533 +@@ -3297,7 +3309,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
1534 + * able to safely make forward progress. Wake them
1535 + */
1536 + if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
1537 +- pfmemalloc_watermark_ok(pgdat))
1538 ++ allow_direct_reclaim(pgdat))
1539 + wake_up_all(&pgdat->pfmemalloc_wait);
1540 +
1541 + /* Check if kswapd should be suspending */
1542 +@@ -3308,10 +3320,14 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
1543 + * Raise priority if scanning rate is too low or there was no
1544 + * progress in reclaiming pages
1545 + */
1546 +- if (raise_priority || !sc.nr_reclaimed)
1547 ++ nr_reclaimed = sc.nr_reclaimed - nr_reclaimed;
1548 ++ if (raise_priority || !nr_reclaimed)
1549 + sc.priority--;
1550 + } while (sc.priority >= 1);
1551 +
1552 ++ if (!sc.nr_reclaimed)
1553 ++ pgdat->kswapd_failures++;
1554 ++
1555 + out:
1556 + /*
1557 + * Return the order kswapd stopped reclaiming at as
1558 +@@ -3511,6 +3527,10 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
1559 + if (!waitqueue_active(&pgdat->kswapd_wait))
1560 + return;
1561 +
1562 ++ /* Hopeless node, leave it to direct reclaim */
1563 ++ if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
1564 ++ return;
1565 ++
1566 + /* Only wake kswapd if all zones are unbalanced */
1567 + for (z = 0; z <= classzone_idx; z++) {
1568 + zone = pgdat->node_zones + z;
1569 +@@ -3781,9 +3801,6 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
1570 + sum_zone_node_page_state(pgdat->node_id, NR_SLAB_RECLAIMABLE) <= pgdat->min_slab_pages)
1571 + return NODE_RECLAIM_FULL;
1572 +
1573 +- if (!pgdat_reclaimable(pgdat))
1574 +- return NODE_RECLAIM_FULL;
1575 +-
1576 + /*
1577 + * Do not scan if the allocation should not be delayed.
1578 + */
1579 +diff --git a/mm/vmstat.c b/mm/vmstat.c
1580 +index 6a088df04b29..3863b5d6d598 100644
1581 +--- a/mm/vmstat.c
1582 ++++ b/mm/vmstat.c
1583 +@@ -1421,7 +1421,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
1584 + "\n node_unreclaimable: %u"
1585 + "\n start_pfn: %lu"
1586 + "\n node_inactive_ratio: %u",
1587 +- !pgdat_reclaimable(zone->zone_pgdat),
1588 ++ pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES,
1589 + zone->zone_start_pfn,
1590 + zone->zone_pgdat->inactive_ratio);
1591 + seq_putc(m, '\n');
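
The vmscan/vmstat hunks above introduce pgdat->kswapd_failures: balance_pgdat() counts a full priority loop that reclaims nothing as a failure, wakeup_kswapd() and prepare_kswapd_sleep() treat a node that has failed MAX_RECLAIM_RETRIES times as hopeless and leave it to direct reclaim, allow_direct_reclaim() stops throttling against such a node, and a successful direct reclaim in shrink_node() resets the counter. A minimal userspace sketch of that state machine; the value 16 is taken from mm/internal.h and is an assumption here:

        #include <stdbool.h>
        #include <stdio.h>

        #define MAX_RECLAIM_RETRIES 16  /* assumed to mirror mm/internal.h */

        struct node_state {
                unsigned int kswapd_failures;
        };

        /* balance_pgdat(): a full pass that reclaims nothing is a failure */
        static void kswapd_pass(struct node_state *n, unsigned long nr_reclaimed)
        {
                if (!nr_reclaimed)
                        n->kswapd_failures++;
        }

        /* wakeup_kswapd()/prepare_kswapd_sleep(): hopeless nodes are skipped */
        static bool kswapd_should_run(const struct node_state *n)
        {
                return n->kswapd_failures < MAX_RECLAIM_RETRIES;
        }

        /* shrink_node(): a successful direct reclaim revives a dormant kswapd */
        static void direct_reclaim_done(struct node_state *n, bool reclaimable)
        {
                if (reclaimable)
                        n->kswapd_failures = 0;
        }

        int main(void)
        {
                struct node_state n = { 0 };
                int i;

                for (i = 0; i < MAX_RECLAIM_RETRIES; i++)
                        kswapd_pass(&n, 0);             /* fruitless passes */
                printf("kswapd runs: %d\n", kswapd_should_run(&n)); /* 0 */

                direct_reclaim_done(&n, true);          /* progress made */
                printf("kswapd runs: %d\n", kswapd_should_run(&n)); /* 1 */
                return 0;
        }
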
1592 +diff --git a/net/can/af_can.c b/net/can/af_can.c
1593 +index 5488e4a6ccd0..ac1552d8b4ad 100644
1594 +--- a/net/can/af_can.c
1595 ++++ b/net/can/af_can.c
1596 +@@ -722,13 +722,12 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
1597 + if (unlikely(!net_eq(dev_net(dev), &init_net)))
1598 + goto drop;
1599 +
1600 +- if (WARN_ONCE(dev->type != ARPHRD_CAN ||
1601 +- skb->len != CAN_MTU ||
1602 +- cfd->len > CAN_MAX_DLEN,
1603 +- "PF_CAN: dropped non conform CAN skbuf: "
1604 +- "dev type %d, len %d, datalen %d\n",
1605 +- dev->type, skb->len, cfd->len))
1606 ++ if (unlikely(dev->type != ARPHRD_CAN || skb->len != CAN_MTU ||
1607 ++ cfd->len > CAN_MAX_DLEN)) {
1608 ++ pr_warn_once("PF_CAN: dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
1609 ++ dev->type, skb->len, cfd->len);
1610 + goto drop;
1611 ++ }
1612 +
1613 + can_receive(skb, dev);
1614 + return NET_RX_SUCCESS;
1615 +@@ -746,13 +745,12 @@ static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
1616 + if (unlikely(!net_eq(dev_net(dev), &init_net)))
1617 + goto drop;
1618 +
1619 +- if (WARN_ONCE(dev->type != ARPHRD_CAN ||
1620 +- skb->len != CANFD_MTU ||
1621 +- cfd->len > CANFD_MAX_DLEN,
1622 +- "PF_CAN: dropped non conform CAN FD skbuf: "
1623 +- "dev type %d, len %d, datalen %d\n",
1624 +- dev->type, skb->len, cfd->len))
1625 ++ if (unlikely(dev->type != ARPHRD_CAN || skb->len != CANFD_MTU ||
1626 ++ cfd->len > CANFD_MAX_DLEN)) {
1627 ++ pr_warn_once("PF_CAN: dropped non conform CAN FD skbuf: dev type %d, len %d, datalen %d\n",
1628 ++ dev->type, skb->len, cfd->len);
1629 + goto drop;
1630 ++ }
1631 +
1632 + can_receive(skb, dev);
1633 + return NET_RX_SUCCESS;
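
The af_can.c hunks replace WARN_ONCE(), which prints a full backtrace for a condition a remote peer can trigger with malformed frames, with a plain one-shot pr_warn_once() before dropping the skb. A hedged userspace sketch of the warn-once idiom (the constants are illustrative; ARPHRD_CAN is 280 and CAN_MTU is 16 in the kernel headers, to the best of my knowledge):

        #include <stdbool.h>
        #include <stdio.h>

        /* one-shot warning: later calls are silent, like pr_warn_once() */
        #define warn_once(fmt, ...)                                     \
                do {                                                    \
                        static bool warned;                             \
                        if (!warned) {                                  \
                                warned = true;                          \
                                fprintf(stderr, fmt, __VA_ARGS__);      \
                        }                                               \
                } while (0)

        static int can_rcv_sketch(int dev_type, int len, int datalen)
        {
                if (dev_type != 280 /* ARPHRD_CAN */ || len != 16 /* CAN_MTU */) {
                        warn_once("dropped non conform CAN skbuf: dev type %d, len %d, datalen %d\n",
                                  dev_type, len, datalen);
                        return -1;      /* drop */
                }
                return 0;
        }

        int main(void)
        {
                can_rcv_sketch(1, 99, 8);       /* warns */
                can_rcv_sketch(1, 99, 8);       /* silent */
                return 0;
        }
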
1634 +diff --git a/net/core/dev.c b/net/core/dev.c
1635 +index 09007a71c8dd..67b5d4d8acb1 100644
1636 +--- a/net/core/dev.c
1637 ++++ b/net/core/dev.c
1638 +@@ -3083,10 +3083,21 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
1639 + hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
1640 +
1641 + /* + transport layer */
1642 +- if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
1643 +- hdr_len += tcp_hdrlen(skb);
1644 +- else
1645 +- hdr_len += sizeof(struct udphdr);
1646 ++ if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
1647 ++ const struct tcphdr *th;
1648 ++ struct tcphdr _tcphdr;
1649 ++
1650 ++ th = skb_header_pointer(skb, skb_transport_offset(skb),
1651 ++ sizeof(_tcphdr), &_tcphdr);
1652 ++ if (likely(th))
1653 ++ hdr_len += __tcp_hdrlen(th);
1654 ++ } else {
1655 ++ struct udphdr _udphdr;
1656 ++
1657 ++ if (skb_header_pointer(skb, skb_transport_offset(skb),
1658 ++ sizeof(_udphdr), &_udphdr))
1659 ++ hdr_len += sizeof(struct udphdr);
1660 ++ }
1661 +
1662 + if (shinfo->gso_type & SKB_GSO_DODGY)
1663 + gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
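
qdisc_pkt_len_init() previously called tcp_hdrlen() on a header that the GSO metadata merely claims exists; a malformed packet can place its transport header outside the readable area. skb_header_pointer() copies the candidate header into a caller-provided buffer when it is not directly addressable and returns NULL if the packet is too short to contain it. A userspace analogue of that safe-peek contract (names are illustrative, not kernel API):

        #include <stddef.h>
        #include <stdio.h>
        #include <string.h>

        /*
         * Return len bytes at offset via buf, or NULL if the claimed span
         * lies outside the packet. Loosely mirrors skb_header_pointer();
         * real skbs may be non-linear, which is why the copy path exists.
         */
        static const void *header_pointer(const unsigned char *data,
                                          size_t data_len, size_t offset,
                                          size_t len, void *buf)
        {
                if (offset + len > data_len)
                        return NULL;    /* header isn't really there */
                memcpy(buf, data + offset, len);
                return buf;
        }

        int main(void)
        {
                unsigned char pkt[4] = { 0xde, 0xad, 0xbe, 0xef };
                unsigned char hdr[8];

                /* an 8-byte "UDP header" claimed in a 4-byte packet: rejected */
                printf("%p\n",
                       (void *)header_pointer(pkt, sizeof(pkt), 0,
                                              sizeof(hdr), hdr));
                return 0;
        }
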
1664 +diff --git a/net/core/filter.c b/net/core/filter.c
1665 +index 4eb4ce0aeef4..e8c89d2d2bc0 100644
1666 +--- a/net/core/filter.c
1667 ++++ b/net/core/filter.c
1668 +@@ -441,6 +441,10 @@ static int bpf_convert_filter(struct sock_filter *prog, int len,
1669 + convert_bpf_extensions(fp, &insn))
1670 + break;
1671 +
1672 ++ if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
1673 ++ fp->code == (BPF_ALU | BPF_MOD | BPF_X))
1674 ++ *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
1675 ++
1676 + *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
1677 + break;
1678 +
1679 +@@ -1005,11 +1009,9 @@ static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
1680 + */
1681 + goto out_err_free;
1682 +
1683 +- /* We are guaranteed to never error here with cBPF to eBPF
1684 +- * transitions, since there's no issue with type compatibility
1685 +- * checks on program arrays.
1686 +- */
1687 + fp = bpf_prog_select_runtime(fp, &err);
1688 ++ if (err)
1689 ++ goto out_err_free;
1690 +
1691 + kfree(old_prog);
1692 + return fp;
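
bpf_convert_filter() now emits a 32-bit move of X onto itself before any cBPF BPF_DIV/BPF_MOD by register. Classic BPF registers are 32-bit, but the eBPF interpreter divides using the full 64-bit register, so stale upper bits could make a divisor that is zero under cBPF semantics look non-zero. The second hunk makes bpf_migrate_filter() check the error from bpf_prog_select_runtime(), which can now fail. A small demonstration of why the truncation matters:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t x = 0x100000000ULL;    /* upper bits set, low 32 zero */

                /* 64-bit view: the divisor looks non-zero */
                printf("64-bit divisor: %llu\n", (unsigned long long)x);

                /* after BPF_MOV32_REG(X, X): the divisor is really zero */
                uint32_t x32 = (uint32_t)x;
                printf("32-bit divisor: %u -> %s\n", x32,
                       x32 ? "division allowed" : "must be handled as /0");
                return 0;
        }
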
1693 +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
1694 +index 32e4e0158846..862d63ec56e4 100644
1695 +--- a/net/core/flow_dissector.c
1696 ++++ b/net/core/flow_dissector.c
1697 +@@ -550,8 +550,8 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
1698 + out_good:
1699 + ret = true;
1700 +
1701 +- key_control->thoff = (u16)nhoff;
1702 + out:
1703 ++ key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
1704 + key_basic->n_proto = proto;
1705 + key_basic->ip_proto = ip_proto;
1706 +
1707 +@@ -559,7 +559,6 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
1708 +
1709 + out_bad:
1710 + ret = false;
1711 +- key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen);
1712 + goto out;
1713 + }
1714 + EXPORT_SYMBOL(__skb_flow_dissect);
1715 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1716 +index f45f6198851f..7b315663f840 100644
1717 +--- a/net/core/neighbour.c
1718 ++++ b/net/core/neighbour.c
1719 +@@ -496,7 +496,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
1720 + if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
1721 + nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
1722 +
1723 +- hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
1724 ++ hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
1725 +
1726 + if (n->parms->dead) {
1727 + rc = ERR_PTR(-EINVAL);
1728 +@@ -508,7 +508,7 @@ struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
1729 + n1 != NULL;
1730 + n1 = rcu_dereference_protected(n1->next,
1731 + lockdep_is_held(&tbl->lock))) {
1732 +- if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
1733 ++ if (dev == n1->dev && !memcmp(n1->primary_key, n->primary_key, key_len)) {
1734 + if (want_ref)
1735 + neigh_hold(n1);
1736 + rc = n1;
1737 +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
1738 +index a7f05f0130e8..1b4619008c4e 100644
1739 +--- a/net/core/sysctl_net_core.c
1740 ++++ b/net/core/sysctl_net_core.c
1741 +@@ -292,7 +292,13 @@ static struct ctl_table net_core_table[] = {
1742 + .data = &bpf_jit_enable,
1743 + .maxlen = sizeof(int),
1744 + .mode = 0644,
1745 ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
1746 + .proc_handler = proc_dointvec
1747 ++#else
1748 ++ .proc_handler = proc_dointvec_minmax,
1749 ++ .extra1 = &one,
1750 ++ .extra2 = &one,
1751 ++#endif
1752 + },
1753 + # ifdef CONFIG_HAVE_EBPF_JIT
1754 + {
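
With CONFIG_BPF_JIT_ALWAYS_ON, net.core.bpf_jit_enable switches to proc_dointvec_minmax with both bounds pointing at the constant one, so 1 is the only value the sysctl will accept and the interpreter cannot be re-enabled from userspace (jit_init() in the net/socket.c hunk below sets the matching default). A sketch of the same accept-exactly-one-value validation in plain C:

        #include <errno.h>
        #include <stdio.h>

        static int bpf_jit_enable = 1;  /* default forced on */

        /* minmax-style setter: min == max == 1 leaves one legal value */
        static int set_bpf_jit_enable(int val)
        {
                const int min = 1, max = 1;

                if (val < min || val > max)
                        return -EINVAL;
                bpf_jit_enable = val;
                return 0;
        }

        int main(void)
        {
                printf("write 0 -> %d\n", set_bpf_jit_enable(0)); /* -EINVAL */
                printf("write 1 -> %d\n", set_bpf_jit_enable(1)); /* 0 */
                printf("write 2 -> %d\n", set_bpf_jit_enable(2)); /* -EINVAL */
                return 0;
        }
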
1755 +diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
1756 +index 5e3a7302f774..7753681195c1 100644
1757 +--- a/net/dccp/ccids/ccid2.c
1758 ++++ b/net/dccp/ccids/ccid2.c
1759 +@@ -140,6 +140,9 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
1760 +
1761 + ccid2_pr_debug("RTO_EXPIRE\n");
1762 +
1763 ++ if (sk->sk_state == DCCP_CLOSED)
1764 ++ goto out;
1765 ++
1766 + /* back-off timer */
1767 + hc->tx_rto <<= 1;
1768 + if (hc->tx_rto > DCCP_RTO_MAX)
1769 +diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
1770 +index 51b27ae09fbd..e60517eb1c3a 100644
1771 +--- a/net/ipv4/arp.c
1772 ++++ b/net/ipv4/arp.c
1773 +@@ -223,11 +223,16 @@ static bool arp_key_eq(const struct neighbour *neigh, const void *pkey)
1774 +
1775 + static int arp_constructor(struct neighbour *neigh)
1776 + {
1777 +- __be32 addr = *(__be32 *)neigh->primary_key;
1778 ++ __be32 addr;
1779 + struct net_device *dev = neigh->dev;
1780 + struct in_device *in_dev;
1781 + struct neigh_parms *parms;
1782 ++ u32 inaddr_any = INADDR_ANY;
1783 +
1784 ++ if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
1785 ++ memcpy(neigh->primary_key, &inaddr_any, arp_tbl.key_len);
1786 ++
1787 ++ addr = *(__be32 *)neigh->primary_key;
1788 + rcu_read_lock();
1789 + in_dev = __in_dev_get_rcu(dev);
1790 + if (!in_dev) {
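
The neighbour.c and arp.c hunks above cooperate: arp_constructor() now rewrites the primary key of loopback and point-to-point neighbours to INADDR_ANY, so __neigh_create() must hash and compare n->primary_key, the possibly-canonicalized key, rather than the caller's pkey; otherwise insertion and lookup could disagree about which hash bucket an entry belongs to. A minimal sketch of the canonicalize-before-hash rule (the hash and flag values are illustrative):

        #include <stdint.h>
        #include <stdio.h>

        #define DEV_LOOPBACK 1

        /* arp_constructor(): loopback neighbours collapse to INADDR_ANY */
        static uint32_t canonical_key(uint32_t addr, int dev_flags)
        {
                return (dev_flags & DEV_LOOPBACK) ? 0 /* INADDR_ANY */ : addr;
        }

        static unsigned int bucket(uint32_t key)
        {
                return (key * 2654435761u) >> 28;       /* toy hash, 16 buckets */
        }

        int main(void)
        {
                uint32_t pkey = 0x7f000001;             /* 127.0.0.1 */
                uint32_t nkey = canonical_key(pkey, DEV_LOOPBACK);

                /* hashing the raw pkey instead of the canonical key was the bug */
                printf("pkey bucket %u, primary_key bucket %u\n",
                       bucket(pkey), bucket(nkey));
                return 0;
        }
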
1791 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
1792 +index 7bff0c65046f..9c7a4cea1628 100644
1793 +--- a/net/ipv4/igmp.c
1794 ++++ b/net/ipv4/igmp.c
1795 +@@ -332,7 +332,7 @@ static __be32 igmpv3_get_srcaddr(struct net_device *dev,
1796 + return htonl(INADDR_ANY);
1797 +
1798 + for_ifa(in_dev) {
1799 +- if (inet_ifa_match(fl4->saddr, ifa))
1800 ++ if (fl4->saddr == ifa->ifa_local)
1801 + return fl4->saddr;
1802 + } endfor_ifa(in_dev);
1803 +
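
igmpv3_get_srcaddr() used inet_ifa_match(), which tests whether an address falls inside an interface address's subnet, so any on-link source would wrongly validate fl4->saddr as one of our own; the fix demands an exact match against ifa_local. The difference in one runnable comparison:

        #include <arpa/inet.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t saddr = inet_addr("192.0.2.7");        /* on-link host */
                uint32_t local = inet_addr("192.0.2.1");        /* our address */
                uint32_t mask  = inet_addr("255.255.255.0");

                /* inet_ifa_match()-style subnet test: true for the whole /24 */
                printf("subnet match: %d\n", ((saddr ^ local) & mask) == 0);

                /* fixed test: only our own configured address passes */
                printf("exact match:  %d\n", saddr == local);
                return 0;
        }
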
1804 +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1805 +index 05d2bde00864..7efa6b062049 100644
1806 +--- a/net/ipv4/tcp.c
1807 ++++ b/net/ipv4/tcp.c
1808 +@@ -2215,6 +2215,9 @@ void tcp_close(struct sock *sk, long timeout)
1809 + tcp_send_active_reset(sk, GFP_ATOMIC);
1810 + __NET_INC_STATS(sock_net(sk),
1811 + LINUX_MIB_TCPABORTONMEMORY);
1812 ++ } else if (!check_net(sock_net(sk))) {
1813 ++ /* Not possible to send reset; just close */
1814 ++ tcp_set_state(sk, TCP_CLOSE);
1815 + }
1816 + }
1817 +
1818 +diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
1819 +index bc68da38ea86..366b1becff9d 100644
1820 +--- a/net/ipv4/tcp_offload.c
1821 ++++ b/net/ipv4/tcp_offload.c
1822 +@@ -32,6 +32,9 @@ static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
1823 + static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
1824 + netdev_features_t features)
1825 + {
1826 ++ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
1827 ++ return ERR_PTR(-EINVAL);
1828 ++
1829 + if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1830 + return ERR_PTR(-EINVAL);
1831 +
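
tcp4_gso_segment() now refuses packets whose gso_type does not include SKB_GSO_TCPV4 before touching any header; the same guard appears in the UDPv4, TCPv6, UDPv6 and SCTP segmentation hunks later in this patch, so a packet cannot claim one protocol in its GSO metadata and deliver another. A generic sketch of the flag-before-parse pattern (flag values are illustrative):

        #include <errno.h>
        #include <stdio.h>

        #define GSO_TCPV4 (1 << 0)
        #define GSO_UDP   (1 << 1)

        struct pkt {
                unsigned int gso_type;
                /* headers would follow */
        };

        /* per-protocol handler: bail out unless the declared type matches */
        static int tcp4_segment(const struct pkt *p)
        {
                if (!(p->gso_type & GSO_TCPV4))
                        return -EINVAL; /* metadata lies; never parse */
                return 0;
        }

        int main(void)
        {
                struct pkt udp = { .gso_type = GSO_UDP };
                struct pkt tcp = { .gso_type = GSO_TCPV4 };

                printf("udp via tcp4 handler -> %d\n", tcp4_segment(&udp));
                printf("tcp via tcp4 handler -> %d\n", tcp4_segment(&tcp));
                return 0;
        }
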
1832 +diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
1833 +index 74db43b47917..69523389f067 100644
1834 +--- a/net/ipv4/tcp_timer.c
1835 ++++ b/net/ipv4/tcp_timer.c
1836 +@@ -50,11 +50,19 @@ static void tcp_write_err(struct sock *sk)
1837 + * to prevent DoS attacks. It is called when a retransmission timeout
1838 + * or zero probe timeout occurs on orphaned socket.
1839 + *
1840 ++ * Also close if our net namespace is exiting; in that case there is no
1841 ++ * hope of ever communicating again since all netns interfaces are already
1842 ++ * down (or about to be down), and we need to release our dst references,
1843 ++ * which have been moved to the netns loopback interface, so the namespace
1844 ++ * can finish exiting. This condition is only possible if we are a kernel
1845 ++ * socket, as those do not hold references to the namespace.
1846 ++ *
1847 + * Criteria is still not confirmed experimentally and may change.
1848 + * We kill the socket, if:
1849 + * 1. If number of orphaned sockets exceeds an administratively configured
1850 + * limit.
1851 + * 2. If we have strong memory pressure.
1852 ++ * 3. If our net namespace is exiting.
1853 + */
1854 + static int tcp_out_of_resources(struct sock *sk, bool do_reset)
1855 + {
1856 +@@ -83,6 +91,13 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
1857 + __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
1858 + return 1;
1859 + }
1860 ++
1861 ++ if (!check_net(sock_net(sk))) {
1862 ++ /* Not possible to send reset; just close */
1863 ++ tcp_done(sk);
1864 ++ return 1;
1865 ++ }
1866 ++
1867 + return 0;
1868 + }
1869 +
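
Both the tcp_close() hunk earlier and this tcp_out_of_resources() change call check_net(), which reports whether the socket's network namespace still holds any references, i.e. whether the netns is being dismantled; as the new comment explains, a kernel socket in a dying namespace can never transmit again, so the timer path simply closes it. A hedged userspace analogue using a reference count as the liveness signal (check_net() in the kernel is a refcount test on net->count, to the best of my knowledge):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        struct netns {
                atomic_int count;       /* roughly net->count */
        };

        /* check_net(): a namespace with no references left is exiting */
        static bool check_net(struct netns *net)
        {
                return atomic_load(&net->count) != 0;
        }

        static void retransmit_timer(struct netns *net)
        {
                if (!check_net(net)) {
                        puts("netns exiting: close socket, release dst refs");
                        return;
                }
                puts("netns alive: retransmit");
        }

        int main(void)
        {
                struct netns net = { .count = 1 };

                retransmit_timer(&net);         /* retransmits */
                atomic_store(&net.count, 0);    /* namespace teardown */
                retransmit_timer(&net);         /* closes */
                return 0;
        }
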
1870 +diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
1871 +index 6401574cd638..f4f616eaaeb8 100644
1872 +--- a/net/ipv4/udp_offload.c
1873 ++++ b/net/ipv4/udp_offload.c
1874 +@@ -205,6 +205,9 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
1875 + goto out;
1876 + }
1877 +
1878 ++ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
1879 ++ goto out;
1880 ++
1881 + if (!pskb_may_pull(skb, sizeof(struct udphdr)))
1882 + goto out;
1883 +
1884 +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
1885 +index c46066c5dc27..db2613b4a049 100644
1886 +--- a/net/ipv6/ip6_gre.c
1887 ++++ b/net/ipv6/ip6_gre.c
1888 +@@ -337,11 +337,12 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
1889 +
1890 + nt->dev = dev;
1891 + nt->net = dev_net(dev);
1892 +- ip6gre_tnl_link_config(nt, 1);
1893 +
1894 + if (register_netdevice(dev) < 0)
1895 + goto failed_free;
1896 +
1897 ++ ip6gre_tnl_link_config(nt, 1);
1898 ++
1899 + /* Can use a lockless transmit, unless we generate output sequences */
1900 + if (!(nt->parms.o_flags & TUNNEL_SEQ))
1901 + dev->features |= NETIF_F_LLTX;
1902 +@@ -1263,7 +1264,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
1903 +
1904 + static int ip6gre_tap_init(struct net_device *dev)
1905 + {
1906 +- struct ip6_tnl *tunnel;
1907 + int ret;
1908 +
1909 + ret = ip6gre_tunnel_init_common(dev);
1910 +@@ -1272,10 +1272,6 @@ static int ip6gre_tap_init(struct net_device *dev)
1911 +
1912 + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1913 +
1914 +- tunnel = netdev_priv(dev);
1915 +-
1916 +- ip6gre_tnl_link_config(tunnel, 1);
1917 +-
1918 + return 0;
1919 + }
1920 +
1921 +@@ -1370,7 +1366,6 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
1922 +
1923 + nt->dev = dev;
1924 + nt->net = dev_net(dev);
1925 +- ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1926 +
1927 + dev->features |= GRE6_FEATURES;
1928 + dev->hw_features |= GRE6_FEATURES;
1929 +@@ -1396,6 +1391,11 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
1930 + if (err)
1931 + goto out;
1932 +
1933 ++ ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
1934 ++
1935 ++ if (tb[IFLA_MTU])
1936 ++ ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
1937 ++
1938 + dev_hold(dev);
1939 + ip6gre_tunnel_link(ign, nt);
1940 +
1941 +diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1942 +index 388584b8ff31..2e3db3619858 100644
1943 +--- a/net/ipv6/ip6_output.c
1944 ++++ b/net/ipv6/ip6_output.c
1945 +@@ -156,7 +156,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
1946 + !(IP6CB(skb)->flags & IP6SKB_REROUTED));
1947 + }
1948 +
1949 +-static bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
1950 ++bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
1951 + {
1952 + if (!np->autoflowlabel_set)
1953 + return ip6_default_np_autolabel(net);
1954 +@@ -1260,14 +1260,16 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
1955 + v6_cork->tclass = ipc6->tclass;
1956 + if (rt->dst.flags & DST_XFRM_TUNNEL)
1957 + mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1958 +- rt->dst.dev->mtu : dst_mtu(&rt->dst);
1959 ++ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(&rt->dst);
1960 + else
1961 + mtu = np->pmtudisc >= IPV6_PMTUDISC_PROBE ?
1962 +- rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1963 ++ READ_ONCE(rt->dst.dev->mtu) : dst_mtu(rt->dst.path);
1964 + if (np->frag_size < mtu) {
1965 + if (np->frag_size)
1966 + mtu = np->frag_size;
1967 + }
1968 ++ if (mtu < IPV6_MIN_MTU)
1969 ++ return -EINVAL;
1970 + cork->base.fragsize = mtu;
1971 + if (dst_allfrag(rt->dst.path))
1972 + cork->base.flags |= IPCORK_ALLFRAG;
1973 +@@ -1798,6 +1800,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
1974 + cork.base.flags = 0;
1975 + cork.base.addr = 0;
1976 + cork.base.opt = NULL;
1977 ++ cork.base.dst = NULL;
1978 + v6_cork.opt = NULL;
1979 + err = ip6_setup_cork(sk, &cork, &v6_cork, ipc6, rt, fl6);
1980 + if (err) {
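
ip6_setup_cork() reads dev->mtu while the device MTU can change underneath it; READ_ONCE() forces a single untorn load so later uses cannot observe different values, and the new check rejects a snapshot below IPV6_MIN_MTU (1280 bytes), the smallest MTU IPv6 permits. The cork.base.dst initialization plugs an uninitialized-field use in the ip6_make_skb() error path. A sketch of the snapshot-once rule, with a C11 relaxed atomic load standing in for READ_ONCE() (an analogy, not the kernel macro):

        #include <stdatomic.h>
        #include <stdio.h>

        #define IPV6_MIN_MTU 1280

        static _Atomic unsigned int dev_mtu = 1500;     /* updated concurrently */

        static int setup_cork(unsigned int *fragsize)
        {
                /* one load; every later use sees the same value */
                unsigned int mtu = atomic_load_explicit(&dev_mtu,
                                                        memory_order_relaxed);

                if (mtu < IPV6_MIN_MTU)
                        return -1;      /* -EINVAL in the kernel */
                *fragsize = mtu;
                return 0;
        }

        int main(void)
        {
                unsigned int fragsize;

                if (setup_cork(&fragsize) == 0)
                        printf("fragsize %u\n", fragsize);

                atomic_store(&dev_mtu, 68);     /* admin sets a silly MTU */
                printf("setup_cork -> %d\n", setup_cork(&fragsize));
                return 0;
        }
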
1981 +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
1982 +index 6e3871c7f8f7..bcea985dd76b 100644
1983 +--- a/net/ipv6/ipv6_sockglue.c
1984 ++++ b/net/ipv6/ipv6_sockglue.c
1985 +@@ -1316,7 +1316,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1986 + break;
1987 +
1988 + case IPV6_AUTOFLOWLABEL:
1989 +- val = np->autoflowlabel;
1990 ++ val = ip6_autoflowlabel(sock_net(sk), np);
1991 + break;
1992 +
1993 + default:
1994 +diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c
1995 +index d883c9204c01..278e49cd67d4 100644
1996 +--- a/net/ipv6/tcpv6_offload.c
1997 ++++ b/net/ipv6/tcpv6_offload.c
1998 +@@ -46,6 +46,9 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
1999 + {
2000 + struct tcphdr *th;
2001 +
2002 ++ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
2003 ++ return ERR_PTR(-EINVAL);
2004 ++
2005 + if (!pskb_may_pull(skb, sizeof(*th)))
2006 + return ERR_PTR(-EINVAL);
2007 +
2008 +diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
2009 +index e7d378c032cb..2bd2087bd105 100644
2010 +--- a/net/ipv6/udp_offload.c
2011 ++++ b/net/ipv6/udp_offload.c
2012 +@@ -55,6 +55,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
2013 + const struct ipv6hdr *ipv6h;
2014 + struct udphdr *uh;
2015 +
2016 ++ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
2017 ++ goto out;
2018 ++
2019 + if (!pskb_may_pull(skb, sizeof(struct udphdr)))
2020 + goto out;
2021 +
2022 +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
2023 +index 28d065394c09..3f499126727c 100644
2024 +--- a/net/netfilter/nfnetlink_cthelper.c
2025 ++++ b/net/netfilter/nfnetlink_cthelper.c
2026 +@@ -17,6 +17,7 @@
2027 + #include <linux/types.h>
2028 + #include <linux/list.h>
2029 + #include <linux/errno.h>
2030 ++#include <linux/capability.h>
2031 + #include <net/netlink.h>
2032 + #include <net/sock.h>
2033 +
2034 +@@ -392,6 +393,9 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl,
2035 + struct nfnl_cthelper *nlcth;
2036 + int ret = 0;
2037 +
2038 ++ if (!capable(CAP_NET_ADMIN))
2039 ++ return -EPERM;
2040 ++
2041 + if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE])
2042 + return -EINVAL;
2043 +
2044 +@@ -595,6 +599,9 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl,
2045 + struct nfnl_cthelper *nlcth;
2046 + bool tuple_set = false;
2047 +
2048 ++ if (!capable(CAP_NET_ADMIN))
2049 ++ return -EPERM;
2050 ++
2051 + if (nlh->nlmsg_flags & NLM_F_DUMP) {
2052 + struct netlink_dump_control c = {
2053 + .dump = nfnl_cthelper_dump_table,
2054 +@@ -661,6 +668,9 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
2055 + struct nfnl_cthelper *nlcth, *n;
2056 + int j = 0, ret;
2057 +
2058 ++ if (!capable(CAP_NET_ADMIN))
2059 ++ return -EPERM;
2060 ++
2061 + if (tb[NFCTH_NAME])
2062 + helper_name = nla_data(tb[NFCTH_NAME]);
2063 +
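
nfnl_cthelper_new/get/del gain explicit capable(CAP_NET_ADMIN) checks, and the xt_osf callbacks below receive the same treatment: these handlers configure state that is not per-namespace, so privilege only within a caller's own user namespace should not suffice. A sketch of the privilege-check-first handler shape (capable_net_admin() is a userspace stand-in, not the kernel helper):

        #include <errno.h>
        #include <stdbool.h>
        #include <stdio.h>

        /* stand-in for capable(CAP_NET_ADMIN) in the initial namespace */
        static bool capable_net_admin(bool init_ns_admin)
        {
                return init_ns_admin;
        }

        /* mutating handlers check privilege before parsing any attribute */
        static int cthelper_new(bool init_ns_admin)
        {
                if (!capable_net_admin(init_ns_admin))
                        return -EPERM;
                /* ... validate NFCTH_NAME / NFCTH_TUPLE, register helper ... */
                return 0;
        }

        int main(void)
        {
                printf("unprivileged -> %d\n", cthelper_new(false)); /* -EPERM */
                printf("privileged   -> %d\n", cthelper_new(true));  /* 0 */
                return 0;
        }
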
2064 +diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
2065 +index 2455b69b5810..b589a62e68a2 100644
2066 +--- a/net/netfilter/xt_osf.c
2067 ++++ b/net/netfilter/xt_osf.c
2068 +@@ -19,6 +19,7 @@
2069 + #include <linux/module.h>
2070 + #include <linux/kernel.h>
2071 +
2072 ++#include <linux/capability.h>
2073 + #include <linux/if.h>
2074 + #include <linux/inetdevice.h>
2075 + #include <linux/ip.h>
2076 +@@ -69,6 +70,9 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl,
2077 + struct xt_osf_finger *kf = NULL, *sf;
2078 + int err = 0;
2079 +
2080 ++ if (!capable(CAP_NET_ADMIN))
2081 ++ return -EPERM;
2082 ++
2083 + if (!osf_attrs[OSF_ATTR_FINGER])
2084 + return -EINVAL;
2085 +
2086 +@@ -113,6 +117,9 @@ static int xt_osf_remove_callback(struct net *net, struct sock *ctnl,
2087 + struct xt_osf_finger *sf;
2088 + int err = -ENOENT;
2089 +
2090 ++ if (!capable(CAP_NET_ADMIN))
2091 ++ return -EPERM;
2092 ++
2093 + if (!osf_attrs[OSF_ATTR_FINGER])
2094 + return -EINVAL;
2095 +
2096 +diff --git a/net/sctp/offload.c b/net/sctp/offload.c
2097 +index 4f5a2b580aa5..6300f28c9588 100644
2098 +--- a/net/sctp/offload.c
2099 ++++ b/net/sctp/offload.c
2100 +@@ -44,6 +44,9 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
2101 + struct sk_buff *segs = ERR_PTR(-EINVAL);
2102 + struct sctphdr *sh;
2103 +
2104 ++ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_SCTP))
2105 ++ goto out;
2106 ++
2107 + sh = sctp_hdr(skb);
2108 + if (!pskb_may_pull(skb, sizeof(*sh)))
2109 + goto out;
2110 +diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2111 +index 7181ce6c62bf..c472b8391dde 100644
2112 +--- a/net/sctp/socket.c
2113 ++++ b/net/sctp/socket.c
2114 +@@ -83,7 +83,7 @@
2115 + static int sctp_writeable(struct sock *sk);
2116 + static void sctp_wfree(struct sk_buff *skb);
2117 + static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
2118 +- size_t msg_len, struct sock **orig_sk);
2119 ++ size_t msg_len);
2120 + static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
2121 + static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
2122 + static int sctp_wait_for_accept(struct sock *sk, long timeo);
2123 +@@ -332,16 +332,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
2124 + if (len < sizeof (struct sockaddr))
2125 + return NULL;
2126 +
2127 ++ if (!opt->pf->af_supported(addr->sa.sa_family, opt))
2128 ++ return NULL;
2129 ++
2130 + /* V4 mapped address are really of AF_INET family */
2131 + if (addr->sa.sa_family == AF_INET6 &&
2132 +- ipv6_addr_v4mapped(&addr->v6.sin6_addr)) {
2133 +- if (!opt->pf->af_supported(AF_INET, opt))
2134 +- return NULL;
2135 +- } else {
2136 +- /* Does this PF support this AF? */
2137 +- if (!opt->pf->af_supported(addr->sa.sa_family, opt))
2138 +- return NULL;
2139 +- }
2140 ++ ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
2141 ++ !opt->pf->af_supported(AF_INET, opt))
2142 ++ return NULL;
2143 +
2144 + /* If we get this far, af is valid. */
2145 + af = sctp_get_af_specific(addr->sa.sa_family);
2146 +@@ -1958,7 +1956,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
2147 + timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
2148 + if (!sctp_wspace(asoc)) {
2149 + /* sk can be changed by peel off when waiting for buf. */
2150 +- err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
2151 ++ err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
2152 + if (err) {
2153 + if (err == -ESRCH) {
2154 + /* asoc is already dead. */
2155 +@@ -7441,12 +7439,12 @@ void sctp_sock_rfree(struct sk_buff *skb)
2156 +
2157 + /* Helper function to wait for space in the sndbuf. */
2158 + static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
2159 +- size_t msg_len, struct sock **orig_sk)
2160 ++ size_t msg_len)
2161 + {
2162 + struct sock *sk = asoc->base.sk;
2163 +- int err = 0;
2164 + long current_timeo = *timeo_p;
2165 + DEFINE_WAIT(wait);
2166 ++ int err = 0;
2167 +
2168 + pr_debug("%s: asoc:%p, timeo:%ld, msg_len:%zu\n", __func__, asoc,
2169 + *timeo_p, msg_len);
2170 +@@ -7475,17 +7473,13 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
2171 + release_sock(sk);
2172 + current_timeo = schedule_timeout(current_timeo);
2173 + lock_sock(sk);
2174 +- if (sk != asoc->base.sk) {
2175 +- release_sock(sk);
2176 +- sk = asoc->base.sk;
2177 +- lock_sock(sk);
2178 +- }
2179 ++ if (sk != asoc->base.sk)
2180 ++ goto do_error;
2181 +
2182 + *timeo_p = current_timeo;
2183 + }
2184 +
2185 + out:
2186 +- *orig_sk = sk;
2187 + finish_wait(&asoc->wait, &wait);
2188 +
2189 + /* Release the association's refcnt. */
2190 +diff --git a/net/socket.c b/net/socket.c
2191 +index 05f13b24572c..bd3b33988ee0 100644
2192 +--- a/net/socket.c
2193 ++++ b/net/socket.c
2194 +@@ -2548,6 +2548,15 @@ static int __init sock_init(void)
2195 +
2196 + core_initcall(sock_init); /* early initcall */
2197 +
2198 ++static int __init jit_init(void)
2199 ++{
2200 ++#ifdef CONFIG_BPF_JIT_ALWAYS_ON
2201 ++ bpf_jit_enable = 1;
2202 ++#endif
2203 ++ return 0;
2204 ++}
2205 ++pure_initcall(jit_init);
2206 ++
2207 + #ifdef CONFIG_PROC_FS
2208 + void socket_seq_show(struct seq_file *seq)
2209 + {
2210 +diff --git a/net/tipc/node.c b/net/tipc/node.c
2211 +index 27753325e06e..5b3e1ea37b6d 100644
2212 +--- a/net/tipc/node.c
2213 ++++ b/net/tipc/node.c
2214 +@@ -1848,36 +1848,38 @@ int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
2215 +
2216 + if (strcmp(name, tipc_bclink_name) == 0) {
2217 + err = tipc_nl_add_bc_link(net, &msg);
2218 +- if (err) {
2219 +- nlmsg_free(msg.skb);
2220 +- return err;
2221 +- }
2222 ++ if (err)
2223 ++ goto err_free;
2224 + } else {
2225 + int bearer_id;
2226 + struct tipc_node *node;
2227 + struct tipc_link *link;
2228 +
2229 + node = tipc_node_find_by_name(net, name, &bearer_id);
2230 +- if (!node)
2231 +- return -EINVAL;
2232 ++ if (!node) {
2233 ++ err = -EINVAL;
2234 ++ goto err_free;
2235 ++ }
2236 +
2237 + tipc_node_read_lock(node);
2238 + link = node->links[bearer_id].link;
2239 + if (!link) {
2240 + tipc_node_read_unlock(node);
2241 +- nlmsg_free(msg.skb);
2242 +- return -EINVAL;
2243 ++ err = -EINVAL;
2244 ++ goto err_free;
2245 + }
2246 +
2247 + err = __tipc_nl_add_link(net, &msg, link, 0);
2248 + tipc_node_read_unlock(node);
2249 +- if (err) {
2250 +- nlmsg_free(msg.skb);
2251 +- return err;
2252 +- }
2253 ++ if (err)
2254 ++ goto err_free;
2255 + }
2256 +
2257 + return genlmsg_reply(msg.skb, info);
2258 ++
2259 ++err_free:
2260 ++ nlmsg_free(msg.skb);
2261 ++ return err;
2262 + }
2263 +
2264 + int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
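
tipc_nl_node_get_link() had several early returns that each had to remember to free msg.skb, and the -EINVAL return for an unknown node forgot, leaking the reply buffer; funnelling every failure through a single err_free label makes the cleanup impossible to miss. The same pattern in a freestanding sketch:

        #include <errno.h>
        #include <stdio.h>
        #include <stdlib.h>

        static int get_link(const char *name)
        {
                int err;
                char *reply = malloc(256);      /* msg.skb stand-in */

                if (!reply)
                        return -ENOMEM;

                if (!name) {                    /* node lookup failed */
                        err = -EINVAL;
                        goto err_free;          /* old code returned, leaking */
                }

                snprintf(reply, 256, "link info for %s", name);
                puts(reply);
                free(reply);
                return 0;

        err_free:
                free(reply);                    /* single cleanup point */
                return err;
        }

        int main(void)
        {
                printf("-> %d\n", get_link(NULL));
                printf("-> %d\n", get_link("bearer0"));
                return 0;
        }
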
2265 +diff --git a/tools/usb/usbip/libsrc/usbip_common.c b/tools/usb/usbip/libsrc/usbip_common.c
2266 +index ac73710473de..1517a232ab18 100644
2267 +--- a/tools/usb/usbip/libsrc/usbip_common.c
2268 ++++ b/tools/usb/usbip/libsrc/usbip_common.c
2269 +@@ -215,9 +215,16 @@ int read_usb_interface(struct usbip_usb_device *udev, int i,
2270 + struct usbip_usb_interface *uinf)
2271 + {
2272 + char busid[SYSFS_BUS_ID_SIZE];
2273 ++ int size;
2274 + struct udev_device *sif;
2275 +
2276 +- sprintf(busid, "%s:%d.%d", udev->busid, udev->bConfigurationValue, i);
2277 ++ size = snprintf(busid, sizeof(busid), "%s:%d.%d",
2278 ++ udev->busid, udev->bConfigurationValue, i);
2279 ++ if (size < 0 || (unsigned int)size >= sizeof(busid)) {
2280 ++ err("busid length %i >= %lu or < 0", size,
2281 ++ (long unsigned)sizeof(busid));
2282 ++ return -1;
2283 ++ }
2284 +
2285 + sif = udev_device_new_from_subsystem_sysname(udev_context, "usb", busid);
2286 + if (!sif) {
2287 +diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
2288 +index 9d415228883d..6ff7b601f854 100644
2289 +--- a/tools/usb/usbip/libsrc/usbip_host_common.c
2290 ++++ b/tools/usb/usbip/libsrc/usbip_host_common.c
2291 +@@ -40,13 +40,20 @@ struct udev *udev_context;
2292 + static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
2293 + {
2294 + char status_attr_path[SYSFS_PATH_MAX];
2295 ++ int size;
2296 + int fd;
2297 + int length;
2298 + char status;
2299 + int value = 0;
2300 +
2301 +- snprintf(status_attr_path, SYSFS_PATH_MAX, "%s/usbip_status",
2302 +- udev->path);
2303 ++ size = snprintf(status_attr_path, sizeof(status_attr_path),
2304 ++ "%s/usbip_status", udev->path);
2305 ++ if (size < 0 || (unsigned int)size >= sizeof(status_attr_path)) {
2306 ++ err("usbip_status path length %i >= %lu or < 0", size,
2307 ++ (long unsigned)sizeof(status_attr_path));
2308 ++ return -1;
2309 ++ }
2310 ++
2311 +
2312 + fd = open(status_attr_path, O_RDONLY);
2313 + if (fd < 0) {
2314 +@@ -218,6 +225,7 @@ int usbip_export_device(struct usbip_exported_device *edev, int sockfd)
2315 + {
2316 + char attr_name[] = "usbip_sockfd";
2317 + char sockfd_attr_path[SYSFS_PATH_MAX];
2318 ++ int size;
2319 + char sockfd_buff[30];
2320 + int ret;
2321 +
2322 +@@ -237,10 +245,20 @@ int usbip_export_device(struct usbip_exported_device *edev, int sockfd)
2323 + }
2324 +
2325 + /* only the first interface is true */
2326 +- snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
2327 +- edev->udev.path, attr_name);
2328 ++ size = snprintf(sockfd_attr_path, sizeof(sockfd_attr_path), "%s/%s",
2329 ++ edev->udev.path, attr_name);
2330 ++ if (size < 0 || (unsigned int)size >= sizeof(sockfd_attr_path)) {
2331 ++ err("exported device path length %i >= %lu or < 0", size,
2332 ++ (long unsigned)sizeof(sockfd_attr_path));
2333 ++ return -1;
2334 ++ }
2335 +
2336 +- snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
2337 ++ size = snprintf(sockfd_buff, sizeof(sockfd_buff), "%d\n", sockfd);
2338 ++ if (size < 0 || (unsigned int)size >= sizeof(sockfd_buff)) {
2339 ++ err("socket length %i >= %lu or < 0", size,
2340 ++ (long unsigned)sizeof(sockfd_buff));
2341 ++ return -1;
2342 ++ }
2343 +
2344 + ret = write_sysfs_attribute(sockfd_attr_path, sockfd_buff,
2345 + strlen(sockfd_buff));
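
The usbip hunks above replace bare sprintf() calls into fixed-size buffers with snprintf() plus a return-value check: snprintf() reports the length it would have written, so a result that is negative or >= the buffer size means an encoding error or truncation, and the tool now refuses to proceed with a mangled sysfs path. The check in isolation (the buffer size of 32 is assumed to match the usbip SYSFS_BUS_ID_SIZE constant):

        #include <stdio.h>

        #define SYSFS_BUS_ID_SIZE 32    /* assumed usbip constant */

        static int build_busid(char *busid, const char *base, int config,
                               int ifnum)
        {
                int size = snprintf(busid, SYSFS_BUS_ID_SIZE, "%s:%d.%d",
                                    base, config, ifnum);

                /* size >= bufsize: output truncated; size < 0: encoding error */
                if (size < 0 || (unsigned int)size >= SYSFS_BUS_ID_SIZE) {
                        fprintf(stderr, "busid length %i >= %lu or < 0\n",
                                size, (long unsigned)SYSFS_BUS_ID_SIZE);
                        return -1;
                }
                return 0;
        }

        int main(void)
        {
                char busid[SYSFS_BUS_ID_SIZE];

                printf("short -> %d\n", build_busid(busid, "3-2", 1, 0));
                printf("long  -> %d\n",
                       build_busid(busid,
                                   "a-very-long-synthetic-bus-identifier",
                                   1, 0));
                return 0;
        }
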
2346 +diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
2347 +index ad9204773533..1274f326242c 100644
2348 +--- a/tools/usb/usbip/libsrc/vhci_driver.c
2349 ++++ b/tools/usb/usbip/libsrc/vhci_driver.c
2350 +@@ -55,12 +55,12 @@ static int parse_status(const char *value)
2351 +
2352 + while (*c != '\0') {
2353 + int port, status, speed, devid;
2354 +- unsigned long socket;
2355 ++ int sockfd;
2356 + char lbusid[SYSFS_BUS_ID_SIZE];
2357 +
2358 +- ret = sscanf(c, "%d %d %d %x %lx %31s\n",
2359 ++ ret = sscanf(c, "%d %d %d %x %u %31s\n",
2360 + &port, &status, &speed,
2361 +- &devid, &socket, lbusid);
2362 ++ &devid, &sockfd, lbusid);
2363 +
2364 + if (ret < 5) {
2365 + dbg("sscanf failed: %d", ret);
2366 +@@ -69,7 +69,7 @@ static int parse_status(const char *value)
2367 +
2368 + dbg("port %d status %d speed %d devid %x",
2369 + port, status, speed, devid);
2370 +- dbg("socket %lx lbusid %s", socket, lbusid);
2371 ++ dbg("sockfd %u lbusid %s", sockfd, lbusid);
2372 +
2373 +
2374 + /* if a device is connected, look at it */
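
The kernel side of vhci stopped exposing a raw socket pointer in its status file and now prints the socket fd, so parse_status() must read a decimal %u into an int-sized field instead of %lx into an unsigned long; a conversion specifier that disagrees with its argument's size is undefined behaviour, and here it also parsed the wrong column format. A minimal demonstration of matching specifiers to fields (the sample line is made up):

        #include <stdio.h>

        int main(void)
        {
                /* one line of the new vhci status format (values invented) */
                const char *line = "0 4 2 00010002 6 1-2.3";
                int port, status, speed;
                unsigned int devid, sockfd;
                char lbusid[32];

                int ret = sscanf(line, "%d %d %d %x %u %31s",
                                 &port, &status, &speed, &devid, &sockfd,
                                 lbusid);

                printf("matched %d: port %d sockfd %u lbusid %s\n",
                       ret, port, sockfd, lbusid);
                return 0;
        }
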
2375 +diff --git a/tools/usb/usbip/src/usbip.c b/tools/usb/usbip/src/usbip.c
2376 +index d7599d943529..73d8eee8130b 100644
2377 +--- a/tools/usb/usbip/src/usbip.c
2378 ++++ b/tools/usb/usbip/src/usbip.c
2379 +@@ -176,6 +176,8 @@ int main(int argc, char *argv[])
2380 + break;
2381 + case '?':
2382 + printf("usbip: invalid option\n");
2383 ++ /* Terminate after printing error */
2384 ++ /* FALLTHRU */
2385 + default:
2386 + usbip_usage();
2387 + goto out;
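
The usbip.c change only documents an intentional fallthrough: case '?' prints the invalid-option message and then deliberately falls into default to emit usage and terminate, and the FALLTHRU comment tells both readers and -Wimplicit-fallthrough that the missing break is not a bug. The shape of the idiom in a self-contained sketch:

        #include <stdio.h>

        static void usage(void)
        {
                puts("usage: tool [-h]");
        }

        int main(void)
        {
                int opt = '?';          /* what getopt() returns on bad input */

                switch (opt) {
                case 'h':
                        usage();
                        break;
                case '?':
                        printf("tool: invalid option\n");
                        /* FALLTHRU: invalid option also gets the usage text */
                default:
                        usage();
                        return 1;
                }
                return 0;
        }
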