From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.14 commit in: /
Date: Mon, 05 Sep 2022 12:06:56
Message-Id: 1662379602.7eeb8020c0893360ca00fa632e9a81c67d78b57b.mpagano@gentoo
commit: 7eeb8020c0893360ca00fa632e9a81c67d78b57b
Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Mon Sep 5 12:06:42 2022 +0000
Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Mon Sep 5 12:06:42 2022 +0000
URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7eeb8020

Linux patch 4.14.292

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

0000_README | 4 +
1291_linux-4.14.292.patch | 1371 +++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 1375 insertions(+)

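The change itself is mechanical: 0000_README gains a Patch/From/Desc stanza recording the new patch's origin, and the full 4.14.292 incremental diff lands as 1291_linux-4.14.292.patch. The patch uses git-style a/ and b/ path prefixes, so it applies at strip level 1; a minimal sketch of applying it by hand (the tree location and patch path below are illustrative assumptions, not part of this commit):

    # Illustrative paths; adjust to your own checkout.
    cd /usr/src/linux-4.14.291
    patch -p1 < /path/to/1291_linux-4.14.292.patch
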
diff --git a/0000_README b/0000_README
index 772d458f..0994044d 100644
--- a/0000_README
+++ b/0000_README
@@ -1211,6 +1211,10 @@ Patch: 1290_linux-4.14.291.patch
From: https://www.kernel.org
Desc: Linux 4.14.291

+Patch: 1291_linux-4.14.292.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.292
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1291_linux-4.14.292.patch b/1291_linux-4.14.292.patch
new file mode 100644
index 00000000..fb4ec90b
--- /dev/null
+++ b/1291_linux-4.14.292.patch
@@ -0,0 +1,1371 @@
+diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+index 9393c50b5afc9..c98fd11907cc8 100644
+--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
+@@ -230,6 +230,20 @@ The possible values in this file are:
+ * - 'Mitigation: Clear CPU buffers'
+ - The processor is vulnerable and the CPU buffer clearing mitigation is
+ enabled.
++ * - 'Unknown: No mitigations'
++ - The processor vulnerability status is unknown because it is
++ out of Servicing period. Mitigation is not attempted.
++
++Definitions:
++------------
++
++Servicing period: The process of providing functional and security updates to
++Intel processors or platforms, utilizing the Intel Platform Update (IPU)
++process or other similar mechanisms.
++
++End of Servicing Updates (ESU): ESU is the date at which Intel will no
++longer provide Servicing, such as through IPU or other similar update
++processes. ESU dates will typically be aligned to end of quarter.
+
+ If the processor is vulnerable then the following information is appended to
+ the above information:
+diff --git a/Makefile b/Makefile
+index d5e2bea38d6cb..7fa724cacd233 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 291
++SUBLEVEL = 292
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
+index 5a77dc775cc3c..9494e35bf3a2a 100644
+--- a/arch/arm64/include/asm/mmu.h
++++ b/arch/arm64/include/asm/mmu.h
+@@ -91,7 +91,7 @@ extern void init_mem_pgprot(void);
+ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
+ unsigned long virt, phys_addr_t size,
+ pgprot_t prot, bool page_mappings_only);
+-extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
++extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
+ extern void mark_linear_text_alias_ro(void);
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
+index ae72782860949..17fa1d363fff2 100644
+--- a/arch/arm64/kernel/kaslr.c
++++ b/arch/arm64/kernel/kaslr.c
+@@ -65,9 +65,6 @@ out:
+ return default_cmdline;
+ }
+
+-extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
+- pgprot_t prot);
+-
+ /*
+ * This routine will be executed with the kernel mapped at its default virtual
+ * address, and if it returns successfully, the kernel will be remapped, and
+@@ -96,7 +93,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
+ * attempt at mapping the FDT in setup_machine()
+ */
+ early_fixmap_init();
+- fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
++ fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ if (!fdt)
+ return 0;
+
+diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
+index d4b740538ad57..01b15d9dd8d62 100644
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -179,9 +179,13 @@ static void __init smp_build_mpidr_hash(void)
+
+ static void __init setup_machine_fdt(phys_addr_t dt_phys)
+ {
+- void *dt_virt = fixmap_remap_fdt(dt_phys);
++ int size;
++ void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
+ const char *name;
+
++ if (dt_virt)
++ memblock_reserve(dt_phys, size);
+
+ if (!dt_virt || !early_init_dt_scan(dt_virt)) {
+ pr_crit("\n"
+ "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
+@@ -193,6 +197,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
+ cpu_relax();
+ }
+
++ /* Early fixups are done, map the FDT as read-only now */
++ fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
++
+ name = of_flat_dt_get_machine_name();
+ if (!name)
+ return;
+diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
+index 4d472907194dd..ce8c57d70e5fc 100644
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -836,7 +836,7 @@ void __set_fixmap(enum fixed_addresses idx,
+ }
+ }
+
+-void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
++void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+ {
+ const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
+ int offset;
+@@ -889,19 +889,6 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
+ return dt_virt;
+ }
+
+-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+-{
+- void *dt_virt;
+- int size;
+-
+- dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+- if (!dt_virt)
+- return NULL;
+-
+- memblock_reserve(dt_phys, size);
+- return dt_virt;
+-}
+-
+ int __init arch_ioremap_pud_supported(void)
+ {
+ /*
+diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
+index b3c19ab2485ce..5a20f3eb3f859 100644
+--- a/arch/parisc/kernel/unaligned.c
++++ b/arch/parisc/kernel/unaligned.c
+@@ -121,7 +121,7 @@
+ #define R1(i) (((i)>>21)&0x1f)
+ #define R2(i) (((i)>>16)&0x1f)
+ #define R3(i) ((i)&0x1f)
+-#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
++#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
+ #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
+ #define IM5_2(i) IM((i)>>16,5)
+ #define IM5_3(i) IM((i),5)
+diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
+index be8cc53204b50..46338c65c75bf 100644
+--- a/arch/s390/hypfs/hypfs_diag.c
++++ b/arch/s390/hypfs/hypfs_diag.c
+@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
+ int rc;
+
+ if (diag204_probe()) {
+- pr_err("The hardware system does not support hypfs\n");
++ pr_info("The hardware system does not support hypfs\n");
+ return -ENODATA;
+ }
+ if (diag204_info_type == DIAG204_INFO_EXT) {
+diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
+index 32f5b3fb069f3..2a34c075fef66 100644
+--- a/arch/s390/hypfs/inode.c
++++ b/arch/s390/hypfs/inode.c
+@@ -494,9 +494,9 @@ fail_hypfs_vm_exit:
+ hypfs_vm_exit();
+ fail_hypfs_diag_exit:
+ hypfs_diag_exit();
++ pr_err("Initialization of hypfs failed with rc=%i\n", rc);
+ fail_dbfs_exit:
+ hypfs_dbfs_exit();
+- pr_err("Initialization of hypfs failed with rc=%i\n", rc);
+ return rc;
+ }
+ device_initcall(hypfs_init)
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index 40f1888bc4ab7..8456e941d4838 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -433,7 +433,9 @@ static inline int do_exception(struct pt_regs *regs, int access)
+ flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+- if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
++ if ((trans_exc_code & store_indication) == 0x400)
++ access = VM_WRITE;
++ if (access == VM_WRITE)
+ flags |= FAULT_FLAG_WRITE;
+ down_read(&mm->mmap_sem);
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+index e874b1709d9a2..d56634d6b10cf 100644
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -394,5 +394,6 @@
+ #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+ #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+ #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
++#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+
+ #endif /* _ASM_X86_CPUFEATURES_H */
+diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
+index 98823250a5211..05d2d7169ab8f 100644
+--- a/arch/x86/include/asm/intel-family.h
++++ b/arch/x86/include/asm/intel-family.h
+@@ -71,6 +71,9 @@
+ #define INTEL_FAM6_ALDERLAKE 0x97
+ #define INTEL_FAM6_ALDERLAKE_L 0x9A
+
++#define INTEL_FAM6_TIGERLAKE_L 0x8C
++#define INTEL_FAM6_TIGERLAKE 0x8D
++
+ /* "Small Core" Processors (Atom) */
+
+ #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 4037317f55e7c..68056ee5dff9f 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -396,7 +396,8 @@ static void __init mmio_select_mitigation(void)
+ u64 ia32_cap;
+
+ if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
+- cpu_mitigations_off()) {
++ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
++ cpu_mitigations_off()) {
+ mmio_mitigation = MMIO_MITIGATION_OFF;
+ return;
+ }
+@@ -501,6 +502,8 @@ out:
+ pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
+ if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
+ pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
++ else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
++ pr_info("MMIO Stale Data: Unknown: No mitigations\n");
+ }
+
+ static void __init md_clear_select_mitigation(void)
+@@ -1823,6 +1826,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
+
+ static ssize_t mmio_stale_data_show_state(char *buf)
+ {
++ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
++ return sysfs_emit(buf, "Unknown: No mitigations\n");
++
+ if (mmio_mitigation == MMIO_MITIGATION_OFF)
+ return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
+
+@@ -1933,6 +1939,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
+ return srbds_show_state(buf);
+
+ case X86_BUG_MMIO_STALE_DATA:
++ case X86_BUG_MMIO_UNKNOWN:
+ return mmio_stale_data_show_state(buf);
+
+ default:
+@@ -1989,6 +1996,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
+
+ ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
++ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
++ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
++ else
++ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
+ }
+ #endif
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index 608f37ac9c7bd..e72a21c207724 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -905,6 +905,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+ #define MSBDS_ONLY BIT(5)
+ #define NO_SWAPGS BIT(6)
+ #define NO_ITLB_MULTIHIT BIT(7)
++#define NO_MMIO BIT(8)
+
+ #define VULNWL(_vendor, _family, _model, _whitelist) \
+ { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
+@@ -922,6 +923,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
+
+ /* Intel Family 6 */
++ VULNWL_INTEL(TIGERLAKE, NO_MMIO),
++ VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
++ VULNWL_INTEL(ALDERLAKE, NO_MMIO),
++ VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),
++
+ VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
+ VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
+@@ -939,9 +945,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+
+- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+
+ /*
+ * Technically, swapgs isn't serializing on AMD (despite it previously
+@@ -954,13 +960,13 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
+ VULNWL_INTEL(ATOM_TREMONT_X, NO_ITLB_MULTIHIT),
+
+ /* AMD Family 0xf - 0x12 */
+- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
+- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+
+ /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
+- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+ {}
+ };
+
+@@ -1100,10 +1106,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
+ * Affected CPU list is generally enough to enumerate the vulnerability,
+ * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
+ * not want the guest to enumerate the bug.
++ *
++ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
++ * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
+ */
+- if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
+- !arch_cap_mmio_immune(ia32_cap))
+- setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
++ if (!arch_cap_mmio_immune(ia32_cap)) {
++ if (cpu_matches(cpu_vuln_blacklist, MMIO))
++ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
++ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
++ setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
++ }
+
+ if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
+ return;
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 4c115c1e9209d..6acb174c9a12d 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1212,6 +1212,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+ info->lo_number = lo->lo_number;
+ info->lo_offset = lo->lo_offset;
+ info->lo_sizelimit = lo->lo_sizelimit;
++
++ /* loff_t vars have been assigned __u64 */
++ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
++ return -EOVERFLOW;
++
+ info->lo_flags = lo->lo_flags;
+ memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
+ memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index 5243c41208191..7b3135c9c7848 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -354,10 +354,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
+ unsigned int minor = iminor(inode);
+ struct hidraw_list *list = file->private_data;
+ unsigned long flags;
++ int i;
+
+ mutex_lock(&minors_lock);
+
+ spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
++ for (i = list->tail; i < list->head; i++)
++ kfree(list->buffer[i].value);
+ list_del(&list->node);
+ spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
+ kfree(list);
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 36d4cc1d74293..72f64ec88602f 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -5908,6 +5908,7 @@ void md_stop(struct mddev *mddev)
+ /* stop the array and free an attached data structures.
+ * This is called from dm-raid
+ */
++ __md_stop_writes(mddev);
+ __md_stop(mddev);
+ if (mddev->bio_set)
+ bioset_free(mddev->bio_set);
+diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+index 4b0d44e253968..0abe50f1965aa 100644
+--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+@@ -2604,6 +2604,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
+ del_timer_sync(&hdw->encoder_run_timer);
+ del_timer_sync(&hdw->encoder_wait_timer);
+ flush_work(&hdw->workpoll);
++ v4l2_device_unregister(&hdw->v4l2_dev);
+ usb_free_urb(hdw->ctl_read_urb);
+ usb_free_urb(hdw->ctl_write_urb);
+ kfree(hdw->ctl_read_buffer);
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index b3eaef31b7673..a6bb7e915f74f 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -1977,30 +1977,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
+ */
+ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
+ {
+- /* check that the bond is not initialized yet */
+- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
+- bond->dev->dev_addr)) {
+-
+- BOND_AD_INFO(bond).aggregator_identifier = 0;
+-
+- BOND_AD_INFO(bond).system.sys_priority =
+- bond->params.ad_actor_sys_prio;
+- if (is_zero_ether_addr(bond->params.ad_actor_system))
+- BOND_AD_INFO(bond).system.sys_mac_addr =
+- *((struct mac_addr *)bond->dev->dev_addr);
+- else
+- BOND_AD_INFO(bond).system.sys_mac_addr =
+- *((struct mac_addr *)bond->params.ad_actor_system);
++ BOND_AD_INFO(bond).aggregator_identifier = 0;
++ BOND_AD_INFO(bond).system.sys_priority =
++ bond->params.ad_actor_sys_prio;
++ if (is_zero_ether_addr(bond->params.ad_actor_system))
++ BOND_AD_INFO(bond).system.sys_mac_addr =
++ *((struct mac_addr *)bond->dev->dev_addr);
++ else
++ BOND_AD_INFO(bond).system.sys_mac_addr =
++ *((struct mac_addr *)bond->params.ad_actor_system);
+
+- /* initialize how many times this module is called in one
+- * second (should be about every 100ms)
+- */
+- ad_ticks_per_sec = tick_resolution;
++ /* initialize how many times this module is called in one
++ * second (should be about every 100ms)
++ */
++ ad_ticks_per_sec = tick_resolution;
+
+- bond_3ad_initiate_agg_selection(bond,
+- AD_AGGREGATOR_SELECTION_TIMER *
+- ad_ticks_per_sec);
+- }
++ bond_3ad_initiate_agg_selection(bond,
++ AD_AGGREGATOR_SELECTION_TIMER *
++ ad_ticks_per_sec);
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+index 86d6924a2b714..ad51b521e693a 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+@@ -1090,7 +1090,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+ struct cyclecounter cc;
+ unsigned long flags;
+ u32 incval = 0;
+- u32 tsauxc = 0;
+ u32 fuse0 = 0;
+
+ /* For some of the boards below this mask is technically incorrect.
+@@ -1125,18 +1124,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+ case ixgbe_mac_x550em_a:
+ case ixgbe_mac_X550:
+ cc.read = ixgbe_ptp_read_X550;
+-
+- /* enable SYSTIME counter */
+- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
+- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
+- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
+- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
+- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
+- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
+- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
+-
+- IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_mac_X540:
+ cc.read = ixgbe_ptp_read_82599;
+@@ -1168,6 +1155,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
+ spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+ }
+
++/**
++ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
++ * @adapter: the ixgbe private board structure
++ *
++ * Initialize and start the SYSTIME registers.
++ */
++static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
++{
++ struct ixgbe_hw *hw = &adapter->hw;
++ u32 tsauxc;
++
++ switch (hw->mac.type) {
++ case ixgbe_mac_X550EM_x:
++ case ixgbe_mac_x550em_a:
++ case ixgbe_mac_X550:
++ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
++
++ /* Reset SYSTIME registers to 0 */
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
++
++ /* Reset interrupt settings */
++ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
++
++ /* Activate the SYSTIME counter */
++ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
++ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
++ break;
++ case ixgbe_mac_X540:
++ case ixgbe_mac_82599EB:
++ /* Reset SYSTIME registers to 0 */
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
++ break;
++ default:
++ /* Other devices aren't supported */
++ return;
++ };
++
++ IXGBE_WRITE_FLUSH(hw);
++}
++
+ /**
+ * ixgbe_ptp_reset
+ * @adapter: the ixgbe private board structure
+@@ -1194,6 +1225,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
+
+ ixgbe_ptp_start_cyclecounter(adapter);
+
++ ixgbe_ptp_init_systime(adapter);
++
+ spin_lock_irqsave(&adapter->tmreg_lock, flags);
+ timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
+ ktime_to_ns(ktime_get_real()));
+diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
+index 0bcc07f346c3e..2e517e30c5ac1 100644
+--- a/drivers/net/ipvlan/ipvtap.c
++++ b/drivers/net/ipvlan/ipvtap.c
+@@ -193,7 +193,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
+ .notifier_call = ipvtap_device_event,
+ };
+
+-static int ipvtap_init(void)
++static int __init ipvtap_init(void)
+ {
+ int err;
+
+@@ -227,7 +227,7 @@ out1:
+ }
+ module_init(ipvtap_init);
+
+-static void ipvtap_exit(void)
++static void __exit ipvtap_exit(void)
+ {
+ rtnl_link_unregister(&ipvtap_link_ops);
+ unregister_netdevice_notifier(&ipvtap_notifier_block);
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index 34d9148d27660..c57f91f484235 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -753,6 +753,7 @@ int amd_gpio_suspend(struct device *dev)
+ struct platform_device *pdev = to_platform_device(dev);
+ struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++ unsigned long flags;
+ int i;
+
+ for (i = 0; i < desc->npins; i++) {
+@@ -761,7 +762,9 @@ int amd_gpio_suspend(struct device *dev)
+ if (!amd_gpio_should_save(gpio_dev, pin))
+ continue;
+
+- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
++ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
++ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ }
+
+ return 0;
+@@ -772,6 +775,7 @@ int amd_gpio_resume(struct device *dev)
+ struct platform_device *pdev = to_platform_device(dev);
+ struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++ unsigned long flags;
+ int i;
+
+ for (i = 0; i < desc->npins; i++) {
+@@ -780,7 +784,10 @@ int amd_gpio_resume(struct device *dev)
+ if (!amd_gpio_should_save(gpio_dev, pin))
+ continue;
+
+- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
++ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
++ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
++ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ }
+
+ return 0;
+diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
+index bd6c2f5f6095d..a5375b09415a6 100644
+--- a/drivers/video/fbdev/pm2fb.c
++++ b/drivers/video/fbdev/pm2fb.c
+@@ -614,6 +614,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+ return -EINVAL;
+ }
+
++ if (!var->pixclock) {
++ DPRINTK("pixclock is zero\n");
++ return -EINVAL;
++ }
++
+ if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
+ DPRINTK("pixclock too high (%ldKHz)\n",
+ PICOS2KHZ(var->pixclock));
+diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
+index 2c7e53f9ff1b9..3b9c06ee472b2 100644
+--- a/fs/btrfs/xattr.c
++++ b/fs/btrfs/xattr.c
+@@ -378,6 +378,9 @@ static int btrfs_xattr_handler_get(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+ {
++ if (btrfs_root_readonly(BTRFS_I(inode)->root))
++ return -EROFS;
++
+ name = xattr_full_name(handler, name);
+ return __btrfs_getxattr(inode, name, buffer, size);
+ }
+diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
+index 6d95769310842..adb680f18d818 100644
+--- a/include/asm-generic/sections.h
++++ b/include/asm-generic/sections.h
+@@ -92,7 +92,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
+ /**
+ * memory_intersects - checks if the region occupied by an object intersects
+ * with another memory region
+- * @begin: virtual address of the beginning of the memory regien
++ * @begin: virtual address of the beginning of the memory region
+ * @end: virtual address of the end of the memory region
+ * @virt: virtual address of the memory object
+ * @size: size of the memory object
+@@ -105,7 +105,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
+ {
+ void *vend = virt + size;
+
+- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
++ if (virt < end && vend > begin)
++ return true;
++
++ return false;
+ }
+
+ /**
+diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
+index 0773b5a032f1f..f014aee2f7184 100644
+--- a/include/linux/netfilter_bridge/ebtables.h
++++ b/include/linux/netfilter_bridge/ebtables.h
+@@ -98,10 +98,6 @@ struct ebt_table {
+ struct ebt_replace_kernel *table;
+ unsigned int valid_hooks;
+ rwlock_t lock;
+- /* e.g. could be the table explicitly only allows certain
+- * matches, targets, ... 0 == let it in */
+- int (*check)(const struct ebt_table_info *info,
+- unsigned int valid_hooks);
+ /* the data used by the kernel */
+ struct ebt_table_info *private;
+ struct module *me;
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 91ccae9467164..c80bd129e9399 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -39,12 +39,15 @@ struct anon_vma {
+ atomic_t refcount;
+
+ /*
+- * Count of child anon_vmas and VMAs which points to this anon_vma.
++ * Count of child anon_vmas. Equals to the count of all anon_vmas that
++ * have ->parent pointing to this one, including itself.
+ *
+ * This counter is used for making decision about reusing anon_vma
+ * instead of forking new one. See comments in function anon_vma_clone.
+ */
+- unsigned degree;
++ unsigned long num_children;
++ /* Count of VMAs whose ->anon_vma pointer points to this object. */
++ unsigned long num_active_vmas;
+
+ struct anon_vma *parent; /* Parent of this anon_vma */
+
+diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
+index 5dd22b740f9ce..4a9fc96317a9e 100644
+--- a/include/net/busy_poll.h
++++ b/include/net/busy_poll.h
+@@ -43,7 +43,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
+
+ static inline bool net_busy_loop_on(void)
+ {
+- return sysctl_net_busy_poll;
++ return READ_ONCE(sysctl_net_busy_poll);
+ }
+
+ static inline bool sk_can_busy_loop(const struct sock *sk)
+diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
+index 52f368b6561e9..1520962b840cb 100644
+--- a/kernel/audit_fsnotify.c
++++ b/kernel/audit_fsnotify.c
+@@ -111,6 +111,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
+
+ ret = fsnotify_add_mark(&audit_mark->mark, inode, NULL, true);
+ if (ret < 0) {
++ audit_mark->path = NULL;
+ fsnotify_put_mark(&audit_mark->mark);
+ audit_mark = ERR_PTR(ret);
+ }
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index b8e14aa6d4967..384b083f2a7ef 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -1687,12 +1687,14 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
+ /* Try to disarm and disable this/parent probe */
+ if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
+ /*
+- * If kprobes_all_disarmed is set, orig_p
+- * should have already been disarmed, so
+- * skip unneed disarming process.
++ * Don't be lazy here. Even if 'kprobes_all_disarmed'
++ * is false, 'orig_p' might not have been armed yet.
++ * Note arm_all_kprobes() __tries__ to arm all kprobes
++ * on the best effort basis.
+ */
+- if (!kprobes_all_disarmed)
++ if (!kprobes_all_disarmed && !kprobe_disabled(orig_p))
+ disarm_kprobe(orig_p, true);
++
+ orig_p->flags |= KPROBE_FLAG_DISABLED;
+ }
+ }
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 7d734b4144fd7..4da64244f83df 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2818,6 +2818,16 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
+
+ ftrace_startup_enable(command);
+
++ /*
++ * If ftrace is in an undefined state, we just remove ops from list
++ * to prevent the NULL pointer, instead of totally rolling it back and
++ * free trampoline, because those actions could cause further damage.
++ */
++ if (unlikely(ftrace_disabled)) {
++ __unregister_ftrace_function(ops);
++ return -ENODEV;
++ }
++
+ ops->flags &= ~FTRACE_OPS_FL_ADDING;
+
+ return 0;
+diff --git a/lib/ratelimit.c b/lib/ratelimit.c
+index d01f471352390..b805702de84dd 100644
+--- a/lib/ratelimit.c
++++ b/lib/ratelimit.c
+@@ -27,10 +27,16 @@
+ */
+ int ___ratelimit(struct ratelimit_state *rs, const char *func)
+ {
++ /* Paired with WRITE_ONCE() in .proc_handler().
++ * Changing two values seperately could be inconsistent
++ * and some message could be lost. (See: net_ratelimit_state).
++ */
++ int interval = READ_ONCE(rs->interval);
++ int burst = READ_ONCE(rs->burst);
+ unsigned long flags;
+ int ret;
+
+- if (!rs->interval)
++ if (!interval)
+ return 1;
+
+ /*
+@@ -45,7 +51,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
+ if (!rs->begin)
+ rs->begin = jiffies;
+
+- if (time_is_before_jiffies(rs->begin + rs->interval)) {
++ if (time_is_before_jiffies(rs->begin + interval)) {
+ if (rs->missed) {
+ if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
+ printk_deferred(KERN_WARNING
+@@ -57,7 +63,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
+ rs->begin = jiffies;
+ rs->printed = 0;
+ }
+- if (rs->burst && rs->burst > rs->printed) {
++ if (burst && burst > rs->printed) {
+ rs->printed++;
+ ret = 1;
+ } else {
+diff --git a/mm/mmap.c b/mm/mmap.c
+index a29d5b1fa1a18..17caf44807dee 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1598,8 +1598,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
+ pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
+ return 0;
+
+- /* Do we need to track softdirty? */
+- if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
++ /*
++ * Do we need to track softdirty? hugetlb does not support softdirty
++ * tracking yet.
++ */
++ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) &&
++ !is_vm_hugetlb_page(vma))
+ return 1;
+
+ /* Specialty mapping? */
+@@ -2525,6 +2529,18 @@ static void unmap_region(struct mm_struct *mm,
+ tlb_gather_mmu(&tlb, mm, start, end);
+ update_hiwater_rss(mm);
+ unmap_vmas(&tlb, vma, start, end);
++
++ /*
++ * Ensure we have no stale TLB entries by the time this mapping is
++ * removed from the rmap.
++ * Note that we don't have to worry about nested flushes here because
++ * we're holding the mm semaphore for removing the mapping - so any
++ * concurrent flush in this region has to be coming through the rmap,
++ * and we synchronize against that using the rmap lock.
++ */
++ if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
++ tlb_flush_mmu(&tlb);
++
+ free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+ next ? next->vm_start : USER_PGTABLES_CEILING);
+ tlb_finish_mmu(&tlb, start, end);
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 65de683e7f7cc..511853ed58d9e 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -82,7 +82,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
+ anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
+ if (anon_vma) {
+ atomic_set(&anon_vma->refcount, 1);
+- anon_vma->degree = 1; /* Reference for first vma */
++ anon_vma->num_children = 0;
++ anon_vma->num_active_vmas = 0;
+ anon_vma->parent = anon_vma;
+ /*
+ * Initialise the anon_vma root to point to itself. If called
+@@ -190,6 +191,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
+ anon_vma = anon_vma_alloc();
+ if (unlikely(!anon_vma))
+ goto out_enomem_free_avc;
++ anon_vma->num_children++; /* self-parent link for new root */
+ allocated = anon_vma;
+ }
+
+@@ -199,8 +201,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
+ if (likely(!vma->anon_vma)) {
+ vma->anon_vma = anon_vma;
+ anon_vma_chain_link(vma, avc, anon_vma);
+- /* vma reference or self-parent link for new root */
+- anon_vma->degree++;
++ anon_vma->num_active_vmas++;
+ allocated = NULL;
+ avc = NULL;
+ }
+@@ -279,19 +280,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
+ anon_vma_chain_link(dst, avc, anon_vma);
+
+ /*
+- * Reuse existing anon_vma if its degree lower than two,
+- * that means it has no vma and only one anon_vma child.
++ * Reuse existing anon_vma if it has no vma and only one
++ * anon_vma child.
+ *
+- * Do not chose parent anon_vma, otherwise first child
+- * will always reuse it. Root anon_vma is never reused:
++ * Root anon_vma is never reused:
+ * it has self-parent reference and at least one child.
+ */
+- if (!dst->anon_vma && anon_vma != src->anon_vma &&
+- anon_vma->degree < 2)
++ if (!dst->anon_vma &&
++ anon_vma->num_children < 2 &&
++ anon_vma->num_active_vmas == 0)
+ dst->anon_vma = anon_vma;
+ }
+ if (dst->anon_vma)
+- dst->anon_vma->degree++;
++ dst->anon_vma->num_active_vmas++;
+ unlock_anon_vma_root(root);
+ return 0;
+
+@@ -341,6 +342,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ anon_vma = anon_vma_alloc();
+ if (!anon_vma)
+ goto out_error;
++ anon_vma->num_active_vmas++;
+ avc = anon_vma_chain_alloc(GFP_KERNEL);
+ if (!avc)
+ goto out_error_free_anon_vma;
+@@ -361,7 +363,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
+ vma->anon_vma = anon_vma;
+ anon_vma_lock_write(anon_vma);
+ anon_vma_chain_link(vma, avc, anon_vma);
+- anon_vma->parent->degree++;
++ anon_vma->parent->num_children++;
+ anon_vma_unlock_write(anon_vma);
+
+ return 0;
+@@ -393,7 +395,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+ * to free them outside the lock.
+ */
+ if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
+- anon_vma->parent->degree--;
++ anon_vma->parent->num_children--;
+ continue;
+ }
+
+@@ -401,7 +403,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+ anon_vma_chain_free(avc);
+ }
+ if (vma->anon_vma)
+- vma->anon_vma->degree--;
++ vma->anon_vma->num_active_vmas--;
+ unlock_anon_vma_root(root);
+
+ /*
+@@ -412,7 +414,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
+ list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+ struct anon_vma *anon_vma = avc->anon_vma;
+
+- VM_WARN_ON(anon_vma->degree);
++ VM_WARN_ON(anon_vma->num_children);
++ VM_WARN_ON(anon_vma->num_active_vmas);
+ put_anon_vma(anon_vma);
+
+ list_del(&avc->same_vma);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index dbfe9a916581b..e45a12378bd10 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -1826,11 +1826,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
+ src_match = !bacmp(&c->src, src);
+ dst_match = !bacmp(&c->dst, dst);
+ if (src_match && dst_match) {
+- c = l2cap_chan_hold_unless_zero(c);
+- if (c) {
+- read_unlock(&chan_list_lock);
+- return c;
+- }
++ if (!l2cap_chan_hold_unless_zero(c))
++ continue;
++
++ read_unlock(&chan_list_lock);
++ return c;
+ }
+
+ /* Closest match */
+diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
+index 276b60262981c..b21c8a317be73 100644
+--- a/net/bridge/netfilter/ebtable_broute.c
++++ b/net/bridge/netfilter/ebtable_broute.c
+@@ -33,18 +33,10 @@ static struct ebt_replace_kernel initial_table = {
+ .entries = (char *)&initial_chain,
+ };
+
+-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
+-{
+- if (valid_hooks & ~(1 << NF_BR_BROUTING))
+- return -EINVAL;
+- return 0;
+-}
+-
+ static const struct ebt_table broute_table = {
+ .name = "broute",
+ .table = &initial_table,
+ .valid_hooks = 1 << NF_BR_BROUTING,
+- .check = check,
+ .me = THIS_MODULE,
+ };
+
+diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
+index c41da5fac84f4..c59021989af32 100644
+--- a/net/bridge/netfilter/ebtable_filter.c
++++ b/net/bridge/netfilter/ebtable_filter.c
+@@ -41,18 +41,10 @@ static struct ebt_replace_kernel initial_table = {
+ .entries = (char *)initial_chains,
+ };
+
+-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
+-{
+- if (valid_hooks & ~FILTER_VALID_HOOKS)
+- return -EINVAL;
+- return 0;
+-}
+-
+ static const struct ebt_table frame_filter = {
+ .name = "filter",
+ .table = &initial_table,
+ .valid_hooks = FILTER_VALID_HOOKS,
+- .check = check,
+ .me = THIS_MODULE,
+ };
+
+diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
+index 08df7406ecb38..1bb12157ce09d 100644
+--- a/net/bridge/netfilter/ebtable_nat.c
++++ b/net/bridge/netfilter/ebtable_nat.c
+@@ -41,18 +41,10 @@ static struct ebt_replace_kernel initial_table = {
+ .entries = (char *)initial_chains,
+ };
+
+-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
+-{
+- if (valid_hooks & ~NAT_VALID_HOOKS)
+- return -EINVAL;
+- return 0;
+-}
+-
+ static const struct ebt_table frame_nat = {
+ .name = "nat",
+ .table = &initial_table,
+ .valid_hooks = NAT_VALID_HOOKS,
+- .check = check,
+ .me = THIS_MODULE,
+ };
+
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index a1834ad7422ce..a54149f10f7ef 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -991,8 +991,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
+ goto free_iterate;
+ }
+
+- /* the table doesn't like it */
+- if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
++ if (repl->valid_hooks != t->valid_hooks)
+ goto free_unlock;
+
+ if (repl->num_counters && repl->num_counters != t->private->nentries) {
+@@ -1200,11 +1199,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
+ if (ret != 0)
+ goto free_chainstack;
+
+- if (table->check && table->check(newinfo, table->valid_hooks)) {
+- ret = -EINVAL;
+- goto free_chainstack;
+- }
+-
+ table->private = newinfo;
+ rwlock_init(&table->lock);
+ mutex_lock(&ebt_mutex);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index ea09e0809c122..4741c239af170 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5186,7 +5186,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
+ net_rps_action_and_irq_enable(sd);
+ }
+
+- napi->weight = dev_rx_weight;
++ napi->weight = READ_ONCE(dev_rx_weight);
+ while (again) {
+ struct sk_buff *skb;
+
+@@ -5648,8 +5648,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
+ {
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+ unsigned long time_limit = jiffies +
+- usecs_to_jiffies(netdev_budget_usecs);
+- int budget = netdev_budget;
++ usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
++ int budget = READ_ONCE(netdev_budget);
+ LIST_HEAD(list);
+ LIST_HEAD(repoll);
+
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 358e84af0210b..51aacfdd4fb77 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -222,11 +222,26 @@ static int neigh_del_timer(struct neighbour *n)
+ return 0;
+ }
+
+-static void pneigh_queue_purge(struct sk_buff_head *list)
++static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
+ {
++ struct sk_buff_head tmp;
++ unsigned long flags;
+ struct sk_buff *skb;
+
+- while ((skb = skb_dequeue(list)) != NULL) {
++ skb_queue_head_init(&tmp);
++ spin_lock_irqsave(&list->lock, flags);
++ skb = skb_peek(list);
++ while (skb != NULL) {
++ struct sk_buff *skb_next = skb_peek_next(skb, list);
++ if (net == NULL || net_eq(dev_net(skb->dev), net)) {
++ __skb_unlink(skb, list);
++ __skb_queue_tail(&tmp, skb);
++ }
++ skb = skb_next;
++ }
++ spin_unlock_irqrestore(&list->lock, flags);
++
++ while ((skb = __skb_dequeue(&tmp))) {
+ dev_put(skb->dev);
+ kfree_skb(skb);
+ }
+@@ -295,9 +310,9 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+ write_lock_bh(&tbl->lock);
+ neigh_flush_dev(tbl, dev);
+ pneigh_ifdown_and_unlock(tbl, dev);
+-
+- del_timer_sync(&tbl->proxy_timer);
+- pneigh_queue_purge(&tbl->proxy_queue);
++ pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
++ if (skb_queue_empty_lockless(&tbl->proxy_queue))
++ del_timer_sync(&tbl->proxy_timer);
+ return 0;
+ }
+ EXPORT_SYMBOL(neigh_ifdown);
+@@ -1609,7 +1624,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
+ /* It is not clean... Fix it to unload IPv6 module safely */
+ cancel_delayed_work_sync(&tbl->gc_work);
+ del_timer_sync(&tbl->proxy_timer);
+- pneigh_queue_purge(&tbl->proxy_queue);
++ pneigh_queue_purge(&tbl->proxy_queue, NULL);
+ neigh_ifdown(tbl, NULL);
+ if (atomic_read(&tbl->entries))
+ pr_crit("neighbour leakage\n");
+diff --git a/net/core/skbuff.c b/net/core/skbuff.c
+index 629997753f69b..11d0ffc51c24a 100644
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -4352,7 +4352,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
+ {
+ bool ret;
+
+- if (likely(sysctl_tstamp_allow_data || tsonly))
++ if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
+ return true;
+
+ read_lock_bh(&sk->sk_callback_lock);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index bbf9517218ff3..002c91dd7191f 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2783,7 +2783,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+
+ #ifdef CONFIG_NET_RX_BUSY_POLL
+ sk->sk_napi_id = 0;
+- sk->sk_ll_usec = sysctl_net_busy_read;
++ sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
+ #endif
+
+ sk->sk_max_pacing_rate = ~0U;
+diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
+index ac1a32d5cad3c..1b5749f2ef9c0 100644
+--- a/net/core/sysctl_net_core.c
++++ b/net/core/sysctl_net_core.c
+@@ -229,14 +229,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
+ static int proc_do_dev_weight(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+ {
+- int ret;
++ static DEFINE_MUTEX(dev_weight_mutex);
++ int ret, weight;
+
++ mutex_lock(&dev_weight_mutex);
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+- if (ret != 0)
+- return ret;
+-
+- dev_rx_weight = weight_p * dev_weight_rx_bias;
+- dev_tx_weight = weight_p * dev_weight_tx_bias;
++ if (!ret && write) {
++ weight = READ_ONCE(weight_p);
++ WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
++ WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
++ }
++ mutex_unlock(&dev_weight_mutex);
+
+ return ret;
+ }
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 035123bf7259b..5f0d6a567a1e3 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1707,9 +1707,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
+ pfk->registered |= (1<<hdr->sadb_msg_satype);
+ }
+
++ mutex_lock(&pfkey_mutex);
+ xfrm_probe_algs();
+
+ supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
++ mutex_unlock(&pfkey_mutex);
++
+ if (!supp_skb) {
+ if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
+ pfk->registered &= ~(1<<hdr->sadb_msg_satype);
+diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
+index 1b302d9fd0a07..19d6821b0ffd9 100644
+--- a/net/netfilter/Kconfig
++++ b/net/netfilter/Kconfig
+@@ -100,7 +100,6 @@ config NF_CONNTRACK_ZONES
+
+ config NF_CONNTRACK_PROCFS
+ bool "Supply CT list in procfs (OBSOLETE)"
+- default y
+ depends on PROC_FS
+ ---help---
+ This option enables for the list of known conntrack entries
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index fd87216bc0a99..5732b32ab9320 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -332,6 +332,8 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+ {
+ struct nft_payload_set *priv = nft_expr_priv(expr);
++ u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
++ int err;
+
+ priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
+ priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+@@ -339,11 +341,15 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
+
+ if (tb[NFTA_PAYLOAD_CSUM_TYPE])
+- priv->csum_type =
+- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
+- if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
+- priv->csum_offset =
+- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
++ csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
++ if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
++ &csum_offset);
++ if (err < 0)
++ return err;
++
++ priv->csum_offset = csum_offset;
++ }
+ if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
+ u32 flags;
+
+@@ -354,13 +360,14 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
+ priv->csum_flags = flags;
+ }
+
+- switch (priv->csum_type) {
++ switch (csum_type) {
+ case NFT_PAYLOAD_CSUM_NONE:
+ case NFT_PAYLOAD_CSUM_INET:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
++ priv->csum_type = csum_type;
+
+ return nft_validate_register_load(priv->sreg, priv->len);
+ }
+@@ -398,6 +405,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
+ {
+ enum nft_payload_bases base;
+ unsigned int offset, len;
++ int err;
+
+ if (tb[NFTA_PAYLOAD_BASE] == NULL ||
+ tb[NFTA_PAYLOAD_OFFSET] == NULL ||
+@@ -423,8 +431,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
+ if (tb[NFTA_PAYLOAD_DREG] == NULL)
+ return ERR_PTR(-EINVAL);
+
+- offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
+- len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
++ if (err < 0)
++ return ERR_PTR(err);
++
++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
++ if (err < 0)
++ return ERR_PTR(err);
+
+ if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
+ base != NFT_PAYLOAD_LL_HEADER)
+diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
+index c318e5c9f6df3..56eea298b8ef7 100644
+--- a/net/rose/rose_loopback.c
++++ b/net/rose/rose_loopback.c
+@@ -99,7 +99,8 @@ static void rose_loopback_timer(struct timer_list *unused)
+ }
+
+ if (frametype == ROSE_CALL_REQUEST) {
+- if (!rose_loopback_neigh->dev) {
++ if (!rose_loopback_neigh->dev &&
++ !rose_loopback_neigh->loopback) {
+ kfree_skb(skb);
+ continue;
+ }
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 82752dcbf2a2c..4a76ceeca6fdd 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -251,7 +251,7 @@ static inline int qdisc_restart(struct Qdisc *q, int *packets)
+
+ void __qdisc_run(struct Qdisc *q)
+ {
+- int quota = dev_tx_weight;
++ int quota = READ_ONCE(dev_tx_weight);
+ int packets;
+
+ while (qdisc_restart(q, &packets)) {
+diff --git a/net/socket.c b/net/socket.c
+index c74cfe1ee1699..7bcd7053e61f2 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1509,7 +1509,7 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock) {
+- somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
++ somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
+ if ((unsigned int)backlog > somaxconn)
+ backlog = somaxconn;
+
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index e1840f70c0ff0..66c23a1b8758f 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -2332,6 +2332,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
+ if (pols[1]) {
+ if (IS_ERR(pols[1])) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
++ xfrm_pol_put(pols[0]);
+ return 0;
+ }
+ pols[1]->curlft.use_time = get_seconds();
+diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
+index cf6f33b2633d5..a2063130c93c6 100644
+--- a/scripts/Makefile.modpost
++++ b/scripts/Makefile.modpost
+@@ -51,8 +51,7 @@ obj := $(KBUILD_EXTMOD)
+ src := $(obj)
+
+ # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
+-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
+- $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
++include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
+ endif
+
+ include scripts/Makefile.lib