Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:4.19 commit in: /
Date: Mon, 05 Sep 2022 12:06:05
Message-Id: 1662379547.ea21b2e23e5097ddcbc070fe25f180daae7f821f.mpagano@gentoo
1 commit: ea21b2e23e5097ddcbc070fe25f180daae7f821f
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Sep 5 12:05:47 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Sep 5 12:05:47 2022 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=ea21b2e2
7
8 Linux patch 4.19.257
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1256_linux-4.19.257.patch | 2134 +++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2138 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index b20110e2..dff212f8 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -1067,6 +1067,10 @@ Patch: 1255_linux-4.19.256.patch
21 From: https://www.kernel.org
22 Desc: Linux 4.19.256
23
24 +Patch: 1256_linux-4.19.257.patch
25 +From: https://www.kernel.org
26 +Desc: Linux 4.19.257
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1256_linux-4.19.257.patch b/1256_linux-4.19.257.patch
33 new file mode 100644
34 index 00000000..7b7b81b6
35 --- /dev/null
36 +++ b/1256_linux-4.19.257.patch
37 @@ -0,0 +1,2134 @@
38 +diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
39 +index 9393c50b5afc9..c98fd11907cc8 100644
40 +--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
41 ++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
42 +@@ -230,6 +230,20 @@ The possible values in this file are:
43 + * - 'Mitigation: Clear CPU buffers'
44 + - The processor is vulnerable and the CPU buffer clearing mitigation is
45 + enabled.
46 ++ * - 'Unknown: No mitigations'
47 ++ - The processor vulnerability status is unknown because it is
48 ++ out of Servicing period. Mitigation is not attempted.
49 ++
50 ++Definitions:
51 ++------------
52 ++
53 ++Servicing period: The process of providing functional and security updates to
54 ++Intel processors or platforms, utilizing the Intel Platform Update (IPU)
55 ++process or other similar mechanisms.
56 ++
57 ++End of Servicing Updates (ESU): ESU is the date at which Intel will no
58 ++longer provide Servicing, such as through IPU or other similar update
59 ++processes. ESU dates will typically be aligned to end of quarter.
60 +
61 + If the processor is vulnerable then the following information is appended to
62 + the above information:
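
The documentation hunk above introduces the 'Unknown: No mitigations' state reported through sysfs. As a rough user-space sketch (not part of the patch; it assumes the standard mmio_stale_data file name under the cpu vulnerabilities directory), the new state can be detected like this:

/* Illustrative only: print the MMIO Stale Data state a running kernel
 * exposes, and flag the new "Unknown" case described above. */
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *path =
                "/sys/devices/system/cpu/vulnerabilities/mmio_stale_data";
        char line[256] = "";
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                fputs(line, stdout);
        fclose(f);

        if (!strncmp(line, "Unknown", 7))
                puts("CPU is outside its servicing period; no mitigation attempted.");
        return 0;
}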
63 +diff --git a/Makefile b/Makefile
64 +index ac79aef4520be..18ccab9a01b06 100644
65 +--- a/Makefile
66 ++++ b/Makefile
67 +@@ -1,7 +1,7 @@
68 + # SPDX-License-Identifier: GPL-2.0
69 + VERSION = 4
70 + PATCHLEVEL = 19
71 +-SUBLEVEL = 256
72 ++SUBLEVEL = 257
73 + EXTRAVERSION =
74 + NAME = "People's Front"
75 +
76 +diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
77 +index b37d185e0e841..3dda6ff32efd7 100644
78 +--- a/arch/arm64/include/asm/mmu.h
79 ++++ b/arch/arm64/include/asm/mmu.h
80 +@@ -98,7 +98,7 @@ extern void init_mem_pgprot(void);
81 + extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
82 + unsigned long virt, phys_addr_t size,
83 + pgprot_t prot, bool page_mappings_only);
84 +-extern void *fixmap_remap_fdt(phys_addr_t dt_phys);
85 ++extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
86 + extern void mark_linear_text_alias_ro(void);
87 +
88 + #endif /* !__ASSEMBLY__ */
89 +diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
90 +index 06941c1fe418e..92bb53460401c 100644
91 +--- a/arch/arm64/kernel/kaslr.c
92 ++++ b/arch/arm64/kernel/kaslr.c
93 +@@ -65,9 +65,6 @@ out:
94 + return default_cmdline;
95 + }
96 +
97 +-extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size,
98 +- pgprot_t prot);
99 +-
100 + /*
101 + * This routine will be executed with the kernel mapped at its default virtual
102 + * address, and if it returns successfully, the kernel will be remapped, and
103 +@@ -96,7 +93,7 @@ u64 __init kaslr_early_init(u64 dt_phys)
104 + * attempt at mapping the FDT in setup_machine()
105 + */
106 + early_fixmap_init();
107 +- fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
108 ++ fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
109 + if (!fdt)
110 + return 0;
111 +
112 +diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
113 +index b3354ff94e798..43e9786f1d604 100644
114 +--- a/arch/arm64/kernel/setup.c
115 ++++ b/arch/arm64/kernel/setup.c
116 +@@ -183,9 +183,13 @@ static void __init smp_build_mpidr_hash(void)
117 +
118 + static void __init setup_machine_fdt(phys_addr_t dt_phys)
119 + {
120 +- void *dt_virt = fixmap_remap_fdt(dt_phys);
121 ++ int size;
122 ++ void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
123 + const char *name;
124 +
125 ++ if (dt_virt)
126 ++ memblock_reserve(dt_phys, size);
127 ++
128 + if (!dt_virt || !early_init_dt_scan(dt_virt)) {
129 + pr_crit("\n"
130 + "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
131 +@@ -197,6 +201,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
132 + cpu_relax();
133 + }
134 +
135 ++ /* Early fixups are done, map the FDT as read-only now */
136 ++ fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
137 ++
138 + name = of_flat_dt_get_machine_name();
139 + if (!name)
140 + return;
141 +diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
142 +index b0a83dbed2dc4..7042fbb6d92ba 100644
143 +--- a/arch/arm64/mm/mmu.c
144 ++++ b/arch/arm64/mm/mmu.c
145 +@@ -859,7 +859,7 @@ void __set_fixmap(enum fixed_addresses idx,
146 + }
147 + }
148 +
149 +-void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
150 ++void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
151 + {
152 + const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
153 + int offset;
154 +@@ -912,19 +912,6 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
155 + return dt_virt;
156 + }
157 +
158 +-void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
159 +-{
160 +- void *dt_virt;
161 +- int size;
162 +-
163 +- dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
164 +- if (!dt_virt)
165 +- return NULL;
166 +-
167 +- memblock_reserve(dt_phys, size);
168 +- return dt_virt;
169 +-}
170 +-
171 + int __init arch_ioremap_pud_supported(void)
172 + {
173 + /*
174 +diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
175 +index c16af267362e4..a8b5ad11c7a41 100644
176 +--- a/arch/parisc/kernel/unaligned.c
177 ++++ b/arch/parisc/kernel/unaligned.c
178 +@@ -121,7 +121,7 @@
179 + #define R1(i) (((i)>>21)&0x1f)
180 + #define R2(i) (((i)>>16)&0x1f)
181 + #define R3(i) ((i)&0x1f)
182 +-#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
183 ++#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
184 + #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
185 + #define IM5_2(i) IM((i)>>16,5)
186 + #define IM5_3(i) IM((i),5)
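
The FR3() change above swaps the order of the mask and the shift so the full 5-bit register field survives before bit 6 is OR-ed into the low bit. A small stand-alone sketch (not part of the patch; the field layout is inferred from the macro shape) showing the difference on a sample instruction word:

/* Illustrative only: contrast the old and new FR3() decodings for a
 * value whose low 5 bits are the FP register field and whose bit 6
 * selects the register half. */
#include <stdio.h>

#define FR3_OLD(i) ((((i) << 1) & 0x1f) | (((i) >> 6) & 1))
#define FR3_NEW(i) ((((i) & 0x1f) << 1) | (((i) >> 6) & 1))

int main(void)
{
        unsigned int insn = 0x5f;   /* register field = 0x1f, bit 6 set */

        /* Old: the shift happens before the 5-bit mask, so the top bit
         * of the register field is thrown away (prints 31). */
        printf("old FR3 = %u\n", FR3_OLD(insn));

        /* New: mask first, then shift, then OR in bit 6 (prints 63). */
        printf("new FR3 = %u\n", FR3_NEW(insn));
        return 0;
}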
187 +diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
188 +index 3452e18bb1ca8..38105ba35c814 100644
189 +--- a/arch/s390/hypfs/hypfs_diag.c
190 ++++ b/arch/s390/hypfs/hypfs_diag.c
191 +@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
192 + int rc;
193 +
194 + if (diag204_probe()) {
195 +- pr_err("The hardware system does not support hypfs\n");
196 ++ pr_info("The hardware system does not support hypfs\n");
197 + return -ENODATA;
198 + }
199 + if (diag204_info_type == DIAG204_INFO_EXT) {
200 +diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
201 +index e4d17d9ea93d8..4af5c0dd9fbe2 100644
202 +--- a/arch/s390/hypfs/inode.c
203 ++++ b/arch/s390/hypfs/inode.c
204 +@@ -494,9 +494,9 @@ fail_hypfs_vm_exit:
205 + hypfs_vm_exit();
206 + fail_hypfs_diag_exit:
207 + hypfs_diag_exit();
208 ++ pr_err("Initialization of hypfs failed with rc=%i\n", rc);
209 + fail_dbfs_exit:
210 + hypfs_dbfs_exit();
211 +- pr_err("Initialization of hypfs failed with rc=%i\n", rc);
212 + return rc;
213 + }
214 + device_initcall(hypfs_init)
215 +diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
216 +index 99ef537e548a3..5772ef90dd26c 100644
217 +--- a/arch/s390/kernel/process.c
218 ++++ b/arch/s390/kernel/process.c
219 +@@ -75,6 +75,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
220 +
221 + memcpy(dst, src, arch_task_struct_size);
222 + dst->thread.fpu.regs = dst->thread.fpu.fprs;
223 ++
224 ++ /*
225 ++ * Don't transfer over the runtime instrumentation or the guarded
226 ++ * storage control block pointers. These fields are cleared here instead
227 ++ * of in copy_thread() to avoid premature freeing of associated memory
228 ++ * on fork() failure. Wait to clear the RI flag because ->stack still
229 ++ * refers to the source thread.
230 ++ */
231 ++ dst->thread.ri_cb = NULL;
232 ++ dst->thread.gs_cb = NULL;
233 ++ dst->thread.gs_bc_cb = NULL;
234 ++
235 + return 0;
236 + }
237 +
238 +@@ -131,13 +143,11 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
239 + frame->childregs.flags = 0;
240 + if (new_stackp)
241 + frame->childregs.gprs[15] = new_stackp;
242 +-
243 +- /* Don't copy runtime instrumentation info */
244 +- p->thread.ri_cb = NULL;
245 ++ /*
246 ++ * Clear the runtime instrumentation flag after the above childregs
247 ++ * copy. The CB pointer was already cleared in arch_dup_task_struct().
248 ++ */
249 + frame->childregs.psw.mask &= ~PSW_MASK_RI;
250 +- /* Don't copy guarded storage control block */
251 +- p->thread.gs_cb = NULL;
252 +- p->thread.gs_bc_cb = NULL;
253 +
254 + /* Set a new TLS ? */
255 + if (clone_flags & CLONE_SETTLS) {
256 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
257 +index a6e3c7022245d..d64b180caedaf 100644
258 +--- a/arch/s390/mm/fault.c
259 ++++ b/arch/s390/mm/fault.c
260 +@@ -455,7 +455,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
261 + flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
262 + if (user_mode(regs))
263 + flags |= FAULT_FLAG_USER;
264 +- if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
265 ++ if ((trans_exc_code & store_indication) == 0x400)
266 ++ access = VM_WRITE;
267 ++ if (access == VM_WRITE)
268 + flags |= FAULT_FLAG_WRITE;
269 + down_read(&mm->mmap_sem);
270 +
271 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
272 +index 89145ea183d6d..e9b79bac9b2af 100644
273 +--- a/arch/x86/include/asm/cpufeatures.h
274 ++++ b/arch/x86/include/asm/cpufeatures.h
275 +@@ -396,6 +396,7 @@
276 + #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
277 + #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
278 + #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
279 +-#define X86_BUG_EIBRS_PBRSB X86_BUG(26) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
280 ++#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
281 ++#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
282 +
283 + #endif /* _ASM_X86_CPUFEATURES_H */
284 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
285 +index a36be67860432..501d09d59abcc 100644
286 +--- a/arch/x86/kernel/cpu/bugs.c
287 ++++ b/arch/x86/kernel/cpu/bugs.c
288 +@@ -396,7 +396,8 @@ static void __init mmio_select_mitigation(void)
289 + u64 ia32_cap;
290 +
291 + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
292 +- cpu_mitigations_off()) {
293 ++ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
294 ++ cpu_mitigations_off()) {
295 + mmio_mitigation = MMIO_MITIGATION_OFF;
296 + return;
297 + }
298 +@@ -501,6 +502,8 @@ out:
299 + pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
300 + if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
301 + pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
302 ++ else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
303 ++ pr_info("MMIO Stale Data: Unknown: No mitigations\n");
304 + }
305 +
306 + static void __init md_clear_select_mitigation(void)
307 +@@ -1868,6 +1871,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
308 +
309 + static ssize_t mmio_stale_data_show_state(char *buf)
310 + {
311 ++ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
312 ++ return sysfs_emit(buf, "Unknown: No mitigations\n");
313 ++
314 + if (mmio_mitigation == MMIO_MITIGATION_OFF)
315 + return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
316 +
317 +@@ -1995,6 +2001,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
318 + return srbds_show_state(buf);
319 +
320 + case X86_BUG_MMIO_STALE_DATA:
321 ++ case X86_BUG_MMIO_UNKNOWN:
322 + return mmio_stale_data_show_state(buf);
323 +
324 + default:
325 +@@ -2051,6 +2058,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
326 +
327 + ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
328 + {
329 +- return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
330 ++ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
331 ++ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
332 ++ else
333 ++ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
334 + }
335 + #endif
336 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
337 +index 3ab35d5426b76..653ced7cb3964 100644
338 +--- a/arch/x86/kernel/cpu/common.c
339 ++++ b/arch/x86/kernel/cpu/common.c
340 +@@ -955,6 +955,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
341 + #define NO_SWAPGS BIT(6)
342 + #define NO_ITLB_MULTIHIT BIT(7)
343 + #define NO_EIBRS_PBRSB BIT(8)
344 ++#define NO_MMIO BIT(9)
345 +
346 + #define VULNWL(_vendor, _family, _model, _whitelist) \
347 + { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
348 +@@ -972,6 +973,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
349 + VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
350 +
351 + /* Intel Family 6 */
352 ++ VULNWL_INTEL(TIGERLAKE, NO_MMIO),
353 ++ VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
354 ++ VULNWL_INTEL(ALDERLAKE, NO_MMIO),
355 ++ VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),
356 ++
357 + VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
358 + VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
359 + VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
360 +@@ -989,9 +995,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
361 +
362 + VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
363 +
364 +- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
365 +- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
366 +- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
367 ++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
368 ++ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
369 ++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
370 +
371 + /*
372 + * Technically, swapgs isn't serializing on AMD (despite it previously
373 +@@ -1006,13 +1012,13 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
374 + VULNWL_INTEL(ATOM_TREMONT_X, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
375 +
376 + /* AMD Family 0xf - 0x12 */
377 +- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
378 +- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
379 +- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
380 +- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
381 ++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
382 ++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
383 ++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
384 ++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
385 +
386 + /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
387 +- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
388 ++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
389 + {}
390 + };
391 +
392 +@@ -1152,10 +1158,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
393 + * Affected CPU list is generally enough to enumerate the vulnerability,
394 + * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
395 + * not want the guest to enumerate the bug.
396 ++ *
397 ++ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
398 ++ * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
399 + */
400 +- if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
401 +- !arch_cap_mmio_immune(ia32_cap))
402 +- setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
403 ++ if (!arch_cap_mmio_immune(ia32_cap)) {
404 ++ if (cpu_matches(cpu_vuln_blacklist, MMIO))
405 ++ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
406 ++ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
407 ++ setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
408 ++ }
409 +
410 + if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
411 + !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
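
The common.c hunk above ends up with a three-way outcome for the MMIO bug bits: affected (blacklisted), unknown (neither listed nor enumerating the ARCH_CAP MMIO bits), or not affected. A condensed sketch of that decision, using local stand-in names rather than the kernel's helpers:

/* Illustrative only: the classification the hunk above encodes. */
enum mmio_status { MMIO_NOT_AFFECTED, MMIO_STALE_DATA, MMIO_UNKNOWN };

static enum mmio_status classify_mmio(int arch_cap_immune,
                                      int on_blacklist,
                                      int on_no_mmio_whitelist)
{
        if (arch_cap_immune)
                return MMIO_NOT_AFFECTED;   /* MSR says immune */
        if (on_blacklist)
                return MMIO_STALE_DATA;     /* known affected */
        if (!on_no_mmio_whitelist)
                return MMIO_UNKNOWN;        /* too old / never enumerated */
        return MMIO_NOT_AFFECTED;           /* whitelisted as NO_MMIO */
}

int main(void)
{
        /* e.g. an older CPU: not immune, not blacklisted, not whitelisted */
        return classify_mmio(0, 0, 0) == MMIO_UNKNOWN ? 0 : 1;
}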
412 +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
413 +index 4f17c1c949498..5c48d2c4cabe6 100644
414 +--- a/arch/x86/kernel/unwind_orc.c
415 ++++ b/arch/x86/kernel/unwind_orc.c
416 +@@ -89,22 +89,27 @@ static struct orc_entry *orc_find(unsigned long ip);
417 + static struct orc_entry *orc_ftrace_find(unsigned long ip)
418 + {
419 + struct ftrace_ops *ops;
420 +- unsigned long caller;
421 ++ unsigned long tramp_addr, offset;
422 +
423 + ops = ftrace_ops_trampoline(ip);
424 + if (!ops)
425 + return NULL;
426 +
427 ++ /* Set tramp_addr to the start of the code copied by the trampoline */
428 + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
429 +- caller = (unsigned long)ftrace_regs_call;
430 ++ tramp_addr = (unsigned long)ftrace_regs_caller;
431 + else
432 +- caller = (unsigned long)ftrace_call;
433 ++ tramp_addr = (unsigned long)ftrace_caller;
434 ++
435 ++ /* Now place tramp_addr to the location within the trampoline ip is at */
436 ++ offset = ip - ops->trampoline;
437 ++ tramp_addr += offset;
438 +
439 + /* Prevent unlikely recursion */
440 +- if (ip == caller)
441 ++ if (ip == tramp_addr)
442 + return NULL;
443 +
444 +- return orc_find(caller);
445 ++ return orc_find(tramp_addr);
446 + }
447 + #else
448 + static struct orc_entry *orc_ftrace_find(unsigned long ip)
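
The unwind_orc.c change above stops assuming the unwinder sits exactly at ftrace_regs_call/ftrace_call and instead translates the ip's offset within the trampoline copy back to the same offset inside the original caller code. A stand-alone sketch of that translation, with made-up addresses:

/* Illustrative only: a dynamically allocated ftrace trampoline is a
 * byte-for-byte copy of ftrace_caller()/ftrace_regs_caller(), so an ip
 * inside the copy maps to "original start + (ip - copy start)". */
#include <stdio.h>

static unsigned long copy_to_original(unsigned long ip,
                                      unsigned long trampoline_start,
                                      unsigned long original_start)
{
        unsigned long offset = ip - trampoline_start;   /* position inside the copy */

        return original_start + offset;                 /* same position in the original */
}

int main(void)
{
        /* hypothetical addresses for the example */
        unsigned long trampoline = 0xffffffffa0001000UL;
        unsigned long ftrace_caller_addr = 0xffffffff81060000UL;
        unsigned long ip = trampoline + 0x34;

        printf("lookup ORC data at %#lx\n",
               copy_to_original(ip, trampoline, ftrace_caller_addr));
        return 0;
}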
449 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
450 +index c31a76485c9cb..12eb48980df7b 100644
451 +--- a/drivers/block/loop.c
452 ++++ b/drivers/block/loop.c
453 +@@ -1351,6 +1351,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
454 + info->lo_number = lo->lo_number;
455 + info->lo_offset = lo->lo_offset;
456 + info->lo_sizelimit = lo->lo_sizelimit;
457 ++
458 ++ /* loff_t vars have been assigned __u64 */
459 ++ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
460 ++ return -EOVERFLOW;
461 ++
462 + info->lo_flags = lo->lo_flags;
463 + memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
464 + memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
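
The loop.c hunk above guards against a signed loff_t being copied into the unsigned __u64 fields of the user-visible structure. A minimal sketch (not part of the patch) of what the missing check would let through:

/* Illustrative only: a negative 64-bit offset silently becomes a huge
 * unsigned value when assigned to a __u64-style field. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t  lo_offset = -4096;          /* stands in for a signed loff_t */
        uint64_t user_visible = lo_offset;   /* stands in for the __u64 field */

        /* prints 18446744073709547520 rather than reporting an error */
        printf("%llu\n", (unsigned long long)user_visible);
        return 0;
}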
465 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
466 +index 411f89218e019..cb5c44b339e09 100644
467 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
468 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
469 +@@ -452,6 +452,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
470 + OTG_CLOCK_ON, 1,
471 + 1, 1000);
472 + } else {
473 ++
474 ++ //last chance to clear underflow, otherwise, it will always there due to clock is off.
475 ++ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
476 ++ optc->funcs->clear_optc_underflow(optc);
477 ++
478 + REG_UPDATE_2(OTG_CLOCK_CONTROL,
479 + OTG_CLOCK_GATE_DIS, 0,
480 + OTG_CLOCK_EN, 0);
481 +diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
482 +index a3b151b29bd71..fc616db4231bb 100644
483 +--- a/drivers/hid/hid-steam.c
484 ++++ b/drivers/hid/hid-steam.c
485 +@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
486 + int ret;
487 +
488 + r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
489 ++ if (!r) {
490 ++ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
491 ++ return -EINVAL;
492 ++ }
493 ++
494 + if (hid_report_len(r) < 64)
495 + return -EINVAL;
496 +
497 +@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
498 + int ret;
499 +
500 + r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
501 ++ if (!r) {
502 ++ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
503 ++ return -EINVAL;
504 ++ }
505 ++
506 + if (hid_report_len(r) < 64)
507 + return -EINVAL;
508 +
509 +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
510 +index c4ba2d28dd731..6a5c5ce85d85b 100644
511 +--- a/drivers/hid/hidraw.c
512 ++++ b/drivers/hid/hidraw.c
513 +@@ -354,10 +354,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
514 + unsigned int minor = iminor(inode);
515 + struct hidraw_list *list = file->private_data;
516 + unsigned long flags;
517 ++ int i;
518 +
519 + mutex_lock(&minors_lock);
520 +
521 + spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
522 ++ for (i = list->tail; i < list->head; i++)
523 ++ kfree(list->buffer[i].value);
524 + list_del(&list->node);
525 + spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
526 + kfree(list);
527 +diff --git a/drivers/md/md.c b/drivers/md/md.c
528 +index 4594a1ee88b9b..38cbde9061339 100644
529 +--- a/drivers/md/md.c
530 ++++ b/drivers/md/md.c
531 +@@ -5937,6 +5937,7 @@ void md_stop(struct mddev *mddev)
532 + /* stop the array and free an attached data structures.
533 + * This is called from dm-raid
534 + */
535 ++ __md_stop_writes(mddev);
536 + __md_stop(mddev);
537 + bioset_exit(&mddev->bio_set);
538 + bioset_exit(&mddev->sync_set);
539 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
540 +index bbb5ff16abd61..4cbb39bfb7da4 100644
541 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
542 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
543 +@@ -2602,6 +2602,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
544 + del_timer_sync(&hdw->encoder_run_timer);
545 + del_timer_sync(&hdw->encoder_wait_timer);
546 + flush_work(&hdw->workpoll);
547 ++ v4l2_device_unregister(&hdw->v4l2_dev);
548 + usb_free_urb(hdw->ctl_read_urb);
549 + usb_free_urb(hdw->ctl_write_urb);
550 + kfree(hdw->ctl_read_buffer);
551 +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
552 +index b3eaef31b7673..a6bb7e915f74f 100644
553 +--- a/drivers/net/bonding/bond_3ad.c
554 ++++ b/drivers/net/bonding/bond_3ad.c
555 +@@ -1977,30 +1977,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
556 + */
557 + void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
558 + {
559 +- /* check that the bond is not initialized yet */
560 +- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
561 +- bond->dev->dev_addr)) {
562 +-
563 +- BOND_AD_INFO(bond).aggregator_identifier = 0;
564 +-
565 +- BOND_AD_INFO(bond).system.sys_priority =
566 +- bond->params.ad_actor_sys_prio;
567 +- if (is_zero_ether_addr(bond->params.ad_actor_system))
568 +- BOND_AD_INFO(bond).system.sys_mac_addr =
569 +- *((struct mac_addr *)bond->dev->dev_addr);
570 +- else
571 +- BOND_AD_INFO(bond).system.sys_mac_addr =
572 +- *((struct mac_addr *)bond->params.ad_actor_system);
573 ++ BOND_AD_INFO(bond).aggregator_identifier = 0;
574 ++ BOND_AD_INFO(bond).system.sys_priority =
575 ++ bond->params.ad_actor_sys_prio;
576 ++ if (is_zero_ether_addr(bond->params.ad_actor_system))
577 ++ BOND_AD_INFO(bond).system.sys_mac_addr =
578 ++ *((struct mac_addr *)bond->dev->dev_addr);
579 ++ else
580 ++ BOND_AD_INFO(bond).system.sys_mac_addr =
581 ++ *((struct mac_addr *)bond->params.ad_actor_system);
582 +
583 +- /* initialize how many times this module is called in one
584 +- * second (should be about every 100ms)
585 +- */
586 +- ad_ticks_per_sec = tick_resolution;
587 ++ /* initialize how many times this module is called in one
588 ++ * second (should be about every 100ms)
589 ++ */
590 ++ ad_ticks_per_sec = tick_resolution;
591 +
592 +- bond_3ad_initiate_agg_selection(bond,
593 +- AD_AGGREGATOR_SELECTION_TIMER *
594 +- ad_ticks_per_sec);
595 +- }
596 ++ bond_3ad_initiate_agg_selection(bond,
597 ++ AD_AGGREGATOR_SELECTION_TIMER *
598 ++ ad_ticks_per_sec);
599 + }
600 +
601 + /**
602 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
603 +index b3e0d8bb5cbd8..eec68cc9288c8 100644
604 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
605 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
606 +@@ -1066,7 +1066,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
607 + struct cyclecounter cc;
608 + unsigned long flags;
609 + u32 incval = 0;
610 +- u32 tsauxc = 0;
611 + u32 fuse0 = 0;
612 +
613 + /* For some of the boards below this mask is technically incorrect.
614 +@@ -1101,18 +1100,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
615 + case ixgbe_mac_x550em_a:
616 + case ixgbe_mac_X550:
617 + cc.read = ixgbe_ptp_read_X550;
618 +-
619 +- /* enable SYSTIME counter */
620 +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
621 +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
622 +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
623 +- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
624 +- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
625 +- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
626 +- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
627 +- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
628 +-
629 +- IXGBE_WRITE_FLUSH(hw);
630 + break;
631 + case ixgbe_mac_X540:
632 + cc.read = ixgbe_ptp_read_82599;
633 +@@ -1144,6 +1131,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
634 + spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
635 + }
636 +
637 ++/**
638 ++ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
639 ++ * @adapter: the ixgbe private board structure
640 ++ *
641 ++ * Initialize and start the SYSTIME registers.
642 ++ */
643 ++static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
644 ++{
645 ++ struct ixgbe_hw *hw = &adapter->hw;
646 ++ u32 tsauxc;
647 ++
648 ++ switch (hw->mac.type) {
649 ++ case ixgbe_mac_X550EM_x:
650 ++ case ixgbe_mac_x550em_a:
651 ++ case ixgbe_mac_X550:
652 ++ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
653 ++
654 ++ /* Reset SYSTIME registers to 0 */
655 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
656 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
657 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
658 ++
659 ++ /* Reset interrupt settings */
660 ++ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
661 ++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
662 ++
663 ++ /* Activate the SYSTIME counter */
664 ++ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
665 ++ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
666 ++ break;
667 ++ case ixgbe_mac_X540:
668 ++ case ixgbe_mac_82599EB:
669 ++ /* Reset SYSTIME registers to 0 */
670 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
671 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
672 ++ break;
673 ++ default:
674 ++ /* Other devices aren't supported */
675 ++ return;
676 ++ };
677 ++
678 ++ IXGBE_WRITE_FLUSH(hw);
679 ++}
680 ++
681 + /**
682 + * ixgbe_ptp_reset
683 + * @adapter: the ixgbe private board structure
684 +@@ -1170,6 +1201,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
685 +
686 + ixgbe_ptp_start_cyclecounter(adapter);
687 +
688 ++ ixgbe_ptp_init_systime(adapter);
689 ++
690 + spin_lock_irqsave(&adapter->tmreg_lock, flags);
691 + timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
692 + ktime_to_ns(ktime_get_real()));
693 +diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
694 +index 0bcc07f346c3e..2e517e30c5ac1 100644
695 +--- a/drivers/net/ipvlan/ipvtap.c
696 ++++ b/drivers/net/ipvlan/ipvtap.c
697 +@@ -193,7 +193,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
698 + .notifier_call = ipvtap_device_event,
699 + };
700 +
701 +-static int ipvtap_init(void)
702 ++static int __init ipvtap_init(void)
703 + {
704 + int err;
705 +
706 +@@ -227,7 +227,7 @@ out1:
707 + }
708 + module_init(ipvtap_init);
709 +
710 +-static void ipvtap_exit(void)
711 ++static void __exit ipvtap_exit(void)
712 + {
713 + rtnl_link_unregister(&ipvtap_link_ops);
714 + unregister_netdevice_notifier(&ipvtap_notifier_block);
715 +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
716 +index 8d2dbf607bd15..66b9c5826ec03 100644
717 +--- a/drivers/pinctrl/pinctrl-amd.c
718 ++++ b/drivers/pinctrl/pinctrl-amd.c
719 +@@ -798,6 +798,7 @@ static int amd_gpio_suspend(struct device *dev)
720 + struct platform_device *pdev = to_platform_device(dev);
721 + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
722 + struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
723 ++ unsigned long flags;
724 + int i;
725 +
726 + for (i = 0; i < desc->npins; i++) {
727 +@@ -806,7 +807,9 @@ static int amd_gpio_suspend(struct device *dev)
728 + if (!amd_gpio_should_save(gpio_dev, pin))
729 + continue;
730 +
731 +- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
732 ++ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
733 ++ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
734 ++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
735 + }
736 +
737 + return 0;
738 +@@ -817,6 +820,7 @@ static int amd_gpio_resume(struct device *dev)
739 + struct platform_device *pdev = to_platform_device(dev);
740 + struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
741 + struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
742 ++ unsigned long flags;
743 + int i;
744 +
745 + for (i = 0; i < desc->npins; i++) {
746 +@@ -825,7 +829,10 @@ static int amd_gpio_resume(struct device *dev)
747 + if (!amd_gpio_should_save(gpio_dev, pin))
748 + continue;
749 +
750 +- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
751 ++ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
752 ++ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
753 ++ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
754 ++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
755 + }
756 +
757 + return 0;
758 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
759 +index 0c2ba075bc713..f3701b4e374b6 100644
760 +--- a/drivers/scsi/storvsc_drv.c
761 ++++ b/drivers/scsi/storvsc_drv.c
762 +@@ -1858,7 +1858,7 @@ static int storvsc_probe(struct hv_device *device,
763 + */
764 + host_dev->handle_error_wq =
765 + alloc_ordered_workqueue("storvsc_error_wq_%d",
766 +- WQ_MEM_RECLAIM,
767 ++ 0,
768 + host->host_no);
769 + if (!host_dev->handle_error_wq)
770 + goto err_out2;
771 +diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
772 +index 1dcf02e12af4f..8ae010f07d7da 100644
773 +--- a/drivers/video/fbdev/pm2fb.c
774 ++++ b/drivers/video/fbdev/pm2fb.c
775 +@@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
776 + return -EINVAL;
777 + }
778 +
779 ++ if (!var->pixclock) {
780 ++ DPRINTK("pixclock is zero\n");
781 ++ return -EINVAL;
782 ++ }
783 ++
784 + if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
785 + DPRINTK("pixclock too high (%ldKHz)\n",
786 + PICOS2KHZ(var->pixclock));
787 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
788 +index f141b45ce3498..6adee94637a93 100644
789 +--- a/fs/btrfs/xattr.c
790 ++++ b/fs/btrfs/xattr.c
791 +@@ -369,6 +369,9 @@ static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
792 + const char *name, const void *buffer,
793 + size_t size, int flags)
794 + {
795 ++ if (btrfs_root_readonly(BTRFS_I(inode)->root))
796 ++ return -EROFS;
797 ++
798 + name = xattr_full_name(handler, name);
799 + return btrfs_setxattr(NULL, inode, name, buffer, size, flags);
800 + }
801 +diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
802 +index ea5987bb0b84a..40e03afa9ad10 100644
803 +--- a/include/asm-generic/sections.h
804 ++++ b/include/asm-generic/sections.h
805 +@@ -100,7 +100,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
806 + /**
807 + * memory_intersects - checks if the region occupied by an object intersects
808 + * with another memory region
809 +- * @begin: virtual address of the beginning of the memory regien
810 ++ * @begin: virtual address of the beginning of the memory region
811 + * @end: virtual address of the end of the memory region
812 + * @virt: virtual address of the memory object
813 + * @size: size of the memory object
814 +@@ -113,7 +113,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
815 + {
816 + void *vend = virt + size;
817 +
818 +- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
819 ++ if (virt < end && vend > begin)
820 ++ return true;
821 ++
822 ++ return false;
823 + }
824 +
825 + /**
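
The memory_intersects() fix above replaces the two endpoint tests with the standard half-open overlap test, which also catches an object that completely covers the region. A small sketch (not part of the patch) of the case the old test missed:

/* Illustrative only: with half-open ranges, two ranges overlap iff
 * start1 < end2 && end1 > start2. The old endpoint-only test fails when
 * the object fully contains the region. */
#include <stdio.h>
#include <stdbool.h>

static bool old_check(unsigned long begin, unsigned long end,
                      unsigned long virt, unsigned long vend)
{
        return (virt >= begin && virt < end) || (vend >= begin && vend < end);
}

static bool new_check(unsigned long begin, unsigned long end,
                      unsigned long virt, unsigned long vend)
{
        return virt < end && vend > begin;
}

int main(void)
{
        /* object [0x1000, 0x9000) fully covers region [0x4000, 0x5000) */
        unsigned long begin = 0x4000, end = 0x5000;
        unsigned long virt = 0x1000, vend = 0x9000;

        /* prints "old: 0, new: 1" */
        printf("old: %d, new: %d\n",
               old_check(begin, end, virt, vend),
               new_check(begin, end, virt, vend));
        return 0;
}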
826 +diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
827 +index c6935be7c6ca3..954ffe32f6227 100644
828 +--- a/include/linux/netfilter_bridge/ebtables.h
829 ++++ b/include/linux/netfilter_bridge/ebtables.h
830 +@@ -94,10 +94,6 @@ struct ebt_table {
831 + struct ebt_replace_kernel *table;
832 + unsigned int valid_hooks;
833 + rwlock_t lock;
834 +- /* e.g. could be the table explicitly only allows certain
835 +- * matches, targets, ... 0 == let it in */
836 +- int (*check)(const struct ebt_table_info *info,
837 +- unsigned int valid_hooks);
838 + /* the data used by the kernel */
839 + struct ebt_table_info *private;
840 + struct module *me;
841 +diff --git a/include/linux/rmap.h b/include/linux/rmap.h
842 +index 91ccae9467164..c80bd129e9399 100644
843 +--- a/include/linux/rmap.h
844 ++++ b/include/linux/rmap.h
845 +@@ -39,12 +39,15 @@ struct anon_vma {
846 + atomic_t refcount;
847 +
848 + /*
849 +- * Count of child anon_vmas and VMAs which points to this anon_vma.
850 ++ * Count of child anon_vmas. Equals to the count of all anon_vmas that
851 ++ * have ->parent pointing to this one, including itself.
852 + *
853 + * This counter is used for making decision about reusing anon_vma
854 + * instead of forking new one. See comments in function anon_vma_clone.
855 + */
856 +- unsigned degree;
857 ++ unsigned long num_children;
858 ++ /* Count of VMAs whose ->anon_vma pointer points to this object. */
859 ++ unsigned long num_active_vmas;
860 +
861 + struct anon_vma *parent; /* Parent of this anon_vma */
862 +
863 +diff --git a/include/linux/sched.h b/include/linux/sched.h
864 +index f92d5ae6d04e7..fd4899236037f 100644
865 +--- a/include/linux/sched.h
866 ++++ b/include/linux/sched.h
867 +@@ -528,10 +528,6 @@ struct sched_dl_entity {
868 + * task has to wait for a replenishment to be performed at the
869 + * next firing of dl_timer.
870 + *
871 +- * @dl_boosted tells if we are boosted due to DI. If so we are
872 +- * outside bandwidth enforcement mechanism (but only until we
873 +- * exit the critical section);
874 +- *
875 + * @dl_yielded tells if task gave up the CPU before consuming
876 + * all its available runtime during the last job.
877 + *
878 +@@ -546,7 +542,6 @@ struct sched_dl_entity {
879 + * overruns.
880 + */
881 + unsigned int dl_throttled : 1;
882 +- unsigned int dl_boosted : 1;
883 + unsigned int dl_yielded : 1;
884 + unsigned int dl_non_contending : 1;
885 + unsigned int dl_overrun : 1;
886 +@@ -565,6 +560,15 @@ struct sched_dl_entity {
887 + * time.
888 + */
889 + struct hrtimer inactive_timer;
890 ++
891 ++#ifdef CONFIG_RT_MUTEXES
892 ++ /*
893 ++ * Priority Inheritance. When a DEADLINE scheduling entity is boosted
894 ++ * pi_se points to the donor, otherwise points to the dl_se it belongs
895 ++ * to (the original one/itself).
896 ++ */
897 ++ struct sched_dl_entity *pi_se;
898 ++#endif
899 + };
900 +
901 + union rcu_special {
902 +diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
903 +index c76a5e9894dac..8f42f6f3af86f 100644
904 +--- a/include/net/busy_poll.h
905 ++++ b/include/net/busy_poll.h
906 +@@ -43,7 +43,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
907 +
908 + static inline bool net_busy_loop_on(void)
909 + {
910 +- return sysctl_net_busy_poll;
911 ++ return READ_ONCE(sysctl_net_busy_poll);
912 + }
913 +
914 + static inline bool sk_can_busy_loop(const struct sock *sk)
915 +diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
916 +index fba78047fb37c..57404292c6d14 100644
917 +--- a/kernel/audit_fsnotify.c
918 ++++ b/kernel/audit_fsnotify.c
919 +@@ -111,6 +111,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
920 +
921 + ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, true);
922 + if (ret < 0) {
923 ++ audit_mark->path = NULL;
924 + fsnotify_put_mark(&audit_mark->mark);
925 + audit_mark = ERR_PTR(ret);
926 + }
927 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
928 +index 099191716d4c9..b2fcad8635bcf 100644
929 +--- a/kernel/kprobes.c
930 ++++ b/kernel/kprobes.c
931 +@@ -1709,11 +1709,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
932 + /* Try to disarm and disable this/parent probe */
933 + if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
934 + /*
935 +- * If kprobes_all_disarmed is set, orig_p
936 +- * should have already been disarmed, so
937 +- * skip unneed disarming process.
938 ++ * Don't be lazy here. Even if 'kprobes_all_disarmed'
939 ++ * is false, 'orig_p' might not have been armed yet.
940 ++ * Note arm_all_kprobes() __tries__ to arm all kprobes
941 ++ * on the best effort basis.
942 + */
943 +- if (!kprobes_all_disarmed) {
944 ++ if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
945 + ret = disarm_kprobe(orig_p, true);
946 + if (ret) {
947 + p->flags &= ~KPROBE_FLAG_DISABLED;
948 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
949 +index 32af895bd86b3..a034642497718 100644
950 +--- a/kernel/sched/core.c
951 ++++ b/kernel/sched/core.c
952 +@@ -3869,20 +3869,21 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
953 + if (!dl_prio(p->normal_prio) ||
954 + (pi_task && dl_prio(pi_task->prio) &&
955 + dl_entity_preempt(&pi_task->dl, &p->dl))) {
956 +- p->dl.dl_boosted = 1;
957 ++ p->dl.pi_se = pi_task->dl.pi_se;
958 + queue_flag |= ENQUEUE_REPLENISH;
959 +- } else
960 +- p->dl.dl_boosted = 0;
961 ++ } else {
962 ++ p->dl.pi_se = &p->dl;
963 ++ }
964 + p->sched_class = &dl_sched_class;
965 + } else if (rt_prio(prio)) {
966 + if (dl_prio(oldprio))
967 +- p->dl.dl_boosted = 0;
968 ++ p->dl.pi_se = &p->dl;
969 + if (oldprio < prio)
970 + queue_flag |= ENQUEUE_HEAD;
971 + p->sched_class = &rt_sched_class;
972 + } else {
973 + if (dl_prio(oldprio))
974 +- p->dl.dl_boosted = 0;
975 ++ p->dl.pi_se = &p->dl;
976 + if (rt_prio(oldprio))
977 + p->rt.timeout = 0;
978 + p->sched_class = &fair_sched_class;
979 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
980 +index beec5081a55af..29ed5d8d30d68 100644
981 +--- a/kernel/sched/deadline.c
982 ++++ b/kernel/sched/deadline.c
983 +@@ -43,6 +43,28 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
984 + return !RB_EMPTY_NODE(&dl_se->rb_node);
985 + }
986 +
987 ++#ifdef CONFIG_RT_MUTEXES
988 ++static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
989 ++{
990 ++ return dl_se->pi_se;
991 ++}
992 ++
993 ++static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
994 ++{
995 ++ return pi_of(dl_se) != dl_se;
996 ++}
997 ++#else
998 ++static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
999 ++{
1000 ++ return dl_se;
1001 ++}
1002 ++
1003 ++static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
1004 ++{
1005 ++ return false;
1006 ++}
1007 ++#endif
1008 ++
1009 + #ifdef CONFIG_SMP
1010 + static inline struct dl_bw *dl_bw_of(int i)
1011 + {
1012 +@@ -657,7 +679,7 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
1013 + struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1014 + struct rq *rq = rq_of_dl_rq(dl_rq);
1015 +
1016 +- WARN_ON(dl_se->dl_boosted);
1017 ++ WARN_ON(is_dl_boosted(dl_se));
1018 + WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
1019 +
1020 + /*
1021 +@@ -695,21 +717,20 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
1022 + * could happen are, typically, a entity voluntarily trying to overcome its
1023 + * runtime, or it just underestimated it during sched_setattr().
1024 + */
1025 +-static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1026 +- struct sched_dl_entity *pi_se)
1027 ++static void replenish_dl_entity(struct sched_dl_entity *dl_se)
1028 + {
1029 + struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1030 + struct rq *rq = rq_of_dl_rq(dl_rq);
1031 +
1032 +- BUG_ON(pi_se->dl_runtime <= 0);
1033 ++ BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
1034 +
1035 + /*
1036 + * This could be the case for a !-dl task that is boosted.
1037 + * Just go with full inherited parameters.
1038 + */
1039 + if (dl_se->dl_deadline == 0) {
1040 +- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1041 +- dl_se->runtime = pi_se->dl_runtime;
1042 ++ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
1043 ++ dl_se->runtime = pi_of(dl_se)->dl_runtime;
1044 + }
1045 +
1046 + if (dl_se->dl_yielded && dl_se->runtime > 0)
1047 +@@ -722,8 +743,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1048 + * arbitrary large.
1049 + */
1050 + while (dl_se->runtime <= 0) {
1051 +- dl_se->deadline += pi_se->dl_period;
1052 +- dl_se->runtime += pi_se->dl_runtime;
1053 ++ dl_se->deadline += pi_of(dl_se)->dl_period;
1054 ++ dl_se->runtime += pi_of(dl_se)->dl_runtime;
1055 + }
1056 +
1057 + /*
1058 +@@ -737,8 +758,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1059 + */
1060 + if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
1061 + printk_deferred_once("sched: DL replenish lagged too much\n");
1062 +- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1063 +- dl_se->runtime = pi_se->dl_runtime;
1064 ++ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
1065 ++ dl_se->runtime = pi_of(dl_se)->dl_runtime;
1066 + }
1067 +
1068 + if (dl_se->dl_yielded)
1069 +@@ -771,8 +792,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1070 + * task with deadline equal to period this is the same of using
1071 + * dl_period instead of dl_deadline in the equation above.
1072 + */
1073 +-static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
1074 +- struct sched_dl_entity *pi_se, u64 t)
1075 ++static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
1076 + {
1077 + u64 left, right;
1078 +
1079 +@@ -794,9 +814,9 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
1080 + * of anything below microseconds resolution is actually fiction
1081 + * (but still we want to give the user that illusion >;).
1082 + */
1083 +- left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
1084 ++ left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
1085 + right = ((dl_se->deadline - t) >> DL_SCALE) *
1086 +- (pi_se->dl_runtime >> DL_SCALE);
1087 ++ (pi_of(dl_se)->dl_runtime >> DL_SCALE);
1088 +
1089 + return dl_time_before(right, left);
1090 + }
1091 +@@ -881,24 +901,23 @@ static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
1092 + * Please refer to the comments update_dl_revised_wakeup() function to find
1093 + * more about the Revised CBS rule.
1094 + */
1095 +-static void update_dl_entity(struct sched_dl_entity *dl_se,
1096 +- struct sched_dl_entity *pi_se)
1097 ++static void update_dl_entity(struct sched_dl_entity *dl_se)
1098 + {
1099 + struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1100 + struct rq *rq = rq_of_dl_rq(dl_rq);
1101 +
1102 + if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1103 +- dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
1104 ++ dl_entity_overflow(dl_se, rq_clock(rq))) {
1105 +
1106 + if (unlikely(!dl_is_implicit(dl_se) &&
1107 + !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1108 +- !dl_se->dl_boosted)){
1109 ++ !is_dl_boosted(dl_se))) {
1110 + update_dl_revised_wakeup(dl_se, rq);
1111 + return;
1112 + }
1113 +
1114 +- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1115 +- dl_se->runtime = pi_se->dl_runtime;
1116 ++ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
1117 ++ dl_se->runtime = pi_of(dl_se)->dl_runtime;
1118 + }
1119 + }
1120 +
1121 +@@ -997,7 +1016,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1122 + * The task might have been boosted by someone else and might be in the
1123 + * boosting/deboosting path, its not throttled.
1124 + */
1125 +- if (dl_se->dl_boosted)
1126 ++ if (is_dl_boosted(dl_se))
1127 + goto unlock;
1128 +
1129 + /*
1130 +@@ -1025,7 +1044,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1131 + * but do not enqueue -- wait for our wakeup to do that.
1132 + */
1133 + if (!task_on_rq_queued(p)) {
1134 +- replenish_dl_entity(dl_se, dl_se);
1135 ++ replenish_dl_entity(dl_se);
1136 + goto unlock;
1137 + }
1138 +
1139 +@@ -1115,7 +1134,7 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1140 +
1141 + if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1142 + dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1143 +- if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1144 ++ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1145 + return;
1146 + dl_se->dl_throttled = 1;
1147 + if (dl_se->runtime > 0)
1148 +@@ -1246,7 +1265,7 @@ throttle:
1149 + dl_se->dl_overrun = 1;
1150 +
1151 + __dequeue_task_dl(rq, curr, 0);
1152 +- if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1153 ++ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1154 + enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1155 +
1156 + if (!is_leftmost(curr, &rq->dl))
1157 +@@ -1440,8 +1459,7 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1158 + }
1159 +
1160 + static void
1161 +-enqueue_dl_entity(struct sched_dl_entity *dl_se,
1162 +- struct sched_dl_entity *pi_se, int flags)
1163 ++enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1164 + {
1165 + BUG_ON(on_dl_rq(dl_se));
1166 +
1167 +@@ -1452,9 +1470,9 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
1168 + */
1169 + if (flags & ENQUEUE_WAKEUP) {
1170 + task_contending(dl_se, flags);
1171 +- update_dl_entity(dl_se, pi_se);
1172 ++ update_dl_entity(dl_se);
1173 + } else if (flags & ENQUEUE_REPLENISH) {
1174 +- replenish_dl_entity(dl_se, pi_se);
1175 ++ replenish_dl_entity(dl_se);
1176 + } else if ((flags & ENQUEUE_RESTORE) &&
1177 + dl_time_before(dl_se->deadline,
1178 + rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1179 +@@ -1471,28 +1489,40 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1180 +
1181 + static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1182 + {
1183 +- struct task_struct *pi_task = rt_mutex_get_top_task(p);
1184 +- struct sched_dl_entity *pi_se = &p->dl;
1185 +-
1186 +- /*
1187 +- * Use the scheduling parameters of the top pi-waiter task if:
1188 +- * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1189 +- * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1190 +- * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1191 +- * boosted due to a SCHED_DEADLINE pi-waiter).
1192 +- * Otherwise we keep our runtime and deadline.
1193 +- */
1194 +- if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1195 +- pi_se = &pi_task->dl;
1196 ++ if (is_dl_boosted(&p->dl)) {
1197 ++ /*
1198 ++ * Because of delays in the detection of the overrun of a
1199 ++ * thread's runtime, it might be the case that a thread
1200 ++ * goes to sleep in a rt mutex with negative runtime. As
1201 ++ * a consequence, the thread will be throttled.
1202 ++ *
1203 ++ * While waiting for the mutex, this thread can also be
1204 ++ * boosted via PI, resulting in a thread that is throttled
1205 ++ * and boosted at the same time.
1206 ++ *
1207 ++ * In this case, the boost overrides the throttle.
1208 ++ */
1209 ++ if (p->dl.dl_throttled) {
1210 ++ /*
1211 ++ * The replenish timer needs to be canceled. No
1212 ++ * problem if it fires concurrently: boosted threads
1213 ++ * are ignored in dl_task_timer().
1214 ++ */
1215 ++ hrtimer_try_to_cancel(&p->dl.dl_timer);
1216 ++ p->dl.dl_throttled = 0;
1217 ++ }
1218 + } else if (!dl_prio(p->normal_prio)) {
1219 + /*
1220 +- * Special case in which we have a !SCHED_DEADLINE task
1221 +- * that is going to be deboosted, but exceeds its
1222 +- * runtime while doing so. No point in replenishing
1223 +- * it, as it's going to return back to its original
1224 +- * scheduling class after this.
1225 ++ * Special case in which we have a !SCHED_DEADLINE task that is going
1226 ++ * to be deboosted, but exceeds its runtime while doing so. No point in
1227 ++ * replenishing it, as it's going to return back to its original
1228 ++ * scheduling class after this. If it has been throttled, we need to
1229 ++ * clear the flag, otherwise the task may wake up as throttled after
1230 ++ * being boosted again with no means to replenish the runtime and clear
1231 ++ * the throttle.
1232 + */
1233 +- BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1234 ++ p->dl.dl_throttled = 0;
1235 ++ BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
1236 + return;
1237 + }
1238 +
1239 +@@ -1529,7 +1559,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1240 + return;
1241 + }
1242 +
1243 +- enqueue_dl_entity(&p->dl, pi_se, flags);
1244 ++ enqueue_dl_entity(&p->dl, flags);
1245 +
1246 + if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1247 + enqueue_pushable_dl_task(rq, p);
1248 +@@ -2691,11 +2721,14 @@ void __dl_clear_params(struct task_struct *p)
1249 + dl_se->dl_bw = 0;
1250 + dl_se->dl_density = 0;
1251 +
1252 +- dl_se->dl_boosted = 0;
1253 + dl_se->dl_throttled = 0;
1254 + dl_se->dl_yielded = 0;
1255 + dl_se->dl_non_contending = 0;
1256 + dl_se->dl_overrun = 0;
1257 ++
1258 ++#ifdef CONFIG_RT_MUTEXES
1259 ++ dl_se->pi_se = dl_se;
1260 ++#endif
1261 + }
1262 +
1263 + bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
1264 +diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
1265 +index df556175be506..acd7e12217743 100644
1266 +--- a/kernel/sys_ni.c
1267 ++++ b/kernel/sys_ni.c
1268 +@@ -257,6 +257,7 @@ COND_SYSCALL_COMPAT(keyctl);
1269 +
1270 + /* mm/fadvise.c */
1271 + COND_SYSCALL(fadvise64_64);
1272 ++COND_SYSCALL_COMPAT(fadvise64_64);
1273 +
1274 + /* mm/, CONFIG_MMU only */
1275 + COND_SYSCALL(swapon);
1276 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1277 +index 998d141488a95..d2272fff2f591 100644
1278 +--- a/kernel/trace/ftrace.c
1279 ++++ b/kernel/trace/ftrace.c
1280 +@@ -2748,6 +2748,16 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
1281 +
1282 + ftrace_startup_enable(command);
1283 +
1284 ++ /*
1285 ++ * If ftrace is in an undefined state, we just remove ops from list
1286 ++ * to prevent the NULL pointer, instead of totally rolling it back and
1287 ++ * free trampoline, because those actions could cause further damage.
1288 ++ */
1289 ++ if (unlikely(ftrace_disabled)) {
1290 ++ __unregister_ftrace_function(ops);
1291 ++ return -ENODEV;
1292 ++ }
1293 ++
1294 + ops->flags &= ~FTRACE_OPS_FL_ADDING;
1295 +
1296 + return 0;
1297 +diff --git a/lib/ratelimit.c b/lib/ratelimit.c
1298 +index d01f471352390..b805702de84dd 100644
1299 +--- a/lib/ratelimit.c
1300 ++++ b/lib/ratelimit.c
1301 +@@ -27,10 +27,16 @@
1302 + */
1303 + int ___ratelimit(struct ratelimit_state *rs, const char *func)
1304 + {
1305 ++ /* Paired with WRITE_ONCE() in .proc_handler().
1306 ++ * Changing two values seperately could be inconsistent
1307 ++ * and some message could be lost. (See: net_ratelimit_state).
1308 ++ */
1309 ++ int interval = READ_ONCE(rs->interval);
1310 ++ int burst = READ_ONCE(rs->burst);
1311 + unsigned long flags;
1312 + int ret;
1313 +
1314 +- if (!rs->interval)
1315 ++ if (!interval)
1316 + return 1;
1317 +
1318 + /*
1319 +@@ -45,7 +51,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
1320 + if (!rs->begin)
1321 + rs->begin = jiffies;
1322 +
1323 +- if (time_is_before_jiffies(rs->begin + rs->interval)) {
1324 ++ if (time_is_before_jiffies(rs->begin + interval)) {
1325 + if (rs->missed) {
1326 + if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
1327 + printk_deferred(KERN_WARNING
1328 +@@ -57,7 +63,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
1329 + rs->begin = jiffies;
1330 + rs->printed = 0;
1331 + }
1332 +- if (rs->burst && rs->burst > rs->printed) {
1333 ++ if (burst && burst > rs->printed) {
1334 + rs->printed++;
1335 + ret = 1;
1336 + } else {
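
The ratelimit.c hunk above snapshots rs->interval and rs->burst once with READ_ONCE() so a concurrent proc write (done with WRITE_ONCE()) cannot leave the function holding a mix of old and new values. A kernel-style sketch of the pattern; the names here are local stand-ins, not the actual ratelimit code:

/* Illustrative only: one side updates the pair with WRITE_ONCE(); the
 * reader takes a single READ_ONCE() snapshot of each field up front and
 * then works only with the local copies. Uses the kernel's helpers from
 * <linux/compiler.h>. */
#include <linux/compiler.h>

struct limits {
        int interval;
        int burst;
};

/* writer side, e.g. a .proc_handler */
static void update_limits(struct limits *l, int interval, int burst)
{
        WRITE_ONCE(l->interval, interval);
        WRITE_ONCE(l->burst, burst);
}

/* reader side */
static int limits_allow(struct limits *l)
{
        int interval = READ_ONCE(l->interval);  /* snapshot once ... */
        int burst = READ_ONCE(l->burst);

        if (!interval)
                return 1;                       /* ... then use only the locals */
        return burst > 0;
}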
1337 +diff --git a/mm/mmap.c b/mm/mmap.c
1338 +index 590840c3a3b5f..5ee3c91450de1 100644
1339 +--- a/mm/mmap.c
1340 ++++ b/mm/mmap.c
1341 +@@ -1640,8 +1640,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1342 + pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
1343 + return 0;
1344 +
1345 +- /* Do we need to track softdirty? */
1346 +- if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
1347 ++ /*
1348 ++ * Do we need to track softdirty? hugetlb does not support softdirty
1349 ++ * tracking yet.
1350 ++ */
1351 ++ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) &&
1352 ++ !is_vm_hugetlb_page(vma))
1353 + return 1;
1354 +
1355 + /* Specialty mapping? */
1356 +@@ -2568,6 +2572,18 @@ static void unmap_region(struct mm_struct *mm,
1357 + tlb_gather_mmu(&tlb, mm, start, end);
1358 + update_hiwater_rss(mm);
1359 + unmap_vmas(&tlb, vma, start, end);
1360 ++
1361 ++ /*
1362 ++ * Ensure we have no stale TLB entries by the time this mapping is
1363 ++ * removed from the rmap.
1364 ++ * Note that we don't have to worry about nested flushes here because
1365 ++ * we're holding the mm semaphore for removing the mapping - so any
1366 ++ * concurrent flush in this region has to be coming through the rmap,
1367 ++ * and we synchronize against that using the rmap lock.
1368 ++ */
1369 ++ if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
1370 ++ tlb_flush_mmu(&tlb);
1371 ++
1372 + free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
1373 + next ? next->vm_start : USER_PGTABLES_CEILING);
1374 + tlb_finish_mmu(&tlb, start, end);
1375 +diff --git a/mm/rmap.c b/mm/rmap.c
1376 +index e578eb942317b..3c2a439381529 100644
1377 +--- a/mm/rmap.c
1378 ++++ b/mm/rmap.c
1379 +@@ -82,7 +82,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
1380 + anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
1381 + if (anon_vma) {
1382 + atomic_set(&anon_vma->refcount, 1);
1383 +- anon_vma->degree = 1; /* Reference for first vma */
1384 ++ anon_vma->num_children = 0;
1385 ++ anon_vma->num_active_vmas = 0;
1386 + anon_vma->parent = anon_vma;
1387 + /*
1388 + * Initialise the anon_vma root to point to itself. If called
1389 +@@ -190,6 +191,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
1390 + anon_vma = anon_vma_alloc();
1391 + if (unlikely(!anon_vma))
1392 + goto out_enomem_free_avc;
1393 ++ anon_vma->num_children++; /* self-parent link for new root */
1394 + allocated = anon_vma;
1395 + }
1396 +
1397 +@@ -199,8 +201,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
1398 + if (likely(!vma->anon_vma)) {
1399 + vma->anon_vma = anon_vma;
1400 + anon_vma_chain_link(vma, avc, anon_vma);
1401 +- /* vma reference or self-parent link for new root */
1402 +- anon_vma->degree++;
1403 ++ anon_vma->num_active_vmas++;
1404 + allocated = NULL;
1405 + avc = NULL;
1406 + }
1407 +@@ -279,19 +280,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
1408 + anon_vma_chain_link(dst, avc, anon_vma);
1409 +
1410 + /*
1411 +- * Reuse existing anon_vma if its degree lower than two,
1412 +- * that means it has no vma and only one anon_vma child.
1413 ++ * Reuse existing anon_vma if it has no vma and only one
1414 ++ * anon_vma child.
1415 + *
1416 +- * Do not chose parent anon_vma, otherwise first child
1417 +- * will always reuse it. Root anon_vma is never reused:
1418 ++ * Root anon_vma is never reused:
1419 + * it has self-parent reference and at least one child.
1420 + */
1421 +- if (!dst->anon_vma && anon_vma != src->anon_vma &&
1422 +- anon_vma->degree < 2)
1423 ++ if (!dst->anon_vma &&
1424 ++ anon_vma->num_children < 2 &&
1425 ++ anon_vma->num_active_vmas == 0)
1426 + dst->anon_vma = anon_vma;
1427 + }
1428 + if (dst->anon_vma)
1429 +- dst->anon_vma->degree++;
1430 ++ dst->anon_vma->num_active_vmas++;
1431 + unlock_anon_vma_root(root);
1432 + return 0;
1433 +
1434 +@@ -341,6 +342,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
1435 + anon_vma = anon_vma_alloc();
1436 + if (!anon_vma)
1437 + goto out_error;
1438 ++ anon_vma->num_active_vmas++;
1439 + avc = anon_vma_chain_alloc(GFP_KERNEL);
1440 + if (!avc)
1441 + goto out_error_free_anon_vma;
1442 +@@ -361,7 +363,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
1443 + vma->anon_vma = anon_vma;
1444 + anon_vma_lock_write(anon_vma);
1445 + anon_vma_chain_link(vma, avc, anon_vma);
1446 +- anon_vma->parent->degree++;
1447 ++ anon_vma->parent->num_children++;
1448 + anon_vma_unlock_write(anon_vma);
1449 +
1450 + return 0;
1451 +@@ -393,7 +395,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
1452 + * to free them outside the lock.
1453 + */
1454 + if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
1455 +- anon_vma->parent->degree--;
1456 ++ anon_vma->parent->num_children--;
1457 + continue;
1458 + }
1459 +
1460 +@@ -401,7 +403,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
1461 + anon_vma_chain_free(avc);
1462 + }
1463 + if (vma->anon_vma)
1464 +- vma->anon_vma->degree--;
1465 ++ vma->anon_vma->num_active_vmas--;
1466 + unlock_anon_vma_root(root);
1467 +
1468 + /*
1469 +@@ -412,7 +414,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
1470 + list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
1471 + struct anon_vma *anon_vma = avc->anon_vma;
1472 +
1473 +- VM_WARN_ON(anon_vma->degree);
1474 ++ VM_WARN_ON(anon_vma->num_children);
1475 ++ VM_WARN_ON(anon_vma->num_active_vmas);
1476 + put_anon_vma(anon_vma);
1477 +
1478 + list_del(&avc->same_vma);
1479 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
1480 +index 0dfc47adccb17..65d20bdff0238 100644
1481 +--- a/net/bluetooth/l2cap_core.c
1482 ++++ b/net/bluetooth/l2cap_core.c
1483 +@@ -1826,11 +1826,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1484 + src_match = !bacmp(&c->src, src);
1485 + dst_match = !bacmp(&c->dst, dst);
1486 + if (src_match && dst_match) {
1487 +- c = l2cap_chan_hold_unless_zero(c);
1488 +- if (c) {
1489 +- read_unlock(&chan_list_lock);
1490 +- return c;
1491 +- }
1492 ++ if (!l2cap_chan_hold_unless_zero(c))
1493 ++ continue;
1494 ++
1495 ++ read_unlock(&chan_list_lock);
1496 ++ return c;
1497 + }
1498 +
1499 + /* Closest match */
1500 +diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
1501 +index 276b60262981c..b21c8a317be73 100644
1502 +--- a/net/bridge/netfilter/ebtable_broute.c
1503 ++++ b/net/bridge/netfilter/ebtable_broute.c
1504 +@@ -33,18 +33,10 @@ static struct ebt_replace_kernel initial_table = {
1505 + .entries = (char *)&initial_chain,
1506 + };
1507 +
1508 +-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
1509 +-{
1510 +- if (valid_hooks & ~(1 << NF_BR_BROUTING))
1511 +- return -EINVAL;
1512 +- return 0;
1513 +-}
1514 +-
1515 + static const struct ebt_table broute_table = {
1516 + .name = "broute",
1517 + .table = &initial_table,
1518 + .valid_hooks = 1 << NF_BR_BROUTING,
1519 +- .check = check,
1520 + .me = THIS_MODULE,
1521 + };
1522 +
1523 +diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
1524 +index 550324c516ee3..c71795e4c18cf 100644
1525 +--- a/net/bridge/netfilter/ebtable_filter.c
1526 ++++ b/net/bridge/netfilter/ebtable_filter.c
1527 +@@ -42,18 +42,10 @@ static struct ebt_replace_kernel initial_table = {
1528 + .entries = (char *)initial_chains,
1529 + };
1530 +
1531 +-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
1532 +-{
1533 +- if (valid_hooks & ~FILTER_VALID_HOOKS)
1534 +- return -EINVAL;
1535 +- return 0;
1536 +-}
1537 +-
1538 + static const struct ebt_table frame_filter = {
1539 + .name = "filter",
1540 + .table = &initial_table,
1541 + .valid_hooks = FILTER_VALID_HOOKS,
1542 +- .check = check,
1543 + .me = THIS_MODULE,
1544 + };
1545 +
1546 +diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
1547 +index c0fb3ca518af8..44dde9e635e24 100644
1548 +--- a/net/bridge/netfilter/ebtable_nat.c
1549 ++++ b/net/bridge/netfilter/ebtable_nat.c
1550 +@@ -42,18 +42,10 @@ static struct ebt_replace_kernel initial_table = {
1551 + .entries = (char *)initial_chains,
1552 + };
1553 +
1554 +-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
1555 +-{
1556 +- if (valid_hooks & ~NAT_VALID_HOOKS)
1557 +- return -EINVAL;
1558 +- return 0;
1559 +-}
1560 +-
1561 + static const struct ebt_table frame_nat = {
1562 + .name = "nat",
1563 + .table = &initial_table,
1564 + .valid_hooks = NAT_VALID_HOOKS,
1565 +- .check = check,
1566 + .me = THIS_MODULE,
1567 + };
1568 +
1569 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
1570 +index f59230e4fc295..ea27bacbd0057 100644
1571 +--- a/net/bridge/netfilter/ebtables.c
1572 ++++ b/net/bridge/netfilter/ebtables.c
1573 +@@ -1003,8 +1003,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
1574 + goto free_iterate;
1575 + }
1576 +
1577 +- /* the table doesn't like it */
1578 +- if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1579 ++ if (repl->valid_hooks != t->valid_hooks)
1580 + goto free_unlock;
1581 +
1582 + if (repl->num_counters && repl->num_counters != t->private->nentries) {
1583 +@@ -1197,11 +1196,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
1584 + if (ret != 0)
1585 + goto free_chainstack;
1586 +
1587 +- if (table->check && table->check(newinfo, table->valid_hooks)) {
1588 +- ret = -EINVAL;
1589 +- goto free_chainstack;
1590 +- }
1591 +-
1592 + table->private = newinfo;
1593 + rwlock_init(&table->lock);
1594 + mutex_lock(&ebt_mutex);
1595 +diff --git a/net/core/dev.c b/net/core/dev.c
1596 +index 42f6ff8b9703c..880b096eef8a6 100644
1597 +--- a/net/core/dev.c
1598 ++++ b/net/core/dev.c
1599 +@@ -4474,7 +4474,7 @@ static int netif_rx_internal(struct sk_buff *skb)
1600 + {
1601 + int ret;
1602 +
1603 +- net_timestamp_check(netdev_tstamp_prequeue, skb);
1604 ++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
1605 +
1606 + trace_netif_rx(skb);
1607 +
1608 +@@ -4794,7 +4794,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
1609 + int ret = NET_RX_DROP;
1610 + __be16 type;
1611 +
1612 +- net_timestamp_check(!netdev_tstamp_prequeue, skb);
1613 ++ net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
1614 +
1615 + trace_netif_receive_skb(skb);
1616 +
1617 +@@ -5146,7 +5146,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
1618 + {
1619 + int ret;
1620 +
1621 +- net_timestamp_check(netdev_tstamp_prequeue, skb);
1622 ++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
1623 +
1624 + if (skb_defer_rx_timestamp(skb))
1625 + return NET_RX_SUCCESS;
1626 +@@ -5176,7 +5176,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
1627 +
1628 + INIT_LIST_HEAD(&sublist);
1629 + list_for_each_entry_safe(skb, next, head, list) {
1630 +- net_timestamp_check(netdev_tstamp_prequeue, skb);
1631 ++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
1632 + skb_list_del_init(skb);
1633 + if (!skb_defer_rx_timestamp(skb))
1634 + list_add_tail(&skb->list, &sublist);
1635 +@@ -5851,7 +5851,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
1636 + net_rps_action_and_irq_enable(sd);
1637 + }
1638 +
1639 +- napi->weight = dev_rx_weight;
1640 ++ napi->weight = READ_ONCE(dev_rx_weight);
1641 + while (again) {
1642 + struct sk_buff *skb;
1643 +
1644 +@@ -6335,8 +6335,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
1645 + {
1646 + struct softnet_data *sd = this_cpu_ptr(&softnet_data);
1647 + unsigned long time_limit = jiffies +
1648 +- usecs_to_jiffies(netdev_budget_usecs);
1649 +- int budget = netdev_budget;
1650 ++ usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
1651 ++ int budget = READ_ONCE(netdev_budget);
1652 + LIST_HEAD(list);
1653 + LIST_HEAD(repoll);
1654 +
1655 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1656 +index 6233e9856016e..73042407eb5b4 100644
1657 +--- a/net/core/neighbour.c
1658 ++++ b/net/core/neighbour.c
1659 +@@ -224,11 +224,26 @@ static int neigh_del_timer(struct neighbour *n)
1660 + return 0;
1661 + }
1662 +
1663 +-static void pneigh_queue_purge(struct sk_buff_head *list)
1664 ++static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
1665 + {
1666 ++ struct sk_buff_head tmp;
1667 ++ unsigned long flags;
1668 + struct sk_buff *skb;
1669 +
1670 +- while ((skb = skb_dequeue(list)) != NULL) {
1671 ++ skb_queue_head_init(&tmp);
1672 ++ spin_lock_irqsave(&list->lock, flags);
1673 ++ skb = skb_peek(list);
1674 ++ while (skb != NULL) {
1675 ++ struct sk_buff *skb_next = skb_peek_next(skb, list);
1676 ++ if (net == NULL || net_eq(dev_net(skb->dev), net)) {
1677 ++ __skb_unlink(skb, list);
1678 ++ __skb_queue_tail(&tmp, skb);
1679 ++ }
1680 ++ skb = skb_next;
1681 ++ }
1682 ++ spin_unlock_irqrestore(&list->lock, flags);
1683 ++
1684 ++ while ((skb = __skb_dequeue(&tmp))) {
1685 + dev_put(skb->dev);
1686 + kfree_skb(skb);
1687 + }
1688 +@@ -297,9 +312,9 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
1689 + write_lock_bh(&tbl->lock);
1690 + neigh_flush_dev(tbl, dev);
1691 + pneigh_ifdown_and_unlock(tbl, dev);
1692 +-
1693 +- del_timer_sync(&tbl->proxy_timer);
1694 +- pneigh_queue_purge(&tbl->proxy_queue);
1695 ++ pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
1696 ++ if (skb_queue_empty_lockless(&tbl->proxy_queue))
1697 ++ del_timer_sync(&tbl->proxy_timer);
1698 + return 0;
1699 + }
1700 + EXPORT_SYMBOL(neigh_ifdown);
1701 +@@ -1614,7 +1629,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
1702 + /* It is not clean... Fix it to unload IPv6 module safely */
1703 + cancel_delayed_work_sync(&tbl->gc_work);
1704 + del_timer_sync(&tbl->proxy_timer);
1705 +- pneigh_queue_purge(&tbl->proxy_queue);
1706 ++ pneigh_queue_purge(&tbl->proxy_queue, NULL);
1707 + neigh_ifdown(tbl, NULL);
1708 + if (atomic_read(&tbl->entries))
1709 + pr_crit("neighbour leakage\n");
1710 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1711 +index c623c129d0ab6..e0be1f8651bbe 100644
1712 +--- a/net/core/skbuff.c
1713 ++++ b/net/core/skbuff.c
1714 +@@ -4377,7 +4377,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
1715 + {
1716 + bool ret;
1717 +
1718 +- if (likely(sysctl_tstamp_allow_data || tsonly))
1719 ++ if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
1720 + return true;
1721 +
1722 + read_lock_bh(&sk->sk_callback_lock);
1723 +diff --git a/net/core/sock.c b/net/core/sock.c
1724 +index 79f085df52cef..cd23a8e4556ca 100644
1725 +--- a/net/core/sock.c
1726 ++++ b/net/core/sock.c
1727 +@@ -2856,7 +2856,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1728 +
1729 + #ifdef CONFIG_NET_RX_BUSY_POLL
1730 + sk->sk_napi_id = 0;
1731 +- sk->sk_ll_usec = sysctl_net_busy_read;
1732 ++ sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
1733 + #endif
1734 +
1735 + sk->sk_max_pacing_rate = ~0U;
1736 +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
1737 +index 0a0bf80623658..d7e39167ceca0 100644
1738 +--- a/net/core/sysctl_net_core.c
1739 ++++ b/net/core/sysctl_net_core.c
1740 +@@ -231,14 +231,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
1741 + static int proc_do_dev_weight(struct ctl_table *table, int write,
1742 + void __user *buffer, size_t *lenp, loff_t *ppos)
1743 + {
1744 +- int ret;
1745 ++ static DEFINE_MUTEX(dev_weight_mutex);
1746 ++ int ret, weight;
1747 +
1748 ++ mutex_lock(&dev_weight_mutex);
1749 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
1750 +- if (ret != 0)
1751 +- return ret;
1752 +-
1753 +- dev_rx_weight = weight_p * dev_weight_rx_bias;
1754 +- dev_tx_weight = weight_p * dev_weight_tx_bias;
1755 ++ if (!ret && write) {
1756 ++ weight = READ_ONCE(weight_p);
1757 ++ WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
1758 ++ WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
1759 ++ }
1760 ++ mutex_unlock(&dev_weight_mutex);
1761 +
1762 + return ret;
1763 + }
1764 +diff --git a/net/key/af_key.c b/net/key/af_key.c
1765 +index af67e0d265c05..337c6bc8211ed 100644
1766 +--- a/net/key/af_key.c
1767 ++++ b/net/key/af_key.c
1768 +@@ -1707,9 +1707,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
1769 + pfk->registered |= (1<<hdr->sadb_msg_satype);
1770 + }
1771 +
1772 ++ mutex_lock(&pfkey_mutex);
1773 + xfrm_probe_algs();
1774 +
1775 + supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
1776 ++ mutex_unlock(&pfkey_mutex);
1777 ++
1778 + if (!supp_skb) {
1779 + if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
1780 + pfk->registered &= ~(1<<hdr->sadb_msg_satype);
1781 +diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
1782 +index 56cddadb65d0c..92e0514f624fa 100644
1783 +--- a/net/netfilter/Kconfig
1784 ++++ b/net/netfilter/Kconfig
1785 +@@ -117,7 +117,6 @@ config NF_CONNTRACK_ZONES
1786 +
1787 + config NF_CONNTRACK_PROCFS
1788 + bool "Supply CT list in procfs (OBSOLETE)"
1789 +- default y
1790 + depends on PROC_FS
1791 + ---help---
1792 + This option enables for the list of known conntrack entries
1793 +diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
1794 +index e259454b6a643..4fac2d9a4b885 100644
1795 +--- a/net/netfilter/nft_osf.c
1796 ++++ b/net/netfilter/nft_osf.c
1797 +@@ -81,9 +81,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx,
1798 + const struct nft_expr *expr,
1799 + const struct nft_data **data)
1800 + {
1801 +- return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
1802 +- (1 << NF_INET_PRE_ROUTING) |
1803 +- (1 << NF_INET_FORWARD));
1804 ++ unsigned int hooks;
1805 ++
1806 ++ switch (ctx->family) {
1807 ++ case NFPROTO_IPV4:
1808 ++ case NFPROTO_IPV6:
1809 ++ case NFPROTO_INET:
1810 ++ hooks = (1 << NF_INET_LOCAL_IN) |
1811 ++ (1 << NF_INET_PRE_ROUTING) |
1812 ++ (1 << NF_INET_FORWARD);
1813 ++ break;
1814 ++ default:
1815 ++ return -EOPNOTSUPP;
1816 ++ }
1817 ++
1818 ++ return nft_chain_validate_hooks(ctx->chain, hooks);
1819 + }
1820 +
1821 + static struct nft_expr_type nft_osf_type;
1822 +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
1823 +index fd87216bc0a99..5732b32ab9320 100644
1824 +--- a/net/netfilter/nft_payload.c
1825 ++++ b/net/netfilter/nft_payload.c
1826 +@@ -332,6 +332,8 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
1827 + const struct nlattr * const tb[])
1828 + {
1829 + struct nft_payload_set *priv = nft_expr_priv(expr);
1830 ++ u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
1831 ++ int err;
1832 +
1833 + priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
1834 + priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
1835 +@@ -339,11 +341,15 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
1836 + priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
1837 +
1838 + if (tb[NFTA_PAYLOAD_CSUM_TYPE])
1839 +- priv->csum_type =
1840 +- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
1841 +- if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
1842 +- priv->csum_offset =
1843 +- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
1844 ++ csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
1845 ++ if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
1846 ++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
1847 ++ &csum_offset);
1848 ++ if (err < 0)
1849 ++ return err;
1850 ++
1851 ++ priv->csum_offset = csum_offset;
1852 ++ }
1853 + if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
1854 + u32 flags;
1855 +
1856 +@@ -354,13 +360,14 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
1857 + priv->csum_flags = flags;
1858 + }
1859 +
1860 +- switch (priv->csum_type) {
1861 ++ switch (csum_type) {
1862 + case NFT_PAYLOAD_CSUM_NONE:
1863 + case NFT_PAYLOAD_CSUM_INET:
1864 + break;
1865 + default:
1866 + return -EOPNOTSUPP;
1867 + }
1868 ++ priv->csum_type = csum_type;
1869 +
1870 + return nft_validate_register_load(priv->sreg, priv->len);
1871 + }
1872 +@@ -398,6 +405,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
1873 + {
1874 + enum nft_payload_bases base;
1875 + unsigned int offset, len;
1876 ++ int err;
1877 +
1878 + if (tb[NFTA_PAYLOAD_BASE] == NULL ||
1879 + tb[NFTA_PAYLOAD_OFFSET] == NULL ||
1880 +@@ -423,8 +431,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
1881 + if (tb[NFTA_PAYLOAD_DREG] == NULL)
1882 + return ERR_PTR(-EINVAL);
1883 +
1884 +- offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
1885 +- len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
1886 ++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
1887 ++ if (err < 0)
1888 ++ return ERR_PTR(err);
1889 ++
1890 ++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
1891 ++ if (err < 0)
1892 ++ return ERR_PTR(err);
1893 +
1894 + if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
1895 + base != NFT_PAYLOAD_LL_HEADER)
1896 +diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
1897 +index 8ae948fd9dcfc..3fc55c81f16ac 100644
1898 +--- a/net/netfilter/nft_tunnel.c
1899 ++++ b/net/netfilter/nft_tunnel.c
1900 +@@ -104,6 +104,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = {
1901 +
1902 + static struct nft_expr_type nft_tunnel_type __read_mostly = {
1903 + .name = "tunnel",
1904 ++ .family = NFPROTO_NETDEV,
1905 + .ops = &nft_tunnel_get_ops,
1906 + .policy = nft_tunnel_policy,
1907 + .maxattr = NFTA_TUNNEL_MAX,
1908 +diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
1909 +index c318e5c9f6df3..56eea298b8ef7 100644
1910 +--- a/net/rose/rose_loopback.c
1911 ++++ b/net/rose/rose_loopback.c
1912 +@@ -99,7 +99,8 @@ static void rose_loopback_timer(struct timer_list *unused)
1913 + }
1914 +
1915 + if (frametype == ROSE_CALL_REQUEST) {
1916 +- if (!rose_loopback_neigh->dev) {
1917 ++ if (!rose_loopback_neigh->dev &&
1918 ++ !rose_loopback_neigh->loopback) {
1919 + kfree_skb(skb);
1920 + continue;
1921 + }
1922 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1923 +index cad2586c34734..c966dacf1130b 100644
1924 +--- a/net/sched/sch_generic.c
1925 ++++ b/net/sched/sch_generic.c
1926 +@@ -397,7 +397,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
1927 +
1928 + void __qdisc_run(struct Qdisc *q)
1929 + {
1930 +- int quota = dev_tx_weight;
1931 ++ int quota = READ_ONCE(dev_tx_weight);
1932 + int packets;
1933 +
1934 + while (qdisc_restart(q, &packets)) {
1935 +diff --git a/net/socket.c b/net/socket.c
1936 +index e5cc9f2b981ed..a5167f03c31db 100644
1937 +--- a/net/socket.c
1938 ++++ b/net/socket.c
1939 +@@ -1619,7 +1619,7 @@ int __sys_listen(int fd, int backlog)
1940 +
1941 + sock = sockfd_lookup_light(fd, &err, &fput_needed);
1942 + if (sock) {
1943 +- somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
1944 ++ somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
1945 + if ((unsigned int)backlog > somaxconn)
1946 + backlog = somaxconn;
1947 +
1948 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
1949 +index 3582f77bab6a8..1cd21a8c4deac 100644
1950 +--- a/net/xfrm/xfrm_policy.c
1951 ++++ b/net/xfrm/xfrm_policy.c
1952 +@@ -2403,6 +2403,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
1953 + if (pols[1]) {
1954 + if (IS_ERR(pols[1])) {
1955 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
1956 ++ xfrm_pol_put(pols[0]);
1957 + return 0;
1958 + }
1959 + pols[1]->curlft.use_time = ktime_get_real_seconds();
1960 +diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
1961 +index 51884c7b80697..4eac2ecb35fb9 100644
1962 +--- a/scripts/Makefile.modpost
1963 ++++ b/scripts/Makefile.modpost
1964 +@@ -51,8 +51,7 @@ obj := $(KBUILD_EXTMOD)
1965 + src := $(obj)
1966 +
1967 + # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
1968 +-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
1969 +- $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
1970 ++include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
1971 + endif
1972 +
1973 + include scripts/Makefile.lib
1974 +diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
1975 +index 3c789d03b629d..0ae7a74154146 100644
1976 +--- a/tools/testing/selftests/bpf/test_align.c
1977 ++++ b/tools/testing/selftests/bpf/test_align.c
1978 +@@ -359,15 +359,15 @@ static struct bpf_align_test tests[] = {
1979 + * is still (4n), fixed offset is not changed.
1980 + * Also, we create a new reg->id.
1981 + */
1982 +- {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
1983 ++ {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
1984 + /* At the time the word size load is performed from R5,
1985 + * its total fixed offset is NET_IP_ALIGN + reg->off (18)
1986 + * which is 20. Then the variable offset is (4n), so
1987 + * the total offset is 4-byte aligned and meets the
1988 + * load's requirements.
1989 + */
1990 +- {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
1991 +- {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
1992 ++ {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
1993 ++ {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
1994 + },
1995 + },
1996 + {
1997 +@@ -410,15 +410,15 @@ static struct bpf_align_test tests[] = {
1998 + /* Adding 14 makes R6 be (4n+2) */
1999 + {9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
2000 + /* Packet pointer has (4n+2) offset */
2001 +- {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
2002 +- {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
2003 ++ {11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
2004 ++ {13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
2005 + /* At the time the word size load is performed from R5,
2006 + * its total fixed offset is NET_IP_ALIGN + reg->off (0)
2007 + * which is 2. Then the variable offset is (4n+2), so
2008 + * the total offset is 4-byte aligned and meets the
2009 + * load's requirements.
2010 + */
2011 +- {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
2012 ++ {15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
2013 + /* Newly read value in R6 was shifted left by 2, so has
2014 + * known alignment of 4.
2015 + */
2016 +@@ -426,15 +426,15 @@ static struct bpf_align_test tests[] = {
2017 + /* Added (4n) to packet pointer's (4n+2) var_off, giving
2018 + * another (4n+2).
2019 + */
2020 +- {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
2021 +- {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
2022 ++ {19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
2023 ++ {21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
2024 + /* At the time the word size load is performed from R5,
2025 + * its total fixed offset is NET_IP_ALIGN + reg->off (0)
2026 + * which is 2. Then the variable offset is (4n+2), so
2027 + * the total offset is 4-byte aligned and meets the
2028 + * load's requirements.
2029 + */
2030 +- {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
2031 ++ {23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
2032 + },
2033 + },
2034 + {
2035 +@@ -469,11 +469,11 @@ static struct bpf_align_test tests[] = {
2036 + .matches = {
2037 + {4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
2038 + /* (ptr - ptr) << 2 == unknown, (4n) */
2039 +- {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
2040 ++ {6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
2041 + /* (4n) + 14 == (4n+2). We blow our bounds, because
2042 + * the add could overflow.
2043 + */
2044 +- {7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
2045 ++ {7, "R5=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
2046 + /* Checked s>=0 */
2047 + {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
2048 + /* packet pointer + nonnegative (4n+2) */
2049 +@@ -528,7 +528,7 @@ static struct bpf_align_test tests[] = {
2050 + /* New unknown value in R7 is (4n) */
2051 + {11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
2052 + /* Subtracting it from R6 blows our unsigned bounds */
2053 +- {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
2054 ++ {12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
2055 + /* Checked s>= 0 */
2056 + {14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
2057 + /* At the time the word size load is performed from R5,
2058 +@@ -537,7 +537,8 @@ static struct bpf_align_test tests[] = {
2059 + * the total offset is 4-byte aligned and meets the
2060 + * load's requirements.
2061 + */
2062 +- {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
2063 ++ {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
2064 ++
2065 + },
2066 + },
2067 + {
2068 +diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
2069 +index 858e551432339..9a103bd3542cf 100644
2070 +--- a/tools/testing/selftests/bpf/test_verifier.c
2071 ++++ b/tools/testing/selftests/bpf/test_verifier.c
2072 +@@ -9108,10 +9108,10 @@ static struct bpf_test tests[] = {
2073 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2074 + offsetof(struct xdp_md, data_end)),
2075 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2076 +- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2077 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
2078 + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
2079 + BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2080 +- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
2081 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
2082 + BPF_MOV64_IMM(BPF_REG_0, 0),
2083 + BPF_EXIT_INSN(),
2084 + },
2085 +@@ -9166,10 +9166,10 @@ static struct bpf_test tests[] = {
2086 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2087 + offsetof(struct xdp_md, data_end)),
2088 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2089 +- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2090 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
2091 + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
2092 + BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2093 +- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
2094 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
2095 + BPF_MOV64_IMM(BPF_REG_0, 0),
2096 + BPF_EXIT_INSN(),
2097 + },
2098 +@@ -9279,9 +9279,9 @@ static struct bpf_test tests[] = {
2099 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2100 + offsetof(struct xdp_md, data_end)),
2101 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2102 +- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2103 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
2104 + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
2105 +- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
2106 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
2107 + BPF_MOV64_IMM(BPF_REG_0, 0),
2108 + BPF_EXIT_INSN(),
2109 + },
2110 +@@ -9451,9 +9451,9 @@ static struct bpf_test tests[] = {
2111 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2112 + offsetof(struct xdp_md, data_end)),
2113 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2114 +- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2115 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
2116 + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
2117 +- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
2118 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
2119 + BPF_MOV64_IMM(BPF_REG_0, 0),
2120 + BPF_EXIT_INSN(),
2121 + },
2122 +@@ -9564,10 +9564,10 @@ static struct bpf_test tests[] = {
2123 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2124 + offsetof(struct xdp_md, data)),
2125 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2126 +- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2127 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
2128 + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1),
2129 + BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2130 +- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
2131 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
2132 + BPF_MOV64_IMM(BPF_REG_0, 0),
2133 + BPF_EXIT_INSN(),
2134 + },
2135 +@@ -9622,10 +9622,10 @@ static struct bpf_test tests[] = {
2136 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2137 + offsetof(struct xdp_md, data)),
2138 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2139 +- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2140 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
2141 + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1),
2142 + BPF_JMP_IMM(BPF_JA, 0, 0, 1),
2143 +- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
2144 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
2145 + BPF_MOV64_IMM(BPF_REG_0, 0),
2146 + BPF_EXIT_INSN(),
2147 + },
2148 +@@ -9735,9 +9735,9 @@ static struct bpf_test tests[] = {
2149 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2150 + offsetof(struct xdp_md, data)),
2151 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2152 +- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2153 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
2154 + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1),
2155 +- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
2156 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
2157 + BPF_MOV64_IMM(BPF_REG_0, 0),
2158 + BPF_EXIT_INSN(),
2159 + },
2160 +@@ -9907,9 +9907,9 @@ static struct bpf_test tests[] = {
2161 + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2162 + offsetof(struct xdp_md, data)),
2163 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
2164 +- BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
2165 ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
2166 + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1),
2167 +- BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8),
2168 ++ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6),
2169 + BPF_MOV64_IMM(BPF_REG_0, 0),
2170 + BPF_EXIT_INSN(),
2171 + },