Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.4 commit in: /
Date: Mon, 05 Sep 2022 12:05:02 +0000
Message-Id: 1662379485.7f3e1b14d218c20ae5f0bc29e2b4629635b8b88d.mpagano@gentoo
1 commit: 7f3e1b14d218c20ae5f0bc29e2b4629635b8b88d
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Mon Sep 5 12:04:45 2022 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Mon Sep 5 12:04:45 2022 +0000
6 URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=7f3e1b14d218c20ae5f0bc29e2b4629635b8b88d
7
8 Linux patch 5.4.212
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1211_linux-5.4.212.patch | 2741 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 2745 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index bf698f0d..daf9aa4b 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -887,6 +887,10 @@ Patch: 1210_linux-5.4.211.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.4.211
23
24 +Patch: 1211_linux-5.4.212.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.4.212
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1211_linux-5.4.212.patch b/1211_linux-5.4.212.patch
33 new file mode 100644
34 index 00000000..ef15b11d
35 --- /dev/null
36 +++ b/1211_linux-5.4.212.patch
37 @@ -0,0 +1,2741 @@
38 +diff --git a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
39 +index 9393c50b5afc9..c98fd11907cc8 100644
40 +--- a/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
41 ++++ b/Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst
42 +@@ -230,6 +230,20 @@ The possible values in this file are:
43 + * - 'Mitigation: Clear CPU buffers'
44 + - The processor is vulnerable and the CPU buffer clearing mitigation is
45 + enabled.
46 ++ * - 'Unknown: No mitigations'
47 ++ - The processor vulnerability status is unknown because it is
48 ++ out of Servicing period. Mitigation is not attempted.
49 ++
50 ++Definitions:
51 ++------------
52 ++
53 ++Servicing period: The process of providing functional and security updates to
54 ++Intel processors or platforms, utilizing the Intel Platform Update (IPU)
55 ++process or other similar mechanisms.
56 ++
57 ++End of Servicing Updates (ESU): ESU is the date at which Intel will no
58 ++longer provide Servicing, such as through IPU or other similar update
59 ++processes. ESU dates will typically be aligned to end of quarter.
60 +
61 + If the processor is vulnerable then the following information is appended to
62 + the above information:
63 +diff --git a/Makefile b/Makefile
64 +index e54b9a1659b4f..cecfe23f521f1 100644
65 +--- a/Makefile
66 ++++ b/Makefile
67 +@@ -1,7 +1,7 @@
68 + # SPDX-License-Identifier: GPL-2.0
69 + VERSION = 5
70 + PATCHLEVEL = 4
71 +-SUBLEVEL = 211
72 ++SUBLEVEL = 212
73 + EXTRAVERSION =
74 + NAME = Kleptomaniac Octopus
75 +
76 +diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
77 +index 286cec4d86d7b..cc6ed74960501 100644
78 +--- a/arch/parisc/kernel/unaligned.c
79 ++++ b/arch/parisc/kernel/unaligned.c
80 +@@ -107,7 +107,7 @@
81 + #define R1(i) (((i)>>21)&0x1f)
82 + #define R2(i) (((i)>>16)&0x1f)
83 + #define R3(i) ((i)&0x1f)
84 +-#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
85 ++#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
86 + #define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
87 + #define IM5_2(i) IM((i)>>16,5)
88 + #define IM5_3(i) IM((i),5)
89 +diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
90 +index f0bc4dc3e9bf0..6511d15ace45e 100644
91 +--- a/arch/s390/hypfs/hypfs_diag.c
92 ++++ b/arch/s390/hypfs/hypfs_diag.c
93 +@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
94 + int rc;
95 +
96 + if (diag204_probe()) {
97 +- pr_err("The hardware system does not support hypfs\n");
98 ++ pr_info("The hardware system does not support hypfs\n");
99 + return -ENODATA;
100 + }
101 +
102 +diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
103 +index 70139d0791b61..ca4fc66a361fb 100644
104 +--- a/arch/s390/hypfs/inode.c
105 ++++ b/arch/s390/hypfs/inode.c
106 +@@ -501,9 +501,9 @@ fail_hypfs_sprp_exit:
107 + hypfs_vm_exit();
108 + fail_hypfs_diag_exit:
109 + hypfs_diag_exit();
110 ++ pr_err("Initialization of hypfs failed with rc=%i\n", rc);
111 + fail_dbfs_exit:
112 + hypfs_dbfs_exit();
113 +- pr_err("Initialization of hypfs failed with rc=%i\n", rc);
114 + return rc;
115 + }
116 + device_initcall(hypfs_init)
117 +diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
118 +index 4e6299e2ca947..fdd5f37ac1fb8 100644
119 +--- a/arch/s390/kernel/process.c
120 ++++ b/arch/s390/kernel/process.c
121 +@@ -76,6 +76,18 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
122 +
123 + memcpy(dst, src, arch_task_struct_size);
124 + dst->thread.fpu.regs = dst->thread.fpu.fprs;
125 ++
126 ++ /*
127 ++ * Don't transfer over the runtime instrumentation or the guarded
128 ++ * storage control block pointers. These fields are cleared here instead
129 ++ * of in copy_thread() to avoid premature freeing of associated memory
130 ++ * on fork() failure. Wait to clear the RI flag because ->stack still
131 ++ * refers to the source thread.
132 ++ */
133 ++ dst->thread.ri_cb = NULL;
134 ++ dst->thread.gs_cb = NULL;
135 ++ dst->thread.gs_bc_cb = NULL;
136 ++
137 + return 0;
138 + }
139 +
140 +@@ -133,13 +145,11 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
141 + frame->childregs.flags = 0;
142 + if (new_stackp)
143 + frame->childregs.gprs[15] = new_stackp;
144 +-
145 +- /* Don't copy runtime instrumentation info */
146 +- p->thread.ri_cb = NULL;
147 ++ /*
148 ++ * Clear the runtime instrumentation flag after the above childregs
149 ++ * copy. The CB pointer was already cleared in arch_dup_task_struct().
150 ++ */
151 + frame->childregs.psw.mask &= ~PSW_MASK_RI;
152 +- /* Don't copy guarded storage control block */
153 +- p->thread.gs_cb = NULL;
154 +- p->thread.gs_bc_cb = NULL;
155 +
156 + /* Set a new TLS ? */
157 + if (clone_flags & CLONE_SETTLS) {
158 +diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
159 +index 7b0bb475c1664..9770381776a63 100644
160 +--- a/arch/s390/mm/fault.c
161 ++++ b/arch/s390/mm/fault.c
162 +@@ -432,7 +432,9 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
163 + flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
164 + if (user_mode(regs))
165 + flags |= FAULT_FLAG_USER;
166 +- if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
167 ++ if ((trans_exc_code & store_indication) == 0x400)
168 ++ access = VM_WRITE;
169 ++ if (access == VM_WRITE)
170 + flags |= FAULT_FLAG_WRITE;
171 + down_read(&mm->mmap_sem);
172 +
173 +diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
174 +index aec6e63c6a04a..0258e0065771a 100644
175 +--- a/arch/x86/events/intel/uncore_snb.c
176 ++++ b/arch/x86/events/intel/uncore_snb.c
177 +@@ -575,6 +575,22 @@ int snb_pci2phy_map_init(int devid)
178 + return 0;
179 + }
180 +
181 ++static u64 snb_uncore_imc_read_counter(struct intel_uncore_box *box, struct perf_event *event)
182 ++{
183 ++ struct hw_perf_event *hwc = &event->hw;
184 ++
185 ++ /*
186 ++ * SNB IMC counters are 32-bit and are laid out back to back
187 ++ * in MMIO space. Therefore we must use a 32-bit accessor function
188 ++ * using readq() from uncore_mmio_read_counter() causes problems
189 ++ * because it is reading 64-bit at a time. This is okay for the
190 ++ * uncore_perf_event_update() function because it drops the upper
191 ++ * 32-bits but not okay for plain uncore_read_counter() as invoked
192 ++ * in uncore_pmu_event_start().
193 ++ */
194 ++ return (u64)readl(box->io_addr + hwc->event_base);
195 ++}
196 ++
197 + static struct pmu snb_uncore_imc_pmu = {
198 + .task_ctx_nr = perf_invalid_context,
199 + .event_init = snb_uncore_imc_event_init,
200 +@@ -594,7 +610,7 @@ static struct intel_uncore_ops snb_uncore_imc_ops = {
201 + .disable_event = snb_uncore_imc_disable_event,
202 + .enable_event = snb_uncore_imc_enable_event,
203 + .hw_config = snb_uncore_imc_hw_config,
204 +- .read_counter = uncore_mmio_read_counter,
205 ++ .read_counter = snb_uncore_imc_read_counter,
206 + };
207 +
208 + static struct intel_uncore_type snb_uncore_imc = {
209 +diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
210 +index a3e32bc938562..736b0e412344b 100644
211 +--- a/arch/x86/include/asm/cpufeatures.h
212 ++++ b/arch/x86/include/asm/cpufeatures.h
213 +@@ -407,6 +407,7 @@
214 + #define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
215 + #define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
216 + #define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
217 +-#define X86_BUG_EIBRS_PBRSB X86_BUG(26) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
218 ++#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
219 ++#define X86_BUG_EIBRS_PBRSB X86_BUG(27) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
220 +
221 + #endif /* _ASM_X86_CPUFEATURES_H */
222 +diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
223 +index 57efa90f3fbd0..c90d91cb14341 100644
224 +--- a/arch/x86/kernel/cpu/bugs.c
225 ++++ b/arch/x86/kernel/cpu/bugs.c
226 +@@ -396,7 +396,8 @@ static void __init mmio_select_mitigation(void)
227 + u64 ia32_cap;
228 +
229 + if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
230 +- cpu_mitigations_off()) {
231 ++ boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
232 ++ cpu_mitigations_off()) {
233 + mmio_mitigation = MMIO_MITIGATION_OFF;
234 + return;
235 + }
236 +@@ -501,6 +502,8 @@ out:
237 + pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
238 + if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
239 + pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
240 ++ else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
241 ++ pr_info("MMIO Stale Data: Unknown: No mitigations\n");
242 + }
243 +
244 + static void __init md_clear_select_mitigation(void)
245 +@@ -1880,6 +1883,9 @@ static ssize_t tsx_async_abort_show_state(char *buf)
246 +
247 + static ssize_t mmio_stale_data_show_state(char *buf)
248 + {
249 ++ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
250 ++ return sysfs_emit(buf, "Unknown: No mitigations\n");
251 ++
252 + if (mmio_mitigation == MMIO_MITIGATION_OFF)
253 + return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
254 +
255 +@@ -2007,6 +2013,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
256 + return srbds_show_state(buf);
257 +
258 + case X86_BUG_MMIO_STALE_DATA:
259 ++ case X86_BUG_MMIO_UNKNOWN:
260 + return mmio_stale_data_show_state(buf);
261 +
262 + default:
263 +@@ -2063,6 +2070,9 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
264 +
265 + ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
266 + {
267 +- return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
268 ++ if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
269 ++ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
270 ++ else
271 ++ return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
272 + }
273 + #endif
274 +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
275 +index b926b7244d42d..59413e741ecf1 100644
276 +--- a/arch/x86/kernel/cpu/common.c
277 ++++ b/arch/x86/kernel/cpu/common.c
278 +@@ -1026,6 +1026,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
279 + #define NO_ITLB_MULTIHIT BIT(7)
280 + #define NO_SPECTRE_V2 BIT(8)
281 + #define NO_EIBRS_PBRSB BIT(9)
282 ++#define NO_MMIO BIT(10)
283 +
284 + #define VULNWL(_vendor, _family, _model, _whitelist) \
285 + { X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
286 +@@ -1046,6 +1047,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
287 + VULNWL(NSC, 5, X86_MODEL_ANY, NO_SPECULATION),
288 +
289 + /* Intel Family 6 */
290 ++ VULNWL_INTEL(TIGERLAKE, NO_MMIO),
291 ++ VULNWL_INTEL(TIGERLAKE_L, NO_MMIO),
292 ++ VULNWL_INTEL(ALDERLAKE, NO_MMIO),
293 ++ VULNWL_INTEL(ALDERLAKE_L, NO_MMIO),
294 ++
295 + VULNWL_INTEL(ATOM_SALTWELL, NO_SPECULATION | NO_ITLB_MULTIHIT),
296 + VULNWL_INTEL(ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
297 + VULNWL_INTEL(ATOM_SALTWELL_MID, NO_SPECULATION | NO_ITLB_MULTIHIT),
298 +@@ -1064,9 +1070,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
299 + VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
300 + VULNWL_INTEL(ATOM_AIRMONT_NP, NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
301 +
302 +- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
303 +- VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
304 +- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
305 ++ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
306 ++ VULNWL_INTEL(ATOM_GOLDMONT_D, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
307 ++ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
308 +
309 + /*
310 + * Technically, swapgs isn't serializing on AMD (despite it previously
311 +@@ -1081,18 +1087,18 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
312 + VULNWL_INTEL(ATOM_TREMONT_D, NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
313 +
314 + /* AMD Family 0xf - 0x12 */
315 +- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
316 +- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
317 +- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
318 +- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
319 ++ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
320 ++ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
321 ++ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
322 ++ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
323 +
324 + /* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
325 +- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
326 +- VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT),
327 ++ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
328 ++ VULNWL_HYGON(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
329 +
330 + /* Zhaoxin Family 7 */
331 +- VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2),
332 +- VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2),
333 ++ VULNWL(CENTAUR, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_MMIO),
334 ++ VULNWL(ZHAOXIN, 7, X86_MODEL_ANY, NO_SPECTRE_V2 | NO_MMIO),
335 + {}
336 + };
337 +
338 +@@ -1234,10 +1240,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
339 + * Affected CPU list is generally enough to enumerate the vulnerability,
340 + * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
341 + * not want the guest to enumerate the bug.
342 ++ *
343 ++ * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
344 ++ * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
345 + */
346 +- if (cpu_matches(cpu_vuln_blacklist, MMIO) &&
347 +- !arch_cap_mmio_immune(ia32_cap))
348 +- setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
349 ++ if (!arch_cap_mmio_immune(ia32_cap)) {
350 ++ if (cpu_matches(cpu_vuln_blacklist, MMIO))
351 ++ setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
352 ++ else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
353 ++ setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
354 ++ }
355 +
356 + if (cpu_has(c, X86_FEATURE_IBRS_ENHANCED) &&
357 + !cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
358 +diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c
359 +index b934f9f68a168..4e7c49fcf0030 100644
360 +--- a/arch/x86/kernel/unwind_orc.c
361 ++++ b/arch/x86/kernel/unwind_orc.c
362 +@@ -90,22 +90,27 @@ static struct orc_entry *orc_find(unsigned long ip);
363 + static struct orc_entry *orc_ftrace_find(unsigned long ip)
364 + {
365 + struct ftrace_ops *ops;
366 +- unsigned long caller;
367 ++ unsigned long tramp_addr, offset;
368 +
369 + ops = ftrace_ops_trampoline(ip);
370 + if (!ops)
371 + return NULL;
372 +
373 ++ /* Set tramp_addr to the start of the code copied by the trampoline */
374 + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
375 +- caller = (unsigned long)ftrace_regs_call;
376 ++ tramp_addr = (unsigned long)ftrace_regs_caller;
377 + else
378 +- caller = (unsigned long)ftrace_call;
379 ++ tramp_addr = (unsigned long)ftrace_caller;
380 ++
381 ++ /* Now place tramp_addr to the location within the trampoline ip is at */
382 ++ offset = ip - ops->trampoline;
383 ++ tramp_addr += offset;
384 +
385 + /* Prevent unlikely recursion */
386 +- if (ip == caller)
387 ++ if (ip == tramp_addr)
388 + return NULL;
389 +
390 +- return orc_find(caller);
391 ++ return orc_find(tramp_addr);
392 + }
393 + #else
394 + static struct orc_entry *orc_ftrace_find(unsigned long ip)
395 +diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
396 +index 41feb88ee92d6..458b4d99fb4e5 100644
397 +--- a/drivers/acpi/processor_thermal.c
398 ++++ b/drivers/acpi/processor_thermal.c
399 +@@ -150,7 +150,7 @@ void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
400 + unsigned int cpu;
401 +
402 + for_each_cpu(cpu, policy->related_cpus) {
403 +- struct acpi_processor *pr = per_cpu(processors, policy->cpu);
404 ++ struct acpi_processor *pr = per_cpu(processors, cpu);
405 +
406 + if (pr)
407 + freq_qos_remove_request(&pr->thermal_req);
408 +diff --git a/drivers/android/binder.c b/drivers/android/binder.c
409 +index b9fb2a9269443..c273d0df69394 100644
410 +--- a/drivers/android/binder.c
411 ++++ b/drivers/android/binder.c
412 +@@ -6083,6 +6083,7 @@ const struct file_operations binder_fops = {
413 + .open = binder_open,
414 + .flush = binder_flush,
415 + .release = binder_release,
416 ++ .may_pollfree = true,
417 + };
418 +
419 + static int __init init_binder_device(const char *name)
420 +diff --git a/drivers/block/loop.c b/drivers/block/loop.c
421 +index 6b3e27b8cd245..b8f57b1c2864b 100644
422 +--- a/drivers/block/loop.c
423 ++++ b/drivers/block/loop.c
424 +@@ -1397,6 +1397,11 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
425 + info->lo_number = lo->lo_number;
426 + info->lo_offset = lo->lo_offset;
427 + info->lo_sizelimit = lo->lo_sizelimit;
428 ++
429 ++ /* loff_t vars have been assigned __u64 */
430 ++ if (lo->lo_offset < 0 || lo->lo_sizelimit < 0)
431 ++ return -EOVERFLOW;
432 ++
433 + info->lo_flags = lo->lo_flags;
434 + memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
435 + memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
436 +diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
437 +index c6e9b7bd7618c..80ccdf96093ff 100644
438 +--- a/drivers/dma-buf/udmabuf.c
439 ++++ b/drivers/dma-buf/udmabuf.c
440 +@@ -287,7 +287,23 @@ static struct miscdevice udmabuf_misc = {
441 +
442 + static int __init udmabuf_dev_init(void)
443 + {
444 +- return misc_register(&udmabuf_misc);
445 ++ int ret;
446 ++
447 ++ ret = misc_register(&udmabuf_misc);
448 ++ if (ret < 0) {
449 ++ pr_err("Could not initialize udmabuf device\n");
450 ++ return ret;
451 ++ }
452 ++
453 ++ ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
454 ++ DMA_BIT_MASK(64));
455 ++ if (ret < 0) {
456 ++ pr_err("Could not setup DMA mask for udmabuf device\n");
457 ++ misc_deregister(&udmabuf_misc);
458 ++ return ret;
459 ++ }
460 ++
461 ++ return 0;
462 + }
463 +
464 + static void __exit udmabuf_dev_exit(void)
465 +diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
466 +index eca67d5d5b10d..721be82ccebec 100644
467 +--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
468 ++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
469 +@@ -546,9 +546,11 @@ static void dce112_get_pix_clk_dividers_helper (
470 + switch (pix_clk_params->color_depth) {
471 + case COLOR_DEPTH_101010:
472 + actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 5) >> 2;
473 ++ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
474 + break;
475 + case COLOR_DEPTH_121212:
476 + actual_pixel_clock_100hz = (actual_pixel_clock_100hz * 6) >> 2;
477 ++ actual_pixel_clock_100hz -= actual_pixel_clock_100hz % 10;
478 + break;
479 + case COLOR_DEPTH_161616:
480 + actual_pixel_clock_100hz = actual_pixel_clock_100hz * 2;
481 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
482 +index 8b2f29f6dabd2..068e79fa3490d 100644
483 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
484 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
485 +@@ -118,6 +118,12 @@ struct mpcc *mpc1_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
486 + while (tmp_mpcc != NULL) {
487 + if (tmp_mpcc->dpp_id == dpp_id)
488 + return tmp_mpcc;
489 ++
490 ++ /* avoid circular linked list */
491 ++ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
492 ++ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
493 ++ break;
494 ++
495 + tmp_mpcc = tmp_mpcc->mpcc_bot;
496 + }
497 + return NULL;
498 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
499 +index e74a07d03fde9..4b0200e96eb77 100644
500 +--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
501 ++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
502 +@@ -425,6 +425,11 @@ void optc1_enable_optc_clock(struct timing_generator *optc, bool enable)
503 + OTG_CLOCK_ON, 1,
504 + 1, 1000);
505 + } else {
506 ++
507 ++ //last chance to clear underflow, otherwise, it will always there due to clock is off.
508 ++ if (optc->funcs->is_optc_underflow_occurred(optc) == true)
509 ++ optc->funcs->clear_optc_underflow(optc);
510 ++
511 + REG_UPDATE_2(OTG_CLOCK_CONTROL,
512 + OTG_CLOCK_GATE_DIS, 0,
513 + OTG_CLOCK_EN, 0);
514 +diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
515 +index 5a188b2bc033c..0a00bd8e00abc 100644
516 +--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
517 ++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
518 +@@ -488,6 +488,12 @@ struct mpcc *mpc2_get_mpcc_for_dpp(struct mpc_tree *tree, int dpp_id)
519 + while (tmp_mpcc != NULL) {
520 + if (tmp_mpcc->dpp_id == 0xf || tmp_mpcc->dpp_id == dpp_id)
521 + return tmp_mpcc;
522 ++
523 ++ /* avoid circular linked list */
524 ++ ASSERT(tmp_mpcc != tmp_mpcc->mpcc_bot);
525 ++ if (tmp_mpcc == tmp_mpcc->mpcc_bot)
526 ++ break;
527 ++
528 + tmp_mpcc = tmp_mpcc->mpcc_bot;
529 + }
530 + return NULL;
531 +diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
532 +index a3b151b29bd71..fc616db4231bb 100644
533 +--- a/drivers/hid/hid-steam.c
534 ++++ b/drivers/hid/hid-steam.c
535 +@@ -134,6 +134,11 @@ static int steam_recv_report(struct steam_device *steam,
536 + int ret;
537 +
538 + r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
539 ++ if (!r) {
540 ++ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
541 ++ return -EINVAL;
542 ++ }
543 ++
544 + if (hid_report_len(r) < 64)
545 + return -EINVAL;
546 +
547 +@@ -165,6 +170,11 @@ static int steam_send_report(struct steam_device *steam,
548 + int ret;
549 +
550 + r = steam->hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0];
551 ++ if (!r) {
552 ++ hid_err(steam->hdev, "No HID_FEATURE_REPORT submitted - nothing to read\n");
553 ++ return -EINVAL;
554 ++ }
555 ++
556 + if (hid_report_len(r) < 64)
557 + return -EINVAL;
558 +
559 +diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
560 +index b382c6bf2c5cb..f8ef6268f3f29 100644
561 +--- a/drivers/hid/hidraw.c
562 ++++ b/drivers/hid/hidraw.c
563 +@@ -346,10 +346,13 @@ static int hidraw_release(struct inode * inode, struct file * file)
564 + unsigned int minor = iminor(inode);
565 + struct hidraw_list *list = file->private_data;
566 + unsigned long flags;
567 ++ int i;
568 +
569 + mutex_lock(&minors_lock);
570 +
571 + spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
572 ++ for (i = list->tail; i < list->head; i++)
573 ++ kfree(list->buffer[i].value);
574 + list_del(&list->node);
575 + spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
576 + kfree(list);
577 +diff --git a/drivers/md/md.c b/drivers/md/md.c
578 +index 11fd3b32b5621..5226a23c72dba 100644
579 +--- a/drivers/md/md.c
580 ++++ b/drivers/md/md.c
581 +@@ -6094,6 +6094,7 @@ void md_stop(struct mddev *mddev)
582 + /* stop the array and free an attached data structures.
583 + * This is called from dm-raid
584 + */
585 ++ __md_stop_writes(mddev);
586 + __md_stop(mddev);
587 + bioset_exit(&mddev->bio_set);
588 + bioset_exit(&mddev->sync_set);
589 +diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
590 +index 11e7fcfc3f195..d101fa8d61bb0 100644
591 +--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
592 ++++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
593 +@@ -2611,6 +2611,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
594 + del_timer_sync(&hdw->encoder_run_timer);
595 + del_timer_sync(&hdw->encoder_wait_timer);
596 + flush_work(&hdw->workpoll);
597 ++ v4l2_device_unregister(&hdw->v4l2_dev);
598 + usb_free_urb(hdw->ctl_read_urb);
599 + usb_free_urb(hdw->ctl_write_urb);
600 + kfree(hdw->ctl_read_buffer);
601 +diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
602 +index 31ed7616e84e7..0d6cd2a4cc416 100644
603 +--- a/drivers/net/bonding/bond_3ad.c
604 ++++ b/drivers/net/bonding/bond_3ad.c
605 +@@ -1997,30 +1997,24 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
606 + */
607 + void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
608 + {
609 +- /* check that the bond is not initialized yet */
610 +- if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
611 +- bond->dev->dev_addr)) {
612 +-
613 +- BOND_AD_INFO(bond).aggregator_identifier = 0;
614 +-
615 +- BOND_AD_INFO(bond).system.sys_priority =
616 +- bond->params.ad_actor_sys_prio;
617 +- if (is_zero_ether_addr(bond->params.ad_actor_system))
618 +- BOND_AD_INFO(bond).system.sys_mac_addr =
619 +- *((struct mac_addr *)bond->dev->dev_addr);
620 +- else
621 +- BOND_AD_INFO(bond).system.sys_mac_addr =
622 +- *((struct mac_addr *)bond->params.ad_actor_system);
623 ++ BOND_AD_INFO(bond).aggregator_identifier = 0;
624 ++ BOND_AD_INFO(bond).system.sys_priority =
625 ++ bond->params.ad_actor_sys_prio;
626 ++ if (is_zero_ether_addr(bond->params.ad_actor_system))
627 ++ BOND_AD_INFO(bond).system.sys_mac_addr =
628 ++ *((struct mac_addr *)bond->dev->dev_addr);
629 ++ else
630 ++ BOND_AD_INFO(bond).system.sys_mac_addr =
631 ++ *((struct mac_addr *)bond->params.ad_actor_system);
632 +
633 +- /* initialize how many times this module is called in one
634 +- * second (should be about every 100ms)
635 +- */
636 +- ad_ticks_per_sec = tick_resolution;
637 ++ /* initialize how many times this module is called in one
638 ++ * second (should be about every 100ms)
639 ++ */
640 ++ ad_ticks_per_sec = tick_resolution;
641 +
642 +- bond_3ad_initiate_agg_selection(bond,
643 +- AD_AGGREGATOR_SELECTION_TIMER *
644 +- ad_ticks_per_sec);
645 +- }
646 ++ bond_3ad_initiate_agg_selection(bond,
647 ++ AD_AGGREGATOR_SELECTION_TIMER *
648 ++ ad_ticks_per_sec);
649 + }
650 +
651 + /**
652 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
653 +index 452be9749827a..3434ad6824a05 100644
654 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
655 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
656 +@@ -597,7 +597,7 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset)
657 + hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n;
658 + hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n;
659 + if (bp->flags & BNXT_FLAG_CHIP_P5)
660 +- hw_resc->max_irqs -= vf_msix * n;
661 ++ hw_resc->max_nqs -= vf_msix;
662 +
663 + rc = pf->active_vfs;
664 + }
665 +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
666 +index 0be13a90ff792..d155181b939e4 100644
667 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
668 ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
669 +@@ -1211,7 +1211,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
670 + struct cyclecounter cc;
671 + unsigned long flags;
672 + u32 incval = 0;
673 +- u32 tsauxc = 0;
674 + u32 fuse0 = 0;
675 +
676 + /* For some of the boards below this mask is technically incorrect.
677 +@@ -1246,18 +1245,6 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
678 + case ixgbe_mac_x550em_a:
679 + case ixgbe_mac_X550:
680 + cc.read = ixgbe_ptp_read_X550;
681 +-
682 +- /* enable SYSTIME counter */
683 +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
684 +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
685 +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
686 +- tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
687 +- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
688 +- tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
689 +- IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
690 +- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
691 +-
692 +- IXGBE_WRITE_FLUSH(hw);
693 + break;
694 + case ixgbe_mac_X540:
695 + cc.read = ixgbe_ptp_read_82599;
696 +@@ -1289,6 +1276,50 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
697 + spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
698 + }
699 +
700 ++/**
701 ++ * ixgbe_ptp_init_systime - Initialize SYSTIME registers
702 ++ * @adapter: the ixgbe private board structure
703 ++ *
704 ++ * Initialize and start the SYSTIME registers.
705 ++ */
706 ++static void ixgbe_ptp_init_systime(struct ixgbe_adapter *adapter)
707 ++{
708 ++ struct ixgbe_hw *hw = &adapter->hw;
709 ++ u32 tsauxc;
710 ++
711 ++ switch (hw->mac.type) {
712 ++ case ixgbe_mac_X550EM_x:
713 ++ case ixgbe_mac_x550em_a:
714 ++ case ixgbe_mac_X550:
715 ++ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
716 ++
717 ++ /* Reset SYSTIME registers to 0 */
718 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0);
719 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
720 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
721 ++
722 ++ /* Reset interrupt settings */
723 ++ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS);
724 ++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC);
725 ++
726 ++ /* Activate the SYSTIME counter */
727 ++ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC,
728 ++ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME);
729 ++ break;
730 ++ case ixgbe_mac_X540:
731 ++ case ixgbe_mac_82599EB:
732 ++ /* Reset SYSTIME registers to 0 */
733 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0);
734 ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0);
735 ++ break;
736 ++ default:
737 ++ /* Other devices aren't supported */
738 ++ return;
739 ++ };
740 ++
741 ++ IXGBE_WRITE_FLUSH(hw);
742 ++}
743 ++
744 + /**
745 + * ixgbe_ptp_reset
746 + * @adapter: the ixgbe private board structure
747 +@@ -1315,6 +1346,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
748 +
749 + ixgbe_ptp_start_cyclecounter(adapter);
750 +
751 ++ ixgbe_ptp_init_systime(adapter);
752 ++
753 + spin_lock_irqsave(&adapter->tmreg_lock, flags);
754 + timecounter_init(&adapter->hw_tc, &adapter->hw_cc,
755 + ktime_to_ns(ktime_get_real()));
756 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
757 +index 88b51f64a64ea..f448a139e222e 100644
758 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
759 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
760 +@@ -1434,6 +1434,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
761 +
762 + params->num_tc = 1;
763 + params->tunneled_offload_en = false;
764 ++ if (rep->vport != MLX5_VPORT_UPLINK)
765 ++ params->vlan_strip_disable = true;
766 +
767 + mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
768 +
769 +diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
770 +index 383d72415c659..87327086ea8ca 100644
771 +--- a/drivers/net/ethernet/moxa/moxart_ether.c
772 ++++ b/drivers/net/ethernet/moxa/moxart_ether.c
773 +@@ -74,11 +74,6 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
774 + static void moxart_mac_free_memory(struct net_device *ndev)
775 + {
776 + struct moxart_mac_priv_t *priv = netdev_priv(ndev);
777 +- int i;
778 +-
779 +- for (i = 0; i < RX_DESC_NUM; i++)
780 +- dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
781 +- priv->rx_buf_size, DMA_FROM_DEVICE);
782 +
783 + if (priv->tx_desc_base)
784 + dma_free_coherent(&priv->pdev->dev,
785 +@@ -193,6 +188,7 @@ static int moxart_mac_open(struct net_device *ndev)
786 + static int moxart_mac_stop(struct net_device *ndev)
787 + {
788 + struct moxart_mac_priv_t *priv = netdev_priv(ndev);
789 ++ int i;
790 +
791 + napi_disable(&priv->napi);
792 +
793 +@@ -204,6 +200,11 @@ static int moxart_mac_stop(struct net_device *ndev)
794 + /* disable all functions */
795 + writel(0, priv->base + REG_MAC_CTRL);
796 +
797 ++ /* unmap areas mapped in moxart_mac_setup_desc_ring() */
798 ++ for (i = 0; i < RX_DESC_NUM; i++)
799 ++ dma_unmap_single(&priv->pdev->dev, priv->rx_mapping[i],
800 ++ priv->rx_buf_size, DMA_FROM_DEVICE);
801 ++
802 + return 0;
803 + }
804 +
805 +diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
806 +index 1cedb634f4f7b..f01078b2581ce 100644
807 +--- a/drivers/net/ipvlan/ipvtap.c
808 ++++ b/drivers/net/ipvlan/ipvtap.c
809 +@@ -194,7 +194,7 @@ static struct notifier_block ipvtap_notifier_block __read_mostly = {
810 + .notifier_call = ipvtap_device_event,
811 + };
812 +
813 +-static int ipvtap_init(void)
814 ++static int __init ipvtap_init(void)
815 + {
816 + int err;
817 +
818 +@@ -228,7 +228,7 @@ out1:
819 + }
820 + module_init(ipvtap_init);
821 +
822 +-static void ipvtap_exit(void)
823 ++static void __exit ipvtap_exit(void)
824 + {
825 + rtnl_link_unregister(&ipvtap_link_ops);
826 + unregister_netdevice_notifier(&ipvtap_notifier_block);
827 +diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
828 +index 4c02439d3776d..ca3f18aa16acb 100644
829 +--- a/drivers/pinctrl/pinctrl-amd.c
830 ++++ b/drivers/pinctrl/pinctrl-amd.c
831 +@@ -793,6 +793,7 @@ static int amd_gpio_suspend(struct device *dev)
832 + {
833 + struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
834 + struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
835 ++ unsigned long flags;
836 + int i;
837 +
838 + for (i = 0; i < desc->npins; i++) {
839 +@@ -801,7 +802,9 @@ static int amd_gpio_suspend(struct device *dev)
840 + if (!amd_gpio_should_save(gpio_dev, pin))
841 + continue;
842 +
843 +- gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
844 ++ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
845 ++ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin * 4) & ~PIN_IRQ_PENDING;
846 ++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
847 + }
848 +
849 + return 0;
850 +@@ -811,6 +814,7 @@ static int amd_gpio_resume(struct device *dev)
851 + {
852 + struct amd_gpio *gpio_dev = dev_get_drvdata(dev);
853 + struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
854 ++ unsigned long flags;
855 + int i;
856 +
857 + for (i = 0; i < desc->npins; i++) {
858 +@@ -819,7 +823,10 @@ static int amd_gpio_resume(struct device *dev)
859 + if (!amd_gpio_should_save(gpio_dev, pin))
860 + continue;
861 +
862 +- writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
863 ++ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
864 ++ gpio_dev->saved_regs[i] |= readl(gpio_dev->base + pin * 4) & PIN_IRQ_PENDING;
865 ++ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin * 4);
866 ++ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
867 + }
868 +
869 + return 0;
870 +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
871 +index 5087ed6afbdc3..8d1b19b2322f5 100644
872 +--- a/drivers/scsi/storvsc_drv.c
873 ++++ b/drivers/scsi/storvsc_drv.c
874 +@@ -1846,7 +1846,7 @@ static int storvsc_probe(struct hv_device *device,
875 + */
876 + host_dev->handle_error_wq =
877 + alloc_ordered_workqueue("storvsc_error_wq_%d",
878 +- WQ_MEM_RECLAIM,
879 ++ 0,
880 + host->host_no);
881 + if (!host_dev->handle_error_wq)
882 + goto err_out2;
883 +diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
884 +index a9399f2b39308..8bedf0504e92f 100644
885 +--- a/drivers/usb/cdns3/gadget.c
886 ++++ b/drivers/usb/cdns3/gadget.c
887 +@@ -2166,6 +2166,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
888 + struct usb_request *request;
889 + struct cdns3_request *priv_req;
890 + struct cdns3_trb *trb = NULL;
891 ++ struct cdns3_trb trb_tmp;
892 + int ret;
893 + int val;
894 +
895 +@@ -2175,8 +2176,10 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
896 + if (request) {
897 + priv_req = to_cdns3_request(request);
898 + trb = priv_req->trb;
899 +- if (trb)
900 ++ if (trb) {
901 ++ trb_tmp = *trb;
902 + trb->control = trb->control ^ TRB_CYCLE;
903 ++ }
904 + }
905 +
906 + writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
907 +@@ -2191,7 +2194,8 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
908 +
909 + if (request) {
910 + if (trb)
911 +- trb->control = trb->control ^ TRB_CYCLE;
912 ++ *trb = trb_tmp;
913 ++
914 + cdns3_rearm_transfer(priv_ep, 1);
915 + }
916 +
917 +diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
918 +index 1dcf02e12af4f..8ae010f07d7da 100644
919 +--- a/drivers/video/fbdev/pm2fb.c
920 ++++ b/drivers/video/fbdev/pm2fb.c
921 +@@ -616,6 +616,11 @@ static int pm2fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
922 + return -EINVAL;
923 + }
924 +
925 ++ if (!var->pixclock) {
926 ++ DPRINTK("pixclock is zero\n");
927 ++ return -EINVAL;
928 ++ }
929 ++
930 + if (PICOS2KHZ(var->pixclock) > PM2_MAX_PIXCLOCK) {
931 + DPRINTK("pixclock too high (%ldKHz)\n",
932 + PICOS2KHZ(var->pixclock));
933 +diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
934 +index cd77c0621a555..c2e5fe972f566 100644
935 +--- a/fs/btrfs/ctree.h
936 ++++ b/fs/btrfs/ctree.h
937 +@@ -2727,7 +2727,7 @@ struct btrfs_dir_item *
938 + btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
939 + struct btrfs_root *root,
940 + struct btrfs_path *path, u64 dir,
941 +- u64 objectid, const char *name, int name_len,
942 ++ u64 index, const char *name, int name_len,
943 + int mod);
944 + struct btrfs_dir_item *
945 + btrfs_search_dir_index_item(struct btrfs_root *root,
946 +diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
947 +index 1cb7f5d79765d..444e1e5d012e4 100644
948 +--- a/fs/btrfs/dev-replace.c
949 ++++ b/fs/btrfs/dev-replace.c
950 +@@ -125,7 +125,7 @@ no_valid_dev_replace_entry_found:
951 + if (btrfs_find_device(fs_info->fs_devices,
952 + BTRFS_DEV_REPLACE_DEVID, NULL, NULL, false)) {
953 + btrfs_err(fs_info,
954 +- "replace devid present without an active replace item");
955 ++"replace without active item, run 'device scan --forget' on the target device");
956 + ret = -EUCLEAN;
957 + } else {
958 + dev_replace->srcdev = NULL;
959 +@@ -918,8 +918,7 @@ int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
960 + up_write(&dev_replace->rwsem);
961 +
962 + /* Scrub for replace must not be running in suspended state */
963 +- ret = btrfs_scrub_cancel(fs_info);
964 +- ASSERT(ret != -ENOTCONN);
965 ++ btrfs_scrub_cancel(fs_info);
966 +
967 + trans = btrfs_start_transaction(root, 0);
968 + if (IS_ERR(trans)) {
969 +diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
970 +index 863367c2c6205..98c6faa8ce15b 100644
971 +--- a/fs/btrfs/dir-item.c
972 ++++ b/fs/btrfs/dir-item.c
973 +@@ -171,10 +171,40 @@ out_free:
974 + return 0;
975 + }
976 +
977 ++static struct btrfs_dir_item *btrfs_lookup_match_dir(
978 ++ struct btrfs_trans_handle *trans,
979 ++ struct btrfs_root *root, struct btrfs_path *path,
980 ++ struct btrfs_key *key, const char *name,
981 ++ int name_len, int mod)
982 ++{
983 ++ const int ins_len = (mod < 0 ? -1 : 0);
984 ++ const int cow = (mod != 0);
985 ++ int ret;
986 ++
987 ++ ret = btrfs_search_slot(trans, root, key, path, ins_len, cow);
988 ++ if (ret < 0)
989 ++ return ERR_PTR(ret);
990 ++ if (ret > 0)
991 ++ return ERR_PTR(-ENOENT);
992 ++
993 ++ return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
994 ++}
995 ++
996 + /*
997 +- * lookup a directory item based on name. 'dir' is the objectid
998 +- * we're searching in, and 'mod' tells us if you plan on deleting the
999 +- * item (use mod < 0) or changing the options (use mod > 0)
1000 ++ * Lookup for a directory item by name.
1001 ++ *
1002 ++ * @trans: The transaction handle to use. Can be NULL if @mod is 0.
1003 ++ * @root: The root of the target tree.
1004 ++ * @path: Path to use for the search.
1005 ++ * @dir: The inode number (objectid) of the directory.
1006 ++ * @name: The name associated to the directory entry we are looking for.
1007 ++ * @name_len: The length of the name.
1008 ++ * @mod: Used to indicate if the tree search is meant for a read only
1009 ++ * lookup, for a modification lookup or for a deletion lookup, so
1010 ++ * its value should be 0, 1 or -1, respectively.
1011 ++ *
1012 ++ * Returns: NULL if the dir item does not exists, an error pointer if an error
1013 ++ * happened, or a pointer to a dir item if a dir item exists for the given name.
1014 + */
1015 + struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
1016 + struct btrfs_root *root,
1017 +@@ -182,23 +212,18 @@ struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans,
1018 + const char *name, int name_len,
1019 + int mod)
1020 + {
1021 +- int ret;
1022 + struct btrfs_key key;
1023 +- int ins_len = mod < 0 ? -1 : 0;
1024 +- int cow = mod != 0;
1025 ++ struct btrfs_dir_item *di;
1026 +
1027 + key.objectid = dir;
1028 + key.type = BTRFS_DIR_ITEM_KEY;
1029 +-
1030 + key.offset = btrfs_name_hash(name, name_len);
1031 +
1032 +- ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
1033 +- if (ret < 0)
1034 +- return ERR_PTR(ret);
1035 +- if (ret > 0)
1036 ++ di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
1037 ++ if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
1038 + return NULL;
1039 +
1040 +- return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
1041 ++ return di;
1042 + }
1043 +
1044 + int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
1045 +@@ -212,7 +237,6 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
1046 + int slot;
1047 + struct btrfs_path *path;
1048 +
1049 +-
1050 + path = btrfs_alloc_path();
1051 + if (!path)
1052 + return -ENOMEM;
1053 +@@ -221,20 +245,20 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir,
1054 + key.type = BTRFS_DIR_ITEM_KEY;
1055 + key.offset = btrfs_name_hash(name, name_len);
1056 +
1057 +- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1058 +-
1059 +- /* return back any errors */
1060 +- if (ret < 0)
1061 +- goto out;
1062 ++ di = btrfs_lookup_match_dir(NULL, root, path, &key, name, name_len, 0);
1063 ++ if (IS_ERR(di)) {
1064 ++ ret = PTR_ERR(di);
1065 ++ /* Nothing found, we're safe */
1066 ++ if (ret == -ENOENT) {
1067 ++ ret = 0;
1068 ++ goto out;
1069 ++ }
1070 +
1071 +- /* nothing found, we're safe */
1072 +- if (ret > 0) {
1073 +- ret = 0;
1074 +- goto out;
1075 ++ if (ret < 0)
1076 ++ goto out;
1077 + }
1078 +
1079 + /* we found an item, look for our name in the item */
1080 +- di = btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
1081 + if (di) {
1082 + /* our exact name was found */
1083 + ret = -EEXIST;
1084 +@@ -261,35 +285,42 @@ out:
1085 + }
1086 +
1087 + /*
1088 +- * lookup a directory item based on index. 'dir' is the objectid
1089 +- * we're searching in, and 'mod' tells us if you plan on deleting the
1090 +- * item (use mod < 0) or changing the options (use mod > 0)
1091 ++ * Lookup for a directory index item by name and index number.
1092 ++ *
1093 ++ * @trans: The transaction handle to use. Can be NULL if @mod is 0.
1094 ++ * @root: The root of the target tree.
1095 ++ * @path: Path to use for the search.
1096 ++ * @dir: The inode number (objectid) of the directory.
1097 ++ * @index: The index number.
1098 ++ * @name: The name associated to the directory entry we are looking for.
1099 ++ * @name_len: The length of the name.
1100 ++ * @mod: Used to indicate if the tree search is meant for a read only
1101 ++ * lookup, for a modification lookup or for a deletion lookup, so
1102 ++ * its value should be 0, 1 or -1, respectively.
1103 + *
1104 +- * The name is used to make sure the index really points to the name you were
1105 +- * looking for.
1106 ++ * Returns: NULL if the dir index item does not exists, an error pointer if an
1107 ++ * error happened, or a pointer to a dir item if the dir index item exists and
1108 ++ * matches the criteria (name and index number).
1109 + */
1110 + struct btrfs_dir_item *
1111 + btrfs_lookup_dir_index_item(struct btrfs_trans_handle *trans,
1112 + struct btrfs_root *root,
1113 + struct btrfs_path *path, u64 dir,
1114 +- u64 objectid, const char *name, int name_len,
1115 ++ u64 index, const char *name, int name_len,
1116 + int mod)
1117 + {
1118 +- int ret;
1119 ++ struct btrfs_dir_item *di;
1120 + struct btrfs_key key;
1121 +- int ins_len = mod < 0 ? -1 : 0;
1122 +- int cow = mod != 0;
1123 +
1124 + key.objectid = dir;
1125 + key.type = BTRFS_DIR_INDEX_KEY;
1126 +- key.offset = objectid;
1127 ++ key.offset = index;
1128 +
1129 +- ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
1130 +- if (ret < 0)
1131 +- return ERR_PTR(ret);
1132 +- if (ret > 0)
1133 +- return ERR_PTR(-ENOENT);
1134 +- return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
1135 ++ di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
1136 ++ if (di == ERR_PTR(-ENOENT))
1137 ++ return NULL;
1138 ++
1139 ++ return di;
1140 + }
1141 +
1142 + struct btrfs_dir_item *
1143 +@@ -346,21 +377,18 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
1144 + const char *name, u16 name_len,
1145 + int mod)
1146 + {
1147 +- int ret;
1148 + struct btrfs_key key;
1149 +- int ins_len = mod < 0 ? -1 : 0;
1150 +- int cow = mod != 0;
1151 ++ struct btrfs_dir_item *di;
1152 +
1153 + key.objectid = dir;
1154 + key.type = BTRFS_XATTR_ITEM_KEY;
1155 + key.offset = btrfs_name_hash(name, name_len);
1156 +- ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
1157 +- if (ret < 0)
1158 +- return ERR_PTR(ret);
1159 +- if (ret > 0)
1160 ++
1161 ++ di = btrfs_lookup_match_dir(trans, root, path, &key, name, name_len, mod);
1162 ++ if (IS_ERR(di) && PTR_ERR(di) == -ENOENT)
1163 + return NULL;
1164 +
1165 +- return btrfs_match_dir_item_name(root->fs_info, path, name, name_len);
1166 ++ return di;
1167 + }
1168 +
1169 + /*
1170 +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1171 +index 7755a0362a3ad..20c5db8ef8427 100644
1172 +--- a/fs/btrfs/inode.c
1173 ++++ b/fs/btrfs/inode.c
1174 +@@ -9751,8 +9751,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
1175 + /* force full log commit if subvolume involved. */
1176 + btrfs_set_log_full_commit(trans);
1177 + } else {
1178 +- btrfs_pin_log_trans(root);
1179 +- root_log_pinned = true;
1180 + ret = btrfs_insert_inode_ref(trans, dest,
1181 + new_dentry->d_name.name,
1182 + new_dentry->d_name.len,
1183 +@@ -9768,8 +9766,6 @@ static int btrfs_rename_exchange(struct inode *old_dir,
1184 + /* force full log commit if subvolume involved. */
1185 + btrfs_set_log_full_commit(trans);
1186 + } else {
1187 +- btrfs_pin_log_trans(dest);
1188 +- dest_log_pinned = true;
1189 + ret = btrfs_insert_inode_ref(trans, root,
1190 + old_dentry->d_name.name,
1191 + old_dentry->d_name.len,
1192 +@@ -9797,6 +9793,29 @@ static int btrfs_rename_exchange(struct inode *old_dir,
1193 + BTRFS_I(new_inode), 1);
1194 + }
1195 +
1196 ++ /*
1197 ++ * Now pin the logs of the roots. We do it to ensure that no other task
1198 ++ * can sync the logs while we are in progress with the rename, because
1199 ++ * that could result in an inconsistency in case any of the inodes that
1200 ++ * are part of this rename operation were logged before.
1201 ++ *
1202 ++ * We pin the logs even if at this precise moment none of the inodes was
1203 ++ * logged before. This is because right after we checked for that, some
1204 ++ * other task fsyncing some other inode not involved with this rename
1205 ++ * operation could log that one of our inodes exists.
1206 ++ *
1207 ++ * We don't need to pin the logs before the above calls to
1208 ++ * btrfs_insert_inode_ref(), since those don't ever need to change a log.
1209 ++ */
1210 ++ if (old_ino != BTRFS_FIRST_FREE_OBJECTID) {
1211 ++ btrfs_pin_log_trans(root);
1212 ++ root_log_pinned = true;
1213 ++ }
1214 ++ if (new_ino != BTRFS_FIRST_FREE_OBJECTID) {
1215 ++ btrfs_pin_log_trans(dest);
1216 ++ dest_log_pinned = true;
1217 ++ }
1218 ++
1219 + /* src is a subvolume */
1220 + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
1221 + ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
1222 +@@ -10046,8 +10065,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1223 + /* force full log commit if subvolume involved. */
1224 + btrfs_set_log_full_commit(trans);
1225 + } else {
1226 +- btrfs_pin_log_trans(root);
1227 +- log_pinned = true;
1228 + ret = btrfs_insert_inode_ref(trans, dest,
1229 + new_dentry->d_name.name,
1230 + new_dentry->d_name.len,
1231 +@@ -10071,6 +10088,25 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1232 + if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
1233 + ret = btrfs_unlink_subvol(trans, old_dir, old_dentry);
1234 + } else {
1235 ++ /*
1236 ++ * Now pin the log. We do it to ensure that no other task can
1237 ++ * sync the log while we are in progress with the rename, as
1238 ++ * that could result in an inconsistency in case any of the
1239 ++ * inodes that are part of this rename operation were logged
1240 ++ * before.
1241 ++ *
1242 ++ * We pin the log even if at this precise moment none of the
1243 ++ * inodes was logged before. This is because right after we
1244 ++ * checked for that, some other task fsyncing some other inode
1245 ++ * not involved with this rename operation could log that one of
1246 ++ * our inodes exists.
1247 ++ *
1248 ++ * We don't need to pin the logs before the above call to
1249 ++ * btrfs_insert_inode_ref(), since that does not need to change
1250 ++ * a log.
1251 ++ */
1252 ++ btrfs_pin_log_trans(root);
1253 ++ log_pinned = true;
1254 + ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
1255 + BTRFS_I(d_inode(old_dentry)),
1256 + old_dentry->d_name.name,
1257 +diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
1258 +index 0d07ebe511e7f..ba4e198811a47 100644
1259 +--- a/fs/btrfs/root-tree.c
1260 ++++ b/fs/btrfs/root-tree.c
1261 +@@ -371,9 +371,10 @@ int btrfs_del_root_ref(struct btrfs_trans_handle *trans, u64 root_id,
1262 + key.offset = ref_id;
1263 + again:
1264 + ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
1265 +- if (ret < 0)
1266 ++ if (ret < 0) {
1267 ++ err = ret;
1268 + goto out;
1269 +- if (ret == 0) {
1270 ++ } else if (ret == 0) {
1271 + leaf = path->nodes[0];
1272 + ref = btrfs_item_ptr(leaf, path->slots[0],
1273 + struct btrfs_root_ref);
1274 +diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
1275 +index 368c43c6cbd08..d15de5abb562d 100644
1276 +--- a/fs/btrfs/tree-checker.c
1277 ++++ b/fs/btrfs/tree-checker.c
1278 +@@ -1019,7 +1019,8 @@ static void extent_err(const struct extent_buffer *eb, int slot,
1279 + }
1280 +
1281 + static int check_extent_item(struct extent_buffer *leaf,
1282 +- struct btrfs_key *key, int slot)
1283 ++ struct btrfs_key *key, int slot,
1284 ++ struct btrfs_key *prev_key)
1285 + {
1286 + struct btrfs_fs_info *fs_info = leaf->fs_info;
1287 + struct btrfs_extent_item *ei;
1288 +@@ -1230,6 +1231,26 @@ static int check_extent_item(struct extent_buffer *leaf,
1289 + total_refs, inline_refs);
1290 + return -EUCLEAN;
1291 + }
1292 ++
1293 ++ if ((prev_key->type == BTRFS_EXTENT_ITEM_KEY) ||
1294 ++ (prev_key->type == BTRFS_METADATA_ITEM_KEY)) {
1295 ++ u64 prev_end = prev_key->objectid;
1296 ++
1297 ++ if (prev_key->type == BTRFS_METADATA_ITEM_KEY)
1298 ++ prev_end += fs_info->nodesize;
1299 ++ else
1300 ++ prev_end += prev_key->offset;
1301 ++
1302 ++ if (unlikely(prev_end > key->objectid)) {
1303 ++ extent_err(leaf, slot,
1304 ++ "previous extent [%llu %u %llu] overlaps current extent [%llu %u %llu]",
1305 ++ prev_key->objectid, prev_key->type,
1306 ++ prev_key->offset, key->objectid, key->type,
1307 ++ key->offset);
1308 ++ return -EUCLEAN;
1309 ++ }
1310 ++ }
1311 ++
1312 + return 0;
1313 + }
1314 +
1315 +@@ -1343,7 +1364,7 @@ static int check_leaf_item(struct extent_buffer *leaf,
1316 + break;
1317 + case BTRFS_EXTENT_ITEM_KEY:
1318 + case BTRFS_METADATA_ITEM_KEY:
1319 +- ret = check_extent_item(leaf, key, slot);
1320 ++ ret = check_extent_item(leaf, key, slot, prev_key);
1321 + break;
1322 + case BTRFS_TREE_BLOCK_REF_KEY:
1323 + case BTRFS_SHARED_DATA_REF_KEY:
1324 +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
1325 +index bebd74267bed6..926b1d34e55cc 100644
1326 +--- a/fs/btrfs/tree-log.c
1327 ++++ b/fs/btrfs/tree-log.c
1328 +@@ -918,8 +918,7 @@ static noinline int inode_in_dir(struct btrfs_root *root,
1329 + di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
1330 + index, name, name_len, 0);
1331 + if (IS_ERR(di)) {
1332 +- if (PTR_ERR(di) != -ENOENT)
1333 +- ret = PTR_ERR(di);
1334 ++ ret = PTR_ERR(di);
1335 + goto out;
1336 + } else if (di) {
1337 + btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
1338 +@@ -1171,8 +1170,7 @@ next:
1339 + di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1340 + ref_index, name, namelen, 0);
1341 + if (IS_ERR(di)) {
1342 +- if (PTR_ERR(di) != -ENOENT)
1343 +- return PTR_ERR(di);
1344 ++ return PTR_ERR(di);
1345 + } else if (di) {
1346 + ret = drop_one_dir_item(trans, root, path, dir, di);
1347 + if (ret)
1348 +@@ -2022,9 +2020,6 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1349 + goto out;
1350 + }
1351 +
1352 +- if (dst_di == ERR_PTR(-ENOENT))
1353 +- dst_di = NULL;
1354 +-
1355 + if (IS_ERR(dst_di)) {
1356 + ret = PTR_ERR(dst_di);
1357 + goto out;
1358 +@@ -2309,7 +2304,7 @@ again:
1359 + dir_key->offset,
1360 + name, name_len, 0);
1361 + }
1362 +- if (!log_di || log_di == ERR_PTR(-ENOENT)) {
1363 ++ if (!log_di) {
1364 + btrfs_dir_item_key_to_cpu(eb, di, &location);
1365 + btrfs_release_path(path);
1366 + btrfs_release_path(log_path);
1367 +@@ -3522,8 +3517,7 @@ out_unlock:
1368 + if (err == -ENOSPC) {
1369 + btrfs_set_log_full_commit(trans);
1370 + err = 0;
1371 +- } else if (err < 0 && err != -ENOENT) {
1372 +- /* ENOENT can be returned if the entry hasn't been fsynced yet */
1373 ++ } else if (err < 0) {
1374 + btrfs_abort_transaction(trans, err);
1375 + }
1376 +
1377 +diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
1378 +index 48858510739b2..cd7ddf24157a3 100644
1379 +--- a/fs/btrfs/xattr.c
1380 ++++ b/fs/btrfs/xattr.c
1381 +@@ -387,6 +387,9 @@ static int btrfs_xattr_handler_set(const struct xattr_handler *handler,
1382 + const char *name, const void *buffer,
1383 + size_t size, int flags)
1384 + {
1385 ++ if (btrfs_root_readonly(BTRFS_I(inode)->root))
1386 ++ return -EROFS;
1387 ++
1388 + name = xattr_full_name(handler, name);
1389 + return btrfs_setxattr_trans(inode, name, buffer, size, flags);
1390 + }
1391 +diff --git a/fs/io_uring.c b/fs/io_uring.c
1392 +index e73969fa96bcb..501c7e14c07cf 100644
1393 +--- a/fs/io_uring.c
1394 ++++ b/fs/io_uring.c
1395 +@@ -1908,6 +1908,9 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1396 + __poll_t mask;
1397 + u16 events;
1398 +
1399 ++ if (req->file->f_op->may_pollfree)
1400 ++ return -EOPNOTSUPP;
1401 ++
1402 + if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1403 + return -EINVAL;
1404 + if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1405 +diff --git a/fs/signalfd.c b/fs/signalfd.c
1406 +index 3e94d181930fd..c3415d969ecfc 100644
1407 +--- a/fs/signalfd.c
1408 ++++ b/fs/signalfd.c
1409 +@@ -248,6 +248,7 @@ static const struct file_operations signalfd_fops = {
1410 + .poll = signalfd_poll,
1411 + .read = signalfd_read,
1412 + .llseek = noop_llseek,
1413 ++ .may_pollfree = true,
1414 + };
1415 +
1416 + static int do_signalfd4(int ufd, sigset_t *mask, int flags)
1417 +diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
1418 +index 66397ed10acb7..69ab5942bd14f 100644
1419 +--- a/include/asm-generic/sections.h
1420 ++++ b/include/asm-generic/sections.h
1421 +@@ -114,7 +114,7 @@ static inline bool memory_contains(void *begin, void *end, void *virt,
1422 + /**
1423 + * memory_intersects - checks if the region occupied by an object intersects
1424 + * with another memory region
1425 +- * @begin: virtual address of the beginning of the memory regien
1426 ++ * @begin: virtual address of the beginning of the memory region
1427 + * @end: virtual address of the end of the memory region
1428 + * @virt: virtual address of the memory object
1429 + * @size: size of the memory object
1430 +@@ -127,7 +127,10 @@ static inline bool memory_intersects(void *begin, void *end, void *virt,
1431 + {
1432 + void *vend = virt + size;
1433 +
1434 +- return (virt >= begin && virt < end) || (vend >= begin && vend < end);
1435 ++ if (virt < end && vend > begin)
1436 ++ return true;
1437 ++
1438 ++ return false;
1439 + }
1440 +
1441 + /**
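The memory_intersects() fix above replaces an endpoint-only test with the standard half-open interval overlap predicate, which also catches an object that fully contains the region. A minimal standalone sketch of that predicate in plain C (the names and test harness are this example's, not the kernel's):

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  /* Two half-open ranges [begin, end) and [virt, virt + size) overlap
   * exactly when each one starts before the other ends. */
  static bool ranges_intersect(const char *begin, const char *end,
                               const char *virt, size_t size)
  {
      const char *vend = virt + size;

      return virt < end && vend > begin;
  }

  int main(void)
  {
      char buf[16];

      /* object inside the region: overlaps */
      printf("%d\n", ranges_intersect(buf, buf + 16, buf + 4, 4));
      /* object covering the whole region: overlaps; the old
       * endpoint-only check missed this case */
      printf("%d\n", ranges_intersect(buf + 4, buf + 8, buf, 16));
      /* disjoint ranges: no overlap */
      printf("%d\n", ranges_intersect(buf, buf + 4, buf + 8, 4));
      return 0;
  }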
1442 +diff --git a/include/linux/fs.h b/include/linux/fs.h
1443 +index ef118b8ba6993..4ecbe12f62152 100644
1444 +--- a/include/linux/fs.h
1445 ++++ b/include/linux/fs.h
1446 +@@ -1859,6 +1859,7 @@ struct file_operations {
1447 + struct file *file_out, loff_t pos_out,
1448 + loff_t len, unsigned int remap_flags);
1449 + int (*fadvise)(struct file *, loff_t, loff_t, int);
1450 ++ bool may_pollfree;
1451 + } __randomize_layout;
1452 +
1453 + struct inode_operations {
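The signalfd and io_uring hunks above work together: file_operations grows a may_pollfree flag, signalfd sets it, and io_poll_add() refuses to arm an asynchronous poll on such files because their wait queue can be freed while still polled. A userspace-style sketch of that gating pattern, assuming a Linux errno.h for EOPNOTSUPP; the struct and function names are illustrative, not kernel API:

  #include <errno.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Illustrative stand-ins for the kernel structures involved. */
  struct file_ops {
      const char *name;
      bool may_pollfree;    /* wait queue may vanish while polled */
  };

  struct file {
      const struct file_ops *f_op;
  };

  /* Mirror of the io_poll_add() check: refuse to arm an asynchronous
   * poll on files whose wait queue can disappear underneath us. */
  static int poll_add(const struct file *file)
  {
      if (file->f_op->may_pollfree)
          return -EOPNOTSUPP;
      /* ... arm the poll here ... */
      return 0;
  }

  int main(void)
  {
      static const struct file_ops signalfd_like = { "signalfd", true };
      static const struct file_ops regular = { "regular", false };
      struct file a = { &signalfd_like }, b = { &regular };

      printf("%d %d\n", poll_add(&a), poll_add(&b));
      return 0;
  }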
1454 +diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
1455 +index db472c9cd8e9d..f0d846df3a424 100644
1456 +--- a/include/linux/netfilter_bridge/ebtables.h
1457 ++++ b/include/linux/netfilter_bridge/ebtables.h
1458 +@@ -94,10 +94,6 @@ struct ebt_table {
1459 + struct ebt_replace_kernel *table;
1460 + unsigned int valid_hooks;
1461 + rwlock_t lock;
1462 +- /* e.g. could be the table explicitly only allows certain
1463 +- * matches, targets, ... 0 == let it in */
1464 +- int (*check)(const struct ebt_table_info *info,
1465 +- unsigned int valid_hooks);
1466 + /* the data used by the kernel */
1467 + struct ebt_table_info *private;
1468 + struct module *me;
1469 +diff --git a/include/linux/rmap.h b/include/linux/rmap.h
1470 +index 91ccae9467164..c80bd129e9399 100644
1471 +--- a/include/linux/rmap.h
1472 ++++ b/include/linux/rmap.h
1473 +@@ -39,12 +39,15 @@ struct anon_vma {
1474 + atomic_t refcount;
1475 +
1476 + /*
1477 +- * Count of child anon_vmas and VMAs which points to this anon_vma.
1478 ++	 * Count of child anon_vmas. Equal to the number of all anon_vmas that
1479 ++ * have ->parent pointing to this one, including itself.
1480 + *
1481 + * This counter is used for making decision about reusing anon_vma
1482 + * instead of forking new one. See comments in function anon_vma_clone.
1483 + */
1484 +- unsigned degree;
1485 ++ unsigned long num_children;
1486 ++ /* Count of VMAs whose ->anon_vma pointer points to this object. */
1487 ++ unsigned long num_active_vmas;
1488 +
1489 + struct anon_vma *parent; /* Parent of this anon_vma */
1490 +
1491 +diff --git a/include/linux/sched.h b/include/linux/sched.h
1492 +index 171cb7475b450..d0e639497b107 100644
1493 +--- a/include/linux/sched.h
1494 ++++ b/include/linux/sched.h
1495 +@@ -526,10 +526,6 @@ struct sched_dl_entity {
1496 + * task has to wait for a replenishment to be performed at the
1497 + * next firing of dl_timer.
1498 + *
1499 +- * @dl_boosted tells if we are boosted due to DI. If so we are
1500 +- * outside bandwidth enforcement mechanism (but only until we
1501 +- * exit the critical section);
1502 +- *
1503 + * @dl_yielded tells if task gave up the CPU before consuming
1504 + * all its available runtime during the last job.
1505 + *
1506 +@@ -544,7 +540,6 @@ struct sched_dl_entity {
1507 + * overruns.
1508 + */
1509 + unsigned int dl_throttled : 1;
1510 +- unsigned int dl_boosted : 1;
1511 + unsigned int dl_yielded : 1;
1512 + unsigned int dl_non_contending : 1;
1513 + unsigned int dl_overrun : 1;
1514 +@@ -563,6 +558,15 @@ struct sched_dl_entity {
1515 + * time.
1516 + */
1517 + struct hrtimer inactive_timer;
1518 ++
1519 ++#ifdef CONFIG_RT_MUTEXES
1520 ++ /*
1521 ++ * Priority Inheritance. When a DEADLINE scheduling entity is boosted
1522 ++ * pi_se points to the donor, otherwise points to the dl_se it belongs
1523 ++ * to (the original one/itself).
1524 ++ */
1525 ++ struct sched_dl_entity *pi_se;
1526 ++#endif
1527 + };
1528 +
1529 + #ifdef CONFIG_UCLAMP_TASK
1530 +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1531 +index b04b5bd43f541..680f71ecdc08b 100644
1532 +--- a/include/linux/skbuff.h
1533 ++++ b/include/linux/skbuff.h
1534 +@@ -2201,6 +2201,14 @@ static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
1535 +
1536 + #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1537 +
1538 ++static inline void skb_assert_len(struct sk_buff *skb)
1539 ++{
1540 ++#ifdef CONFIG_DEBUG_NET
1541 ++ if (WARN_ONCE(!skb->len, "%s\n", __func__))
1542 ++ DO_ONCE_LITE(skb_dump, KERN_ERR, skb, false);
1543 ++#endif /* CONFIG_DEBUG_NET */
1544 ++}
1545 ++
1546 + /*
1547 + * Add data to an sk_buff
1548 + */
1549 +diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
1550 +index 9899b9af7f22f..16258c0c7319e 100644
1551 +--- a/include/net/busy_poll.h
1552 ++++ b/include/net/busy_poll.h
1553 +@@ -31,7 +31,7 @@ extern unsigned int sysctl_net_busy_poll __read_mostly;
1554 +
1555 + static inline bool net_busy_loop_on(void)
1556 + {
1557 +- return sysctl_net_busy_poll;
1558 ++ return READ_ONCE(sysctl_net_busy_poll);
1559 + }
1560 +
1561 + static inline bool sk_can_busy_loop(const struct sock *sk)
1562 +diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
1563 +index f0d2433184521..2973878162962 100644
1564 +--- a/kernel/audit_fsnotify.c
1565 ++++ b/kernel/audit_fsnotify.c
1566 +@@ -102,6 +102,7 @@ struct audit_fsnotify_mark *audit_alloc_mark(struct audit_krule *krule, char *pa
1567 +
1568 + ret = fsnotify_add_inode_mark(&audit_mark->mark, inode, true);
1569 + if (ret < 0) {
1570 ++ audit_mark->path = NULL;
1571 + fsnotify_put_mark(&audit_mark->mark);
1572 + audit_mark = ERR_PTR(ret);
1573 + }
1574 +diff --git a/kernel/kprobes.c b/kernel/kprobes.c
1575 +index 671b51782182b..9631ecc8a34c9 100644
1576 +--- a/kernel/kprobes.c
1577 ++++ b/kernel/kprobes.c
1578 +@@ -1737,11 +1737,12 @@ static struct kprobe *__disable_kprobe(struct kprobe *p)
1579 + /* Try to disarm and disable this/parent probe */
1580 + if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1581 + /*
1582 +- * If kprobes_all_disarmed is set, orig_p
1583 +- * should have already been disarmed, so
1584 +- * skip unneed disarming process.
1585 ++ * Don't be lazy here. Even if 'kprobes_all_disarmed'
1586 ++ * is false, 'orig_p' might not have been armed yet.
1587 ++ * Note arm_all_kprobes() __tries__ to arm all kprobes
1588 ++	 * on a best-effort basis.
1589 + */
1590 +- if (!kprobes_all_disarmed) {
1591 ++ if (!kprobes_all_disarmed && !kprobe_disabled(orig_p)) {
1592 + ret = disarm_kprobe(orig_p, true);
1593 + if (ret) {
1594 + p->flags &= ~KPROBE_FLAG_DISABLED;
1595 +diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1596 +index 5befdecefe947..06b686ef36e68 100644
1597 +--- a/kernel/sched/core.c
1598 ++++ b/kernel/sched/core.c
1599 +@@ -4554,20 +4554,21 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
1600 + if (!dl_prio(p->normal_prio) ||
1601 + (pi_task && dl_prio(pi_task->prio) &&
1602 + dl_entity_preempt(&pi_task->dl, &p->dl))) {
1603 +- p->dl.dl_boosted = 1;
1604 ++ p->dl.pi_se = pi_task->dl.pi_se;
1605 + queue_flag |= ENQUEUE_REPLENISH;
1606 +- } else
1607 +- p->dl.dl_boosted = 0;
1608 ++ } else {
1609 ++ p->dl.pi_se = &p->dl;
1610 ++ }
1611 + p->sched_class = &dl_sched_class;
1612 + } else if (rt_prio(prio)) {
1613 + if (dl_prio(oldprio))
1614 +- p->dl.dl_boosted = 0;
1615 ++ p->dl.pi_se = &p->dl;
1616 + if (oldprio < prio)
1617 + queue_flag |= ENQUEUE_HEAD;
1618 + p->sched_class = &rt_sched_class;
1619 + } else {
1620 + if (dl_prio(oldprio))
1621 +- p->dl.dl_boosted = 0;
1622 ++ p->dl.pi_se = &p->dl;
1623 + if (rt_prio(oldprio))
1624 + p->rt.timeout = 0;
1625 + p->sched_class = &fair_sched_class;
1626 +diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
1627 +index 2bda9fdba31c4..d8052c2d87e49 100644
1628 +--- a/kernel/sched/deadline.c
1629 ++++ b/kernel/sched/deadline.c
1630 +@@ -43,6 +43,28 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
1631 + return !RB_EMPTY_NODE(&dl_se->rb_node);
1632 + }
1633 +
1634 ++#ifdef CONFIG_RT_MUTEXES
1635 ++static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
1636 ++{
1637 ++ return dl_se->pi_se;
1638 ++}
1639 ++
1640 ++static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
1641 ++{
1642 ++ return pi_of(dl_se) != dl_se;
1643 ++}
1644 ++#else
1645 ++static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
1646 ++{
1647 ++ return dl_se;
1648 ++}
1649 ++
1650 ++static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
1651 ++{
1652 ++ return false;
1653 ++}
1654 ++#endif
1655 ++
1656 + #ifdef CONFIG_SMP
1657 + static inline struct dl_bw *dl_bw_of(int i)
1658 + {
1659 +@@ -657,7 +679,7 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
1660 + struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1661 + struct rq *rq = rq_of_dl_rq(dl_rq);
1662 +
1663 +- WARN_ON(dl_se->dl_boosted);
1664 ++ WARN_ON(is_dl_boosted(dl_se));
1665 + WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
1666 +
1667 + /*
1668 +@@ -695,21 +717,20 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
1669 + * could happen are, typically, a entity voluntarily trying to overcome its
1670 + * runtime, or it just underestimated it during sched_setattr().
1671 + */
1672 +-static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1673 +- struct sched_dl_entity *pi_se)
1674 ++static void replenish_dl_entity(struct sched_dl_entity *dl_se)
1675 + {
1676 + struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1677 + struct rq *rq = rq_of_dl_rq(dl_rq);
1678 +
1679 +- BUG_ON(pi_se->dl_runtime <= 0);
1680 ++ BUG_ON(pi_of(dl_se)->dl_runtime <= 0);
1681 +
1682 + /*
1683 + * This could be the case for a !-dl task that is boosted.
1684 + * Just go with full inherited parameters.
1685 + */
1686 + if (dl_se->dl_deadline == 0) {
1687 +- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1688 +- dl_se->runtime = pi_se->dl_runtime;
1689 ++ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
1690 ++ dl_se->runtime = pi_of(dl_se)->dl_runtime;
1691 + }
1692 +
1693 + if (dl_se->dl_yielded && dl_se->runtime > 0)
1694 +@@ -722,8 +743,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1695 + * arbitrary large.
1696 + */
1697 + while (dl_se->runtime <= 0) {
1698 +- dl_se->deadline += pi_se->dl_period;
1699 +- dl_se->runtime += pi_se->dl_runtime;
1700 ++ dl_se->deadline += pi_of(dl_se)->dl_period;
1701 ++ dl_se->runtime += pi_of(dl_se)->dl_runtime;
1702 + }
1703 +
1704 + /*
1705 +@@ -737,8 +758,8 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1706 + */
1707 + if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
1708 + printk_deferred_once("sched: DL replenish lagged too much\n");
1709 +- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1710 +- dl_se->runtime = pi_se->dl_runtime;
1711 ++ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
1712 ++ dl_se->runtime = pi_of(dl_se)->dl_runtime;
1713 + }
1714 +
1715 + if (dl_se->dl_yielded)
1716 +@@ -771,8 +792,7 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se,
1717 + * task with deadline equal to period this is the same of using
1718 + * dl_period instead of dl_deadline in the equation above.
1719 + */
1720 +-static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
1721 +- struct sched_dl_entity *pi_se, u64 t)
1722 ++static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
1723 + {
1724 + u64 left, right;
1725 +
1726 +@@ -794,9 +814,9 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
1727 + * of anything below microseconds resolution is actually fiction
1728 + * (but still we want to give the user that illusion >;).
1729 + */
1730 +- left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
1731 ++ left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
1732 + right = ((dl_se->deadline - t) >> DL_SCALE) *
1733 +- (pi_se->dl_runtime >> DL_SCALE);
1734 ++ (pi_of(dl_se)->dl_runtime >> DL_SCALE);
1735 +
1736 + return dl_time_before(right, left);
1737 + }
1738 +@@ -881,24 +901,23 @@ static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
1739 + * Please refer to the comments update_dl_revised_wakeup() function to find
1740 + * more about the Revised CBS rule.
1741 + */
1742 +-static void update_dl_entity(struct sched_dl_entity *dl_se,
1743 +- struct sched_dl_entity *pi_se)
1744 ++static void update_dl_entity(struct sched_dl_entity *dl_se)
1745 + {
1746 + struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1747 + struct rq *rq = rq_of_dl_rq(dl_rq);
1748 +
1749 + if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
1750 +- dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
1751 ++ dl_entity_overflow(dl_se, rq_clock(rq))) {
1752 +
1753 + if (unlikely(!dl_is_implicit(dl_se) &&
1754 + !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1755 +- !dl_se->dl_boosted)){
1756 ++ !is_dl_boosted(dl_se))) {
1757 + update_dl_revised_wakeup(dl_se, rq);
1758 + return;
1759 + }
1760 +
1761 +- dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
1762 +- dl_se->runtime = pi_se->dl_runtime;
1763 ++ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
1764 ++ dl_se->runtime = pi_of(dl_se)->dl_runtime;
1765 + }
1766 + }
1767 +
1768 +@@ -997,7 +1016,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1769 + * The task might have been boosted by someone else and might be in the
1770 + * boosting/deboosting path, its not throttled.
1771 + */
1772 +- if (dl_se->dl_boosted)
1773 ++ if (is_dl_boosted(dl_se))
1774 + goto unlock;
1775 +
1776 + /*
1777 +@@ -1025,7 +1044,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1778 + * but do not enqueue -- wait for our wakeup to do that.
1779 + */
1780 + if (!task_on_rq_queued(p)) {
1781 +- replenish_dl_entity(dl_se, dl_se);
1782 ++ replenish_dl_entity(dl_se);
1783 + goto unlock;
1784 + }
1785 +
1786 +@@ -1115,7 +1134,7 @@ static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1787 +
1788 + if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1789 + dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
1790 +- if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
1791 ++ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(p)))
1792 + return;
1793 + dl_se->dl_throttled = 1;
1794 + if (dl_se->runtime > 0)
1795 +@@ -1246,7 +1265,7 @@ throttle:
1796 + dl_se->dl_overrun = 1;
1797 +
1798 + __dequeue_task_dl(rq, curr, 0);
1799 +- if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
1800 ++ if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(curr)))
1801 + enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1802 +
1803 + if (!is_leftmost(curr, &rq->dl))
1804 +@@ -1440,8 +1459,7 @@ static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1805 + }
1806 +
1807 + static void
1808 +-enqueue_dl_entity(struct sched_dl_entity *dl_se,
1809 +- struct sched_dl_entity *pi_se, int flags)
1810 ++enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
1811 + {
1812 + BUG_ON(on_dl_rq(dl_se));
1813 +
1814 +@@ -1452,9 +1470,9 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
1815 + */
1816 + if (flags & ENQUEUE_WAKEUP) {
1817 + task_contending(dl_se, flags);
1818 +- update_dl_entity(dl_se, pi_se);
1819 ++ update_dl_entity(dl_se);
1820 + } else if (flags & ENQUEUE_REPLENISH) {
1821 +- replenish_dl_entity(dl_se, pi_se);
1822 ++ replenish_dl_entity(dl_se);
1823 + } else if ((flags & ENQUEUE_RESTORE) &&
1824 + dl_time_before(dl_se->deadline,
1825 + rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1826 +@@ -1471,28 +1489,40 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1827 +
1828 + static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1829 + {
1830 +- struct task_struct *pi_task = rt_mutex_get_top_task(p);
1831 +- struct sched_dl_entity *pi_se = &p->dl;
1832 +-
1833 +- /*
1834 +- * Use the scheduling parameters of the top pi-waiter task if:
1835 +- * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1836 +- * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1837 +- * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1838 +- * boosted due to a SCHED_DEADLINE pi-waiter).
1839 +- * Otherwise we keep our runtime and deadline.
1840 +- */
1841 +- if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
1842 +- pi_se = &pi_task->dl;
1843 ++ if (is_dl_boosted(&p->dl)) {
1844 ++ /*
1845 ++ * Because of delays in the detection of the overrun of a
1846 ++ * thread's runtime, it might be the case that a thread
1847 ++ * goes to sleep in a rt mutex with negative runtime. As
1848 ++ * a consequence, the thread will be throttled.
1849 ++ *
1850 ++ * While waiting for the mutex, this thread can also be
1851 ++ * boosted via PI, resulting in a thread that is throttled
1852 ++ * and boosted at the same time.
1853 ++ *
1854 ++ * In this case, the boost overrides the throttle.
1855 ++ */
1856 ++ if (p->dl.dl_throttled) {
1857 ++ /*
1858 ++ * The replenish timer needs to be canceled. No
1859 ++ * problem if it fires concurrently: boosted threads
1860 ++ * are ignored in dl_task_timer().
1861 ++ */
1862 ++ hrtimer_try_to_cancel(&p->dl.dl_timer);
1863 ++ p->dl.dl_throttled = 0;
1864 ++ }
1865 + } else if (!dl_prio(p->normal_prio)) {
1866 + /*
1867 +- * Special case in which we have a !SCHED_DEADLINE task
1868 +- * that is going to be deboosted, but exceeds its
1869 +- * runtime while doing so. No point in replenishing
1870 +- * it, as it's going to return back to its original
1871 +- * scheduling class after this.
1872 ++ * Special case in which we have a !SCHED_DEADLINE task that is going
1873 ++ * to be deboosted, but exceeds its runtime while doing so. No point in
1874 ++ * replenishing it, as it's going to return back to its original
1875 ++ * scheduling class after this. If it has been throttled, we need to
1876 ++ * clear the flag, otherwise the task may wake up as throttled after
1877 ++ * being boosted again with no means to replenish the runtime and clear
1878 ++ * the throttle.
1879 + */
1880 +- BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1881 ++ p->dl.dl_throttled = 0;
1882 ++ BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH);
1883 + return;
1884 + }
1885 +
1886 +@@ -1529,7 +1559,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1887 + return;
1888 + }
1889 +
1890 +- enqueue_dl_entity(&p->dl, pi_se, flags);
1891 ++ enqueue_dl_entity(&p->dl, flags);
1892 +
1893 + if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1894 + enqueue_pushable_dl_task(rq, p);
1895 +@@ -2698,11 +2728,14 @@ void __dl_clear_params(struct task_struct *p)
1896 + dl_se->dl_bw = 0;
1897 + dl_se->dl_density = 0;
1898 +
1899 +- dl_se->dl_boosted = 0;
1900 + dl_se->dl_throttled = 0;
1901 + dl_se->dl_yielded = 0;
1902 + dl_se->dl_non_contending = 0;
1903 + dl_se->dl_overrun = 0;
1904 ++
1905 ++#ifdef CONFIG_RT_MUTEXES
1906 ++ dl_se->pi_se = dl_se;
1907 ++#endif
1908 + }
1909 +
1910 + bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
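The deadline-class rework above drops the dl_boosted bit in favour of a pi_se pointer: an entity normally points at itself, and priority inheritance repoints it at the donor whose runtime/deadline parameters it should be replenished with, so pi_of() hides the indirection and is_dl_boosted() is a pointer-identity test. A compact sketch of that pattern (the helper names follow the patch, the struct is trimmed down for illustration):

  #include <stdbool.h>
  #include <stdio.h>

  struct dl_entity {
      unsigned long long dl_runtime;
      unsigned long long dl_deadline;
      struct dl_entity *pi_se;    /* donor, or self when not boosted */
  };

  static struct dl_entity *pi_of(struct dl_entity *dl_se)
  {
      return dl_se->pi_se;
  }

  static bool is_dl_boosted(struct dl_entity *dl_se)
  {
      return pi_of(dl_se) != dl_se;
  }

  int main(void)
  {
      struct dl_entity donor = { 3000, 10000, &donor };
      struct dl_entity task  = { 1000, 50000, &task };

      /* replenishment draws from our own parameters... */
      printf("boosted=%d runtime=%llu\n",
             is_dl_boosted(&task), pi_of(&task)->dl_runtime);

      /* ...until priority inheritance points us at the donor */
      task.pi_se = &donor;
      printf("boosted=%d runtime=%llu\n",
             is_dl_boosted(&task), pi_of(&task)->dl_runtime);
      return 0;
  }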
1911 +diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
1912 +index 34b76895b81e8..189eed03e4e34 100644
1913 +--- a/kernel/sys_ni.c
1914 ++++ b/kernel/sys_ni.c
1915 +@@ -268,6 +268,7 @@ COND_SYSCALL_COMPAT(keyctl);
1916 +
1917 + /* mm/fadvise.c */
1918 + COND_SYSCALL(fadvise64_64);
1919 ++COND_SYSCALL_COMPAT(fadvise64_64);
1920 +
1921 + /* mm/, CONFIG_MMU only */
1922 + COND_SYSCALL(swapon);
1923 +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1924 +index 7719d444bda12..44f1469af842b 100644
1925 +--- a/kernel/trace/ftrace.c
1926 ++++ b/kernel/trace/ftrace.c
1927 +@@ -2732,6 +2732,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
1928 +
1929 + ftrace_startup_enable(command);
1930 +
1931 ++ /*
1932 ++	 * If ftrace is in an undefined state, we just remove the ops from the list
1933 ++	 * to prevent a NULL pointer dereference, instead of fully rolling it back and
1934 ++	 * freeing the trampoline, because those actions could cause further damage.
1935 ++ */
1936 ++ if (unlikely(ftrace_disabled)) {
1937 ++ __unregister_ftrace_function(ops);
1938 ++ return -ENODEV;
1939 ++ }
1940 ++
1941 + ops->flags &= ~FTRACE_OPS_FL_ADDING;
1942 +
1943 + return 0;
1944 +diff --git a/lib/ratelimit.c b/lib/ratelimit.c
1945 +index e01a93f46f833..ce945c17980b9 100644
1946 +--- a/lib/ratelimit.c
1947 ++++ b/lib/ratelimit.c
1948 +@@ -26,10 +26,16 @@
1949 + */
1950 + int ___ratelimit(struct ratelimit_state *rs, const char *func)
1951 + {
1952 ++ /* Paired with WRITE_ONCE() in .proc_handler().
1953 ++	 * Changing two values separately could be inconsistent
1954 ++	 * and some messages could be lost. (See: net_ratelimit_state).
1955 ++ */
1956 ++ int interval = READ_ONCE(rs->interval);
1957 ++ int burst = READ_ONCE(rs->burst);
1958 + unsigned long flags;
1959 + int ret;
1960 +
1961 +- if (!rs->interval)
1962 ++ if (!interval)
1963 + return 1;
1964 +
1965 + /*
1966 +@@ -44,7 +50,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
1967 + if (!rs->begin)
1968 + rs->begin = jiffies;
1969 +
1970 +- if (time_is_before_jiffies(rs->begin + rs->interval)) {
1971 ++ if (time_is_before_jiffies(rs->begin + interval)) {
1972 + if (rs->missed) {
1973 + if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
1974 + printk_deferred(KERN_WARNING
1975 +@@ -56,7 +62,7 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
1976 + rs->begin = jiffies;
1977 + rs->printed = 0;
1978 + }
1979 +- if (rs->burst && rs->burst > rs->printed) {
1980 ++ if (burst && burst > rs->printed) {
1981 + rs->printed++;
1982 + ret = 1;
1983 + } else {
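___ratelimit() now snapshots interval and burst into locals at entry, so a concurrent sysctl write cannot hand it a mixed pair halfway through the decision. A minimal sketch of the same snapshot-then-decide shape, using C11 atomics as a stand-in for the kernel's READ_ONCE (an assumption of this example) and omitting the time-window reset of the real function:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Tunables another thread may rewrite at any moment. */
  static _Atomic int rl_interval = 5;
  static _Atomic int rl_burst = 10;

  struct rl_state {
      int printed;
  };

  static bool rl_allow(struct rl_state *rs)
  {
      /* Read both values once so the test below sees a consistent pair. */
      int interval = atomic_load_explicit(&rl_interval, memory_order_relaxed);
      int burst = atomic_load_explicit(&rl_burst, memory_order_relaxed);

      if (!interval)
          return true;    /* rate limiting disabled */
      if (burst && burst > rs->printed) {
          rs->printed++;
          return true;
      }
      return false;
  }

  int main(void)
  {
      struct rl_state rs = { 0 };
      int allowed = 0;

      for (int i = 0; i < 15; i++)
          allowed += rl_allow(&rs);
      printf("%d of 15 allowed\n", allowed);    /* the burst of 10 */
      return 0;
  }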
1984 +diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
1985 +index 45f57fd2db649..5667fb746a1fe 100644
1986 +--- a/lib/vdso/gettimeofday.c
1987 ++++ b/lib/vdso/gettimeofday.c
1988 +@@ -38,7 +38,7 @@ u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
1989 + }
1990 + #endif
1991 +
1992 +-static int do_hres(const struct vdso_data *vd, clockid_t clk,
1993 ++static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
1994 + struct __kernel_timespec *ts)
1995 + {
1996 + const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
1997 +@@ -68,8 +68,8 @@ static int do_hres(const struct vdso_data *vd, clockid_t clk,
1998 + return 0;
1999 + }
2000 +
2001 +-static void do_coarse(const struct vdso_data *vd, clockid_t clk,
2002 +- struct __kernel_timespec *ts)
2003 ++static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
2004 ++ struct __kernel_timespec *ts)
2005 + {
2006 + const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
2007 + u32 seq;
2008 +@@ -79,6 +79,8 @@ static void do_coarse(const struct vdso_data *vd, clockid_t clk,
2009 + ts->tv_sec = vdso_ts->sec;
2010 + ts->tv_nsec = vdso_ts->nsec;
2011 + } while (unlikely(vdso_read_retry(vd, seq)));
2012 ++
2013 ++ return 0;
2014 + }
2015 +
2016 + static __maybe_unused int
2017 +@@ -96,15 +98,16 @@ __cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
2018 + * clocks are handled in the VDSO directly.
2019 + */
2020 + msk = 1U << clock;
2021 +- if (likely(msk & VDSO_HRES)) {
2022 +- return do_hres(&vd[CS_HRES_COARSE], clock, ts);
2023 +- } else if (msk & VDSO_COARSE) {
2024 +- do_coarse(&vd[CS_HRES_COARSE], clock, ts);
2025 +- return 0;
2026 +- } else if (msk & VDSO_RAW) {
2027 +- return do_hres(&vd[CS_RAW], clock, ts);
2028 +- }
2029 +- return -1;
2030 ++ if (likely(msk & VDSO_HRES))
2031 ++ vd = &vd[CS_HRES_COARSE];
2032 ++ else if (msk & VDSO_COARSE)
2033 ++ return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
2034 ++ else if (msk & VDSO_RAW)
2035 ++ vd = &vd[CS_RAW];
2036 ++ else
2037 ++ return -1;
2038 ++
2039 ++ return do_hres(vd, clock, ts);
2040 + }
2041 +
2042 + static __maybe_unused int
2043 +diff --git a/mm/mmap.c b/mm/mmap.c
2044 +index 8873ef114d280..e8cf6f88933c3 100644
2045 +--- a/mm/mmap.c
2046 ++++ b/mm/mmap.c
2047 +@@ -1679,8 +1679,12 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
2048 + pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
2049 + return 0;
2050 +
2051 +- /* Do we need to track softdirty? */
2052 +- if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
2053 ++ /*
2054 ++ * Do we need to track softdirty? hugetlb does not support softdirty
2055 ++ * tracking yet.
2056 ++ */
2057 ++ if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY) &&
2058 ++ !is_vm_hugetlb_page(vma))
2059 + return 1;
2060 +
2061 + /* Specialty mapping? */
2062 +@@ -2606,6 +2610,18 @@ static void unmap_region(struct mm_struct *mm,
2063 + tlb_gather_mmu(&tlb, mm, start, end);
2064 + update_hiwater_rss(mm);
2065 + unmap_vmas(&tlb, vma, start, end);
2066 ++
2067 ++ /*
2068 ++ * Ensure we have no stale TLB entries by the time this mapping is
2069 ++ * removed from the rmap.
2070 ++ * Note that we don't have to worry about nested flushes here because
2071 ++ * we're holding the mm semaphore for removing the mapping - so any
2072 ++ * concurrent flush in this region has to be coming through the rmap,
2073 ++ * and we synchronize against that using the rmap lock.
2074 ++ */
2075 ++ if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
2076 ++ tlb_flush_mmu(&tlb);
2077 ++
2078 + free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2079 + next ? next->vm_start : USER_PGTABLES_CEILING);
2080 + tlb_finish_mmu(&tlb, start, end);
2081 +diff --git a/mm/rmap.c b/mm/rmap.c
2082 +index 6d80e92688fe7..c64da910bb731 100644
2083 +--- a/mm/rmap.c
2084 ++++ b/mm/rmap.c
2085 +@@ -83,7 +83,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
2086 + anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
2087 + if (anon_vma) {
2088 + atomic_set(&anon_vma->refcount, 1);
2089 +- anon_vma->degree = 1; /* Reference for first vma */
2090 ++ anon_vma->num_children = 0;
2091 ++ anon_vma->num_active_vmas = 0;
2092 + anon_vma->parent = anon_vma;
2093 + /*
2094 + * Initialise the anon_vma root to point to itself. If called
2095 +@@ -191,6 +192,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
2096 + anon_vma = anon_vma_alloc();
2097 + if (unlikely(!anon_vma))
2098 + goto out_enomem_free_avc;
2099 ++ anon_vma->num_children++; /* self-parent link for new root */
2100 + allocated = anon_vma;
2101 + }
2102 +
2103 +@@ -200,8 +202,7 @@ int __anon_vma_prepare(struct vm_area_struct *vma)
2104 + if (likely(!vma->anon_vma)) {
2105 + vma->anon_vma = anon_vma;
2106 + anon_vma_chain_link(vma, avc, anon_vma);
2107 +- /* vma reference or self-parent link for new root */
2108 +- anon_vma->degree++;
2109 ++ anon_vma->num_active_vmas++;
2110 + allocated = NULL;
2111 + avc = NULL;
2112 + }
2113 +@@ -280,19 +281,19 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
2114 + anon_vma_chain_link(dst, avc, anon_vma);
2115 +
2116 + /*
2117 +- * Reuse existing anon_vma if its degree lower than two,
2118 +- * that means it has no vma and only one anon_vma child.
2119 ++ * Reuse existing anon_vma if it has no vma and only one
2120 ++ * anon_vma child.
2121 + *
2122 +- * Do not chose parent anon_vma, otherwise first child
2123 +- * will always reuse it. Root anon_vma is never reused:
2124 ++ * Root anon_vma is never reused:
2125 + * it has self-parent reference and at least one child.
2126 + */
2127 +- if (!dst->anon_vma && anon_vma != src->anon_vma &&
2128 +- anon_vma->degree < 2)
2129 ++ if (!dst->anon_vma &&
2130 ++ anon_vma->num_children < 2 &&
2131 ++ anon_vma->num_active_vmas == 0)
2132 + dst->anon_vma = anon_vma;
2133 + }
2134 + if (dst->anon_vma)
2135 +- dst->anon_vma->degree++;
2136 ++ dst->anon_vma->num_active_vmas++;
2137 + unlock_anon_vma_root(root);
2138 + return 0;
2139 +
2140 +@@ -342,6 +343,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
2141 + anon_vma = anon_vma_alloc();
2142 + if (!anon_vma)
2143 + goto out_error;
2144 ++ anon_vma->num_active_vmas++;
2145 + avc = anon_vma_chain_alloc(GFP_KERNEL);
2146 + if (!avc)
2147 + goto out_error_free_anon_vma;
2148 +@@ -362,7 +364,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
2149 + vma->anon_vma = anon_vma;
2150 + anon_vma_lock_write(anon_vma);
2151 + anon_vma_chain_link(vma, avc, anon_vma);
2152 +- anon_vma->parent->degree++;
2153 ++ anon_vma->parent->num_children++;
2154 + anon_vma_unlock_write(anon_vma);
2155 +
2156 + return 0;
2157 +@@ -394,7 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
2158 + * to free them outside the lock.
2159 + */
2160 + if (RB_EMPTY_ROOT(&anon_vma->rb_root.rb_root)) {
2161 +- anon_vma->parent->degree--;
2162 ++ anon_vma->parent->num_children--;
2163 + continue;
2164 + }
2165 +
2166 +@@ -402,7 +404,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
2167 + anon_vma_chain_free(avc);
2168 + }
2169 + if (vma->anon_vma)
2170 +- vma->anon_vma->degree--;
2171 ++ vma->anon_vma->num_active_vmas--;
2172 + unlock_anon_vma_root(root);
2173 +
2174 + /*
2175 +@@ -413,7 +415,8 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
2176 + list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
2177 + struct anon_vma *anon_vma = avc->anon_vma;
2178 +
2179 +- VM_WARN_ON(anon_vma->degree);
2180 ++ VM_WARN_ON(anon_vma->num_children);
2181 ++ VM_WARN_ON(anon_vma->num_active_vmas);
2182 + put_anon_vma(anon_vma);
2183 +
2184 + list_del(&avc->same_vma);
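The rmap hunks split the overloaded degree counter into num_children (anon_vmas parented to this one) and num_active_vmas (VMAs using it), and anon_vma_clone() may now only reuse an anon_vma that no VMA uses and that has fewer than two children. A reduced sketch of just that bookkeeping; the struct is trimmed and the helper name is this example's, not the kernel's:

  #include <stdbool.h>
  #include <stdio.h>

  struct anon_vma_lite {
      unsigned long num_children;      /* anon_vmas whose ->parent is this one */
      unsigned long num_active_vmas;   /* VMAs whose ->anon_vma is this one */
  };

  /* Reuse condition from anon_vma_clone(): nothing maps through it and
   * it has at most one child. */
  static bool can_reuse(const struct anon_vma_lite *av)
  {
      return av->num_children < 2 && av->num_active_vmas == 0;
  }

  int main(void)
  {
      struct anon_vma_lite root  = { .num_children = 2, .num_active_vmas = 1 };
      struct anon_vma_lite child = { .num_children = 1, .num_active_vmas = 0 };

      printf("root reusable: %d\n", can_reuse(&root));    /* 0 */
      printf("child reusable: %d\n", can_reuse(&child));  /* 1 */
      return 0;
  }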
2185 +diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
2186 +index 7aa64f300422e..3682d2e1cd7d2 100644
2187 +--- a/net/bluetooth/l2cap_core.c
2188 ++++ b/net/bluetooth/l2cap_core.c
2189 +@@ -1835,11 +1835,11 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
2190 + src_match = !bacmp(&c->src, src);
2191 + dst_match = !bacmp(&c->dst, dst);
2192 + if (src_match && dst_match) {
2193 +- c = l2cap_chan_hold_unless_zero(c);
2194 +- if (c) {
2195 +- read_unlock(&chan_list_lock);
2196 +- return c;
2197 +- }
2198 ++ if (!l2cap_chan_hold_unless_zero(c))
2199 ++ continue;
2200 ++
2201 ++ read_unlock(&chan_list_lock);
2202 ++ return c;
2203 + }
2204 +
2205 + /* Closest match */
2206 +diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
2207 +index 1153bbcdff721..5e6428cbd7580 100644
2208 +--- a/net/bpf/test_run.c
2209 ++++ b/net/bpf/test_run.c
2210 +@@ -200,6 +200,9 @@ static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
2211 + {
2212 + struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;
2213 +
2214 ++ if (!skb->len)
2215 ++ return -EINVAL;
2216 ++
2217 + if (!__skb)
2218 + return 0;
2219 +
2220 +diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
2221 +index 32bc2821027f3..57f91efce0f73 100644
2222 +--- a/net/bridge/netfilter/ebtable_broute.c
2223 ++++ b/net/bridge/netfilter/ebtable_broute.c
2224 +@@ -36,18 +36,10 @@ static struct ebt_replace_kernel initial_table = {
2225 + .entries = (char *)&initial_chain,
2226 + };
2227 +
2228 +-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
2229 +-{
2230 +- if (valid_hooks & ~(1 << NF_BR_BROUTING))
2231 +- return -EINVAL;
2232 +- return 0;
2233 +-}
2234 +-
2235 + static const struct ebt_table broute_table = {
2236 + .name = "broute",
2237 + .table = &initial_table,
2238 + .valid_hooks = 1 << NF_BR_BROUTING,
2239 +- .check = check,
2240 + .me = THIS_MODULE,
2241 + };
2242 +
2243 +diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
2244 +index bcf982e12f16b..7f2e620f4978f 100644
2245 +--- a/net/bridge/netfilter/ebtable_filter.c
2246 ++++ b/net/bridge/netfilter/ebtable_filter.c
2247 +@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
2248 + .entries = (char *)initial_chains,
2249 + };
2250 +
2251 +-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
2252 +-{
2253 +- if (valid_hooks & ~FILTER_VALID_HOOKS)
2254 +- return -EINVAL;
2255 +- return 0;
2256 +-}
2257 +-
2258 + static const struct ebt_table frame_filter = {
2259 + .name = "filter",
2260 + .table = &initial_table,
2261 + .valid_hooks = FILTER_VALID_HOOKS,
2262 +- .check = check,
2263 + .me = THIS_MODULE,
2264 + };
2265 +
2266 +diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
2267 +index 0d092773f8161..1743a105485c4 100644
2268 +--- a/net/bridge/netfilter/ebtable_nat.c
2269 ++++ b/net/bridge/netfilter/ebtable_nat.c
2270 +@@ -43,18 +43,10 @@ static struct ebt_replace_kernel initial_table = {
2271 + .entries = (char *)initial_chains,
2272 + };
2273 +
2274 +-static int check(const struct ebt_table_info *info, unsigned int valid_hooks)
2275 +-{
2276 +- if (valid_hooks & ~NAT_VALID_HOOKS)
2277 +- return -EINVAL;
2278 +- return 0;
2279 +-}
2280 +-
2281 + static const struct ebt_table frame_nat = {
2282 + .name = "nat",
2283 + .table = &initial_table,
2284 + .valid_hooks = NAT_VALID_HOOKS,
2285 +- .check = check,
2286 + .me = THIS_MODULE,
2287 + };
2288 +
2289 +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
2290 +index d9375c52f50e6..ddb988c339c17 100644
2291 +--- a/net/bridge/netfilter/ebtables.c
2292 ++++ b/net/bridge/netfilter/ebtables.c
2293 +@@ -999,8 +999,7 @@ static int do_replace_finish(struct net *net, struct ebt_replace *repl,
2294 + goto free_iterate;
2295 + }
2296 +
2297 +- /* the table doesn't like it */
2298 +- if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
2299 ++ if (repl->valid_hooks != t->valid_hooks)
2300 + goto free_unlock;
2301 +
2302 + if (repl->num_counters && repl->num_counters != t->private->nentries) {
2303 +@@ -1193,11 +1192,6 @@ int ebt_register_table(struct net *net, const struct ebt_table *input_table,
2304 + if (ret != 0)
2305 + goto free_chainstack;
2306 +
2307 +- if (table->check && table->check(newinfo, table->valid_hooks)) {
2308 +- ret = -EINVAL;
2309 +- goto free_chainstack;
2310 +- }
2311 +-
2312 + table->private = newinfo;
2313 + rwlock_init(&table->lock);
2314 + mutex_lock(&ebt_mutex);
2315 +diff --git a/net/core/dev.c b/net/core/dev.c
2316 +index a03036456221b..84bc6d0e8560b 100644
2317 +--- a/net/core/dev.c
2318 ++++ b/net/core/dev.c
2319 +@@ -3712,6 +3712,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
2320 + bool again = false;
2321 +
2322 + skb_reset_mac_header(skb);
2323 ++ skb_assert_len(skb);
2324 +
2325 + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2326 + __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2327 +@@ -4411,7 +4412,7 @@ static int netif_rx_internal(struct sk_buff *skb)
2328 + {
2329 + int ret;
2330 +
2331 +- net_timestamp_check(netdev_tstamp_prequeue, skb);
2332 ++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
2333 +
2334 + trace_netif_rx(skb);
2335 +
2336 +@@ -4753,7 +4754,7 @@ static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
2337 + int ret = NET_RX_DROP;
2338 + __be16 type;
2339 +
2340 +- net_timestamp_check(!netdev_tstamp_prequeue, skb);
2341 ++ net_timestamp_check(!READ_ONCE(netdev_tstamp_prequeue), skb);
2342 +
2343 + trace_netif_receive_skb(skb);
2344 +
2345 +@@ -5135,7 +5136,7 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
2346 + {
2347 + int ret;
2348 +
2349 +- net_timestamp_check(netdev_tstamp_prequeue, skb);
2350 ++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
2351 +
2352 + if (skb_defer_rx_timestamp(skb))
2353 + return NET_RX_SUCCESS;
2354 +@@ -5165,7 +5166,7 @@ static void netif_receive_skb_list_internal(struct list_head *head)
2355 +
2356 + INIT_LIST_HEAD(&sublist);
2357 + list_for_each_entry_safe(skb, next, head, list) {
2358 +- net_timestamp_check(netdev_tstamp_prequeue, skb);
2359 ++ net_timestamp_check(READ_ONCE(netdev_tstamp_prequeue), skb);
2360 + skb_list_del_init(skb);
2361 + if (!skb_defer_rx_timestamp(skb))
2362 + list_add_tail(&skb->list, &sublist);
2363 +@@ -5892,7 +5893,7 @@ static int process_backlog(struct napi_struct *napi, int quota)
2364 + net_rps_action_and_irq_enable(sd);
2365 + }
2366 +
2367 +- napi->weight = dev_rx_weight;
2368 ++ napi->weight = READ_ONCE(dev_rx_weight);
2369 + while (again) {
2370 + struct sk_buff *skb;
2371 +
2372 +@@ -6393,8 +6394,8 @@ static __latent_entropy void net_rx_action(struct softirq_action *h)
2373 + {
2374 + struct softnet_data *sd = this_cpu_ptr(&softnet_data);
2375 + unsigned long time_limit = jiffies +
2376 +- usecs_to_jiffies(netdev_budget_usecs);
2377 +- int budget = netdev_budget;
2378 ++ usecs_to_jiffies(READ_ONCE(netdev_budget_usecs));
2379 ++ int budget = READ_ONCE(netdev_budget);
2380 + LIST_HEAD(list);
2381 + LIST_HEAD(repoll);
2382 +
2383 +diff --git a/net/core/neighbour.c b/net/core/neighbour.c
2384 +index 8b6140e67e7f8..aa81aead0a654 100644
2385 +--- a/net/core/neighbour.c
2386 ++++ b/net/core/neighbour.c
2387 +@@ -280,11 +280,26 @@ static int neigh_del_timer(struct neighbour *n)
2388 + return 0;
2389 + }
2390 +
2391 +-static void pneigh_queue_purge(struct sk_buff_head *list)
2392 ++static void pneigh_queue_purge(struct sk_buff_head *list, struct net *net)
2393 + {
2394 ++ struct sk_buff_head tmp;
2395 ++ unsigned long flags;
2396 + struct sk_buff *skb;
2397 +
2398 +- while ((skb = skb_dequeue(list)) != NULL) {
2399 ++ skb_queue_head_init(&tmp);
2400 ++ spin_lock_irqsave(&list->lock, flags);
2401 ++ skb = skb_peek(list);
2402 ++ while (skb != NULL) {
2403 ++ struct sk_buff *skb_next = skb_peek_next(skb, list);
2404 ++ if (net == NULL || net_eq(dev_net(skb->dev), net)) {
2405 ++ __skb_unlink(skb, list);
2406 ++ __skb_queue_tail(&tmp, skb);
2407 ++ }
2408 ++ skb = skb_next;
2409 ++ }
2410 ++ spin_unlock_irqrestore(&list->lock, flags);
2411 ++
2412 ++ while ((skb = __skb_dequeue(&tmp))) {
2413 + dev_put(skb->dev);
2414 + kfree_skb(skb);
2415 + }
2416 +@@ -358,9 +373,9 @@ static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
2417 + write_lock_bh(&tbl->lock);
2418 + neigh_flush_dev(tbl, dev, skip_perm);
2419 + pneigh_ifdown_and_unlock(tbl, dev);
2420 +-
2421 +- del_timer_sync(&tbl->proxy_timer);
2422 +- pneigh_queue_purge(&tbl->proxy_queue);
2423 ++ pneigh_queue_purge(&tbl->proxy_queue, dev_net(dev));
2424 ++ if (skb_queue_empty_lockless(&tbl->proxy_queue))
2425 ++ del_timer_sync(&tbl->proxy_timer);
2426 + return 0;
2427 + }
2428 +
2429 +@@ -1741,7 +1756,7 @@ int neigh_table_clear(int index, struct neigh_table *tbl)
2430 + /* It is not clean... Fix it to unload IPv6 module safely */
2431 + cancel_delayed_work_sync(&tbl->gc_work);
2432 + del_timer_sync(&tbl->proxy_timer);
2433 +- pneigh_queue_purge(&tbl->proxy_queue);
2434 ++ pneigh_queue_purge(&tbl->proxy_queue, NULL);
2435 + neigh_ifdown(tbl, NULL);
2436 + if (atomic_read(&tbl->entries))
2437 + pr_crit("neighbour leakage\n");
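pneigh_queue_purge() now walks the proxy queue under its lock, unlinks only the skbs belonging to the target namespace onto a private list, and frees them after the lock is dropped. The same drain-then-free shape reduced to a singly linked list and an owner id; everything here is illustrative rather than kernel API, and the sketch does not preserve the order of the kept entries:

  #include <stdlib.h>

  struct item {
      int owner;
      struct item *next;
  };

  struct queue {
      struct item *head;    /* a lock would protect this in real code */
  };

  /* Move every item matching 'owner' (or all items when owner < 0) onto
   * a private list while "holding the lock", then free them afterwards. */
  static void purge(struct queue *q, int owner)
  {
      struct item *keep = NULL, *drop = NULL;
      struct item *it = q->head;

      /* --- would run under the queue lock --- */
      while (it) {
          struct item *next = it->next;

          if (owner < 0 || it->owner == owner) {
              it->next = drop;
              drop = it;
          } else {
              it->next = keep;
              keep = it;
          }
          it = next;
      }
      q->head = keep;
      /* --- lock released here, freeing happens outside it --- */

      while (drop) {
          struct item *next = drop->next;

          free(drop);
          drop = next;
      }
  }

  int main(void)
  {
      struct item *a = malloc(sizeof(*a));
      struct item *b = malloc(sizeof(*b));
      struct queue q = { NULL };

      if (!a || !b)
          return 1;
      a->owner = 1; a->next = b;
      b->owner = 2; b->next = NULL;
      q.head = a;

      purge(&q, 1);     /* frees only owner 1; owner 2 stays queued */
      purge(&q, -1);    /* drains the rest */
      return 0;
  }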
2438 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2439 +index 5bdb3cd20d619..c9fe2c0b8cae3 100644
2440 +--- a/net/core/skbuff.c
2441 ++++ b/net/core/skbuff.c
2442 +@@ -4564,7 +4564,7 @@ static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
2443 + {
2444 + bool ret;
2445 +
2446 +- if (likely(sysctl_tstamp_allow_data || tsonly))
2447 ++ if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
2448 + return true;
2449 +
2450 + read_lock_bh(&sk->sk_callback_lock);
2451 +diff --git a/net/core/sock.c b/net/core/sock.c
2452 +index c84f68bff7f58..a2b12a5cf42bc 100644
2453 +--- a/net/core/sock.c
2454 ++++ b/net/core/sock.c
2455 +@@ -2946,7 +2946,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
2456 +
2457 + #ifdef CONFIG_NET_RX_BUSY_POLL
2458 + sk->sk_napi_id = 0;
2459 +- sk->sk_ll_usec = sysctl_net_busy_read;
2460 ++ sk->sk_ll_usec = READ_ONCE(sysctl_net_busy_read);
2461 + #endif
2462 +
2463 + sk->sk_max_pacing_rate = ~0UL;
2464 +diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
2465 +index 48041f50ecfb4..586598887095d 100644
2466 +--- a/net/core/sysctl_net_core.c
2467 ++++ b/net/core/sysctl_net_core.c
2468 +@@ -238,14 +238,17 @@ static int set_default_qdisc(struct ctl_table *table, int write,
2469 + static int proc_do_dev_weight(struct ctl_table *table, int write,
2470 + void __user *buffer, size_t *lenp, loff_t *ppos)
2471 + {
2472 +- int ret;
2473 ++ static DEFINE_MUTEX(dev_weight_mutex);
2474 ++ int ret, weight;
2475 +
2476 ++ mutex_lock(&dev_weight_mutex);
2477 + ret = proc_dointvec(table, write, buffer, lenp, ppos);
2478 +- if (ret != 0)
2479 +- return ret;
2480 +-
2481 +- dev_rx_weight = weight_p * dev_weight_rx_bias;
2482 +- dev_tx_weight = weight_p * dev_weight_tx_bias;
2483 ++ if (!ret && write) {
2484 ++ weight = READ_ONCE(weight_p);
2485 ++ WRITE_ONCE(dev_rx_weight, weight * dev_weight_rx_bias);
2486 ++ WRITE_ONCE(dev_tx_weight, weight * dev_weight_tx_bias);
2487 ++ }
2488 ++ mutex_unlock(&dev_weight_mutex);
2489 +
2490 + return ret;
2491 + }
2492 +diff --git a/net/key/af_key.c b/net/key/af_key.c
2493 +index 32fe99cd01fc8..c06cc48c68c90 100644
2494 +--- a/net/key/af_key.c
2495 ++++ b/net/key/af_key.c
2496 +@@ -1701,9 +1701,12 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
2497 + pfk->registered |= (1<<hdr->sadb_msg_satype);
2498 + }
2499 +
2500 ++ mutex_lock(&pfkey_mutex);
2501 + xfrm_probe_algs();
2502 +
2503 + supp_skb = compose_sadb_supported(hdr, GFP_KERNEL | __GFP_ZERO);
2504 ++ mutex_unlock(&pfkey_mutex);
2505 ++
2506 + if (!supp_skb) {
2507 + if (hdr->sadb_msg_satype != SADB_SATYPE_UNSPEC)
2508 + pfk->registered &= ~(1<<hdr->sadb_msg_satype);
2509 +diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
2510 +index ef72819d9d315..d569915da003c 100644
2511 +--- a/net/netfilter/Kconfig
2512 ++++ b/net/netfilter/Kconfig
2513 +@@ -118,7 +118,6 @@ config NF_CONNTRACK_ZONES
2514 +
2515 + config NF_CONNTRACK_PROCFS
2516 + bool "Supply CT list in procfs (OBSOLETE)"
2517 +- default y
2518 + depends on PROC_FS
2519 + ---help---
2520 + This option enables for the list of known conntrack entries
2521 +diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
2522 +index 4911f8eb394ff..d966a3aff1d33 100644
2523 +--- a/net/netfilter/nft_osf.c
2524 ++++ b/net/netfilter/nft_osf.c
2525 +@@ -115,9 +115,21 @@ static int nft_osf_validate(const struct nft_ctx *ctx,
2526 + const struct nft_expr *expr,
2527 + const struct nft_data **data)
2528 + {
2529 +- return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
2530 +- (1 << NF_INET_PRE_ROUTING) |
2531 +- (1 << NF_INET_FORWARD));
2532 ++ unsigned int hooks;
2533 ++
2534 ++ switch (ctx->family) {
2535 ++ case NFPROTO_IPV4:
2536 ++ case NFPROTO_IPV6:
2537 ++ case NFPROTO_INET:
2538 ++ hooks = (1 << NF_INET_LOCAL_IN) |
2539 ++ (1 << NF_INET_PRE_ROUTING) |
2540 ++ (1 << NF_INET_FORWARD);
2541 ++ break;
2542 ++ default:
2543 ++ return -EOPNOTSUPP;
2544 ++ }
2545 ++
2546 ++ return nft_chain_validate_hooks(ctx->chain, hooks);
2547 + }
2548 +
2549 + static struct nft_expr_type nft_osf_type;
2550 +diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
2551 +index cf0512fc648e7..6ed6ccef5e1ad 100644
2552 +--- a/net/netfilter/nft_payload.c
2553 ++++ b/net/netfilter/nft_payload.c
2554 +@@ -558,6 +558,8 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
2555 + const struct nlattr * const tb[])
2556 + {
2557 + struct nft_payload_set *priv = nft_expr_priv(expr);
2558 ++ u32 csum_offset, csum_type = NFT_PAYLOAD_CSUM_NONE;
2559 ++ int err;
2560 +
2561 + priv->base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
2562 + priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
2563 +@@ -565,11 +567,15 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
2564 + priv->sreg = nft_parse_register(tb[NFTA_PAYLOAD_SREG]);
2565 +
2566 + if (tb[NFTA_PAYLOAD_CSUM_TYPE])
2567 +- priv->csum_type =
2568 +- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
2569 +- if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
2570 +- priv->csum_offset =
2571 +- ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
2572 ++ csum_type = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
2573 ++ if (tb[NFTA_PAYLOAD_CSUM_OFFSET]) {
2574 ++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_CSUM_OFFSET], U8_MAX,
2575 ++ &csum_offset);
2576 ++ if (err < 0)
2577 ++ return err;
2578 ++
2579 ++ priv->csum_offset = csum_offset;
2580 ++ }
2581 + if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
2582 + u32 flags;
2583 +
2584 +@@ -580,13 +586,14 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
2585 + priv->csum_flags = flags;
2586 + }
2587 +
2588 +- switch (priv->csum_type) {
2589 ++ switch (csum_type) {
2590 + case NFT_PAYLOAD_CSUM_NONE:
2591 + case NFT_PAYLOAD_CSUM_INET:
2592 + break;
2593 + default:
2594 + return -EOPNOTSUPP;
2595 + }
2596 ++ priv->csum_type = csum_type;
2597 +
2598 + return nft_validate_register_load(priv->sreg, priv->len);
2599 + }
2600 +@@ -624,6 +631,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
2601 + {
2602 + enum nft_payload_bases base;
2603 + unsigned int offset, len;
2604 ++ int err;
2605 +
2606 + if (tb[NFTA_PAYLOAD_BASE] == NULL ||
2607 + tb[NFTA_PAYLOAD_OFFSET] == NULL ||
2608 +@@ -649,8 +657,13 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
2609 + if (tb[NFTA_PAYLOAD_DREG] == NULL)
2610 + return ERR_PTR(-EINVAL);
2611 +
2612 +- offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
2613 +- len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
2614 ++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_OFFSET], U8_MAX, &offset);
2615 ++ if (err < 0)
2616 ++ return ERR_PTR(err);
2617 ++
2618 ++ err = nft_parse_u32_check(tb[NFTA_PAYLOAD_LEN], U8_MAX, &len);
2619 ++ if (err < 0)
2620 ++ return ERR_PTR(err);
2621 +
2622 + if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
2623 + base != NFT_PAYLOAD_LL_HEADER)
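The nft_payload hunks validate the netlink-supplied checksum offset, payload offset and length against U8_MAX before copying them into the expression's u8 fields, instead of truncating silently. A tiny sketch of the validate-then-assign pattern; the helper mimics nft_parse_u32_check in spirit, but its implementation here is this example's own:

  #include <errno.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Reject values that would not fit before narrowing them. */
  static int parse_u32_check(uint32_t val, uint32_t max, uint32_t *dest)
  {
      if (val > max)
          return -ERANGE;
      *dest = val;
      return 0;
  }

  int main(void)
  {
      uint32_t csum_offset;
      uint8_t stored;

      if (parse_u32_check(300, UINT8_MAX, &csum_offset) < 0)
          puts("rejected");    /* 300 will not fit a u8: refused, not wrapped to 44 */

      if (parse_u32_check(40, UINT8_MAX, &csum_offset) == 0) {
          stored = (uint8_t)csum_offset;    /* safe: already bounds-checked */
          printf("stored %u\n", (unsigned int)stored);
      }
      return 0;
  }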
2624 +diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
2625 +index 1effd4878619f..4e850c81ad8d8 100644
2626 +--- a/net/netfilter/nft_tunnel.c
2627 ++++ b/net/netfilter/nft_tunnel.c
2628 +@@ -134,6 +134,7 @@ static const struct nft_expr_ops nft_tunnel_get_ops = {
2629 +
2630 + static struct nft_expr_type nft_tunnel_type __read_mostly = {
2631 + .name = "tunnel",
2632 ++ .family = NFPROTO_NETDEV,
2633 + .ops = &nft_tunnel_get_ops,
2634 + .policy = nft_tunnel_policy,
2635 + .maxattr = NFTA_TUNNEL_MAX,
2636 +diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2637 +index a2696acbcd9d2..8f5ef28411992 100644
2638 +--- a/net/packet/af_packet.c
2639 ++++ b/net/packet/af_packet.c
2640 +@@ -2960,8 +2960,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2641 + if (err)
2642 + goto out_free;
2643 +
2644 +- if (sock->type == SOCK_RAW &&
2645 +- !dev_validate_header(dev, skb->data, len)) {
2646 ++ if ((sock->type == SOCK_RAW &&
2647 ++ !dev_validate_header(dev, skb->data, len)) || !skb->len) {
2648 + err = -EINVAL;
2649 + goto out_free;
2650 + }
2651 +diff --git a/net/rose/rose_loopback.c b/net/rose/rose_loopback.c
2652 +index 11c45c8c6c164..036d92c0ad794 100644
2653 +--- a/net/rose/rose_loopback.c
2654 ++++ b/net/rose/rose_loopback.c
2655 +@@ -96,7 +96,8 @@ static void rose_loopback_timer(struct timer_list *unused)
2656 + }
2657 +
2658 + if (frametype == ROSE_CALL_REQUEST) {
2659 +- if (!rose_loopback_neigh->dev) {
2660 ++ if (!rose_loopback_neigh->dev &&
2661 ++ !rose_loopback_neigh->loopback) {
2662 + kfree_skb(skb);
2663 + continue;
2664 + }
2665 +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
2666 +index ae5847de94c88..81fcf6c5bde96 100644
2667 +--- a/net/sched/sch_generic.c
2668 ++++ b/net/sched/sch_generic.c
2669 +@@ -403,7 +403,7 @@ static inline bool qdisc_restart(struct Qdisc *q, int *packets)
2670 +
2671 + void __qdisc_run(struct Qdisc *q)
2672 + {
2673 +- int quota = dev_tx_weight;
2674 ++ int quota = READ_ONCE(dev_tx_weight);
2675 + int packets;
2676 +
2677 + while (qdisc_restart(q, &packets)) {
2678 +diff --git a/net/socket.c b/net/socket.c
2679 +index 94358566c9d10..02feaf5bd84a3 100644
2680 +--- a/net/socket.c
2681 ++++ b/net/socket.c
2682 +@@ -1661,7 +1661,7 @@ int __sys_listen(int fd, int backlog)
2683 +
2684 + sock = sockfd_lookup_light(fd, &err, &fput_needed);
2685 + if (sock) {
2686 +- somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
2687 ++ somaxconn = READ_ONCE(sock_net(sock->sk)->core.sysctl_somaxconn);
2688 + if ((unsigned int)backlog > somaxconn)
2689 + backlog = somaxconn;
2690 +
2691 +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
2692 +index 08e1ccc01e983..1893203cc94fc 100644
2693 +--- a/net/sunrpc/clnt.c
2694 ++++ b/net/sunrpc/clnt.c
2695 +@@ -1896,7 +1896,7 @@ call_encode(struct rpc_task *task)
2696 + break;
2697 + case -EKEYEXPIRED:
2698 + if (!task->tk_cred_retry) {
2699 +- rpc_exit(task, task->tk_status);
2700 ++ rpc_call_rpcerror(task, task->tk_status);
2701 + } else {
2702 + task->tk_action = call_refresh;
2703 + task->tk_cred_retry--;
2704 +diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
2705 +index 28a8cdef8e51f..6f58be5a17711 100644
2706 +--- a/net/xfrm/xfrm_policy.c
2707 ++++ b/net/xfrm/xfrm_policy.c
2708 +@@ -3619,6 +3619,7 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
2709 + if (pols[1]) {
2710 + if (IS_ERR(pols[1])) {
2711 + XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
2712 ++ xfrm_pol_put(pols[0]);
2713 + return 0;
2714 + }
2715 + pols[1]->curlft.use_time = ktime_get_real_seconds();
2716 +diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
2717 +index 952fff4855467..2dde6e5e9e69f 100644
2718 +--- a/scripts/Makefile.modpost
2719 ++++ b/scripts/Makefile.modpost
2720 +@@ -75,8 +75,7 @@ obj := $(KBUILD_EXTMOD)
2721 + src := $(obj)
2722 +
2723 + # Include the module's Makefile to find KBUILD_EXTRA_SYMBOLS
2724 +-include $(if $(wildcard $(KBUILD_EXTMOD)/Kbuild), \
2725 +- $(KBUILD_EXTMOD)/Kbuild, $(KBUILD_EXTMOD)/Makefile)
2726 ++include $(if $(wildcard $(src)/Kbuild), $(src)/Kbuild, $(src)/Makefile)
2727 + endif
2728 +
2729 + MODPOST += $(subst -i,-n,$(filter -i,$(MAKEFLAGS))) -s -T - $(wildcard vmlinux)
2730 +diff --git a/tools/testing/selftests/bpf/test_align.c b/tools/testing/selftests/bpf/test_align.c
2731 +index 4b9a26caa2c2e..6cc29b58d6707 100644
2732 +--- a/tools/testing/selftests/bpf/test_align.c
2733 ++++ b/tools/testing/selftests/bpf/test_align.c
2734 +@@ -475,10 +475,10 @@ static struct bpf_align_test tests[] = {
2735 + */
2736 + {7, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
2737 + /* Checked s>=0 */
2738 +- {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
2739 ++ {9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
2740 + /* packet pointer + nonnegative (4n+2) */
2741 +- {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
2742 +- {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
2743 ++ {11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
2744 ++ {13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
2745 + /* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
2746 + * We checked the bounds, but it might have been able
2747 + * to overflow if the packet pointer started in the
2748 +@@ -486,7 +486,7 @@ static struct bpf_align_test tests[] = {
2749 + * So we did not get a 'range' on R6, and the access
2750 + * attempt will fail.
2751 + */
2752 +- {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
2753 ++ {15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
2754 + }
2755 + },
2756 + {
2757 +@@ -580,18 +580,18 @@ static struct bpf_align_test tests[] = {
2758 + /* Adding 14 makes R6 be (4n+2) */
2759 + {11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
2760 + /* Subtracting from packet pointer overflows ubounds */
2761 +- {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
2762 ++ {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
2763 + /* New unknown value in R7 is (4n), >= 76 */
2764 + {15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
2765 + /* Adding it to packet pointer gives nice bounds again */
2766 +- {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
2767 ++ {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
2768 + /* At the time the word size load is performed from R5,
2769 + * its total fixed offset is NET_IP_ALIGN + reg->off (0)
2770 + * which is 2. Then the variable offset is (4n+2), so
2771 + * the total offset is 4-byte aligned and meets the
2772 + * load's requirements.
2773 + */
2774 +- {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
2775 ++ {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
2776 + },
2777 + },
2778 + };