Gentoo Archives: gentoo-commits

From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.12 commit in: /
Date: Wed, 23 Jun 2021 15:15:09
Message-Id: 1624461293.1a7f084fb13953ad56900d4a19ac2e2aecf413af.mpagano@gentoo
commit:     1a7f084fb13953ad56900d4a19ac2e2aecf413af
Author:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
AuthorDate: Wed Jun 23 15:14:53 2021 +0000
Commit:     Mike Pagano <mpagano <AT> gentoo <DOT> org>
CommitDate: Wed Jun 23 15:14:53 2021 +0000
URL:        https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=1a7f084f

Linux patch 5.12.13

Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>

 0000_README              |    4 +
 1012_linux-5.12.13.patch | 6477 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 6481 insertions(+)

diff --git a/0000_README b/0000_README
index 07044b3..34c90d1 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch: 1011_linux-5.12.12.patch
From: http://www.kernel.org
Desc: Linux 5.12.12

+Patch: 1012_linux-5.12.13.patch
+From: http://www.kernel.org
+Desc: Linux 5.12.13
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.

diff --git a/1012_linux-5.12.13.patch b/1012_linux-5.12.13.patch
new file mode 100644
index 0000000..ef75d57
--- /dev/null
+++ b/1012_linux-5.12.13.patch
@@ -0,0 +1,6477 @@
+diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst
+index 03f294a638bd8..d3028554b1e9c 100644
+--- a/Documentation/vm/slub.rst
++++ b/Documentation/vm/slub.rst
+@@ -181,7 +181,7 @@ SLUB Debug output
+ Here is a sample of slub debug output::
+
+ ====================================================================
+- BUG kmalloc-8: Redzone overwritten
++ BUG kmalloc-8: Right Redzone overwritten
+ --------------------------------------------------------------------
+
+ INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
+@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
+ INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
+ INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
+
+- Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
+- Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
+- Redzone 0xc90f6d28: 00 cc cc cc .
+- Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
++ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
++ Object (0xc90f6d20): 31 30 31 39 2e 30 30 35 1019.005
++ Redzone (0xc90f6d28): 00 cc cc cc .
++ Padding (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
+
+ [<c010523d>] dump_trace+0x63/0x1eb
+ [<c01053df>] show_trace_log_lvl+0x1a/0x2f
+diff --git a/Makefile b/Makefile
+index e0a252b644630..d2fe36db78aed 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+
+@@ -913,11 +913,14 @@ CC_FLAGS_LTO += -fvisibility=hidden
+ # Limit inlining across translation units to reduce binary size
+ KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
+
+-# Check for frame size exceeding threshold during prolog/epilog insertion.
++# Check for frame size exceeding threshold during prolog/epilog insertion
++# when using lld < 13.0.0.
+ ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 130000; echo $$?),0)
+ KBUILD_LDFLAGS += -plugin-opt=-warn-stack-size=$(CONFIG_FRAME_WARN)
+ endif
+ endif
++endif
+
+ ifdef CONFIG_LTO
+ KBUILD_CFLAGS += -fno-lto $(CC_FLAGS_LTO)
+diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
+index 95f8a4380e110..7a5449dfcb290 100644
+--- a/arch/arc/include/uapi/asm/sigcontext.h
++++ b/arch/arc/include/uapi/asm/sigcontext.h
+@@ -18,6 +18,7 @@
+ */
+ struct sigcontext {
+ struct user_regs_struct regs;
++ struct user_regs_arcv2 v2abi;
+ };
+
+ #endif /* _ASM_ARC_SIGCONTEXT_H */
+diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
+index fdbe06c98895e..4868bdebf586d 100644
+--- a/arch/arc/kernel/signal.c
++++ b/arch/arc/kernel/signal.c
+@@ -61,6 +61,41 @@ struct rt_sigframe {
+ unsigned int sigret_magic;
+ };
+
++static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++{
++ int err = 0;
++#ifndef CONFIG_ISA_ARCOMPACT
++ struct user_regs_arcv2 v2abi;
++
++ v2abi.r30 = regs->r30;
++#ifdef CONFIG_ARC_HAS_ACCL_REGS
++ v2abi.r58 = regs->r58;
++ v2abi.r59 = regs->r59;
++#else
++ v2abi.r58 = v2abi.r59 = 0;
++#endif
++ err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
++#endif
++ return err;
++}
++
++static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
++{
++ int err = 0;
++#ifndef CONFIG_ISA_ARCOMPACT
++ struct user_regs_arcv2 v2abi;
++
++ err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
++
++ regs->r30 = v2abi.r30;
++#ifdef CONFIG_ARC_HAS_ACCL_REGS
++ regs->r58 = v2abi.r58;
++ regs->r59 = v2abi.r59;
++#endif
++#endif
++ return err;
++}
++
+ static int
+ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ sigset_t *set)
+@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+
+ err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
++
++ if (is_isa_arcv2())
++ err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
++
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+
+ return err ? -EFAULT : 0;
+@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+ err |= __copy_from_user(&uregs.scratch,
+ &(sf->uc.uc_mcontext.regs.scratch),
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
++
++ if (is_isa_arcv2())
++ err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
++
+ if (err)
+ return -EFAULT;
+
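[The ARC hunks above follow a common pattern: conditionally stash an optional register block into the user-visible signal context and accumulate copy errors. A minimal user-space sketch of that pattern, with memcpy() standing in for __copy_to_user() and all names illustrative only, not taken from the patch:

#include <string.h>

struct user_regs_arcv2 { unsigned long r30, r58, r59; };
struct mcontext { unsigned long scratch[40]; struct user_regs_arcv2 v2abi; };

/* memcpy() stands in for __copy_to_user(), which returns 0 on success. */
static int copy_out(void *uaddr, const void *kaddr, unsigned long n)
{
	memcpy(uaddr, kaddr, n);
	return 0;
}

static int save_extra_regs(struct mcontext *mctx,
			   const struct user_regs_arcv2 *live, int has_v2abi)
{
	int err = 0;

	/* Only ISAs that actually have the extra block stash it; errors
	 * are OR-accumulated exactly as in stash_usr_regs() above. */
	if (has_v2abi)
		err |= copy_out(&mctx->v2abi, live, sizeof(*live));
	return err;
}

Note that because struct sigcontext is uapi, appending v2abi grows the user-visible context; the kernel only touches it when is_isa_arcv2() holds.]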
+diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
+index 766f064f00fbf..167fde7f2fce1 100644
+--- a/arch/powerpc/perf/core-book3s.c
++++ b/arch/powerpc/perf/core-book3s.c
+@@ -2242,7 +2242,7 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs)
+ bool use_siar = regs_use_siar(regs);
+ unsigned long siar = mfspr(SPRN_SIAR);
+
+- if (ppmu->flags & PPMU_P10_DD1) {
++ if (ppmu && (ppmu->flags & PPMU_P10_DD1)) {
+ if (siar)
+ return siar;
+ else
+diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
+index d9522fc35ca5a..4f116be9152f5 100644
+--- a/arch/riscv/Kconfig
++++ b/arch/riscv/Kconfig
+@@ -54,11 +54,11 @@ config RISCV
+ select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ select HANDLE_DOMAIN_IRQ
+ select HAVE_ARCH_AUDITSYSCALL
+- select HAVE_ARCH_JUMP_LABEL
+- select HAVE_ARCH_JUMP_LABEL_RELATIVE
++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
++ select HAVE_ARCH_JUMP_LABEL_RELATIVE if !XIP_KERNEL
+ select HAVE_ARCH_KASAN if MMU && 64BIT
+ select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
+- select HAVE_ARCH_KGDB
++ select HAVE_ARCH_KGDB if !XIP_KERNEL
+ select HAVE_ARCH_KGDB_QXFER_PKT
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_SECCOMP_FILTER
+@@ -73,9 +73,9 @@ config RISCV
+ select HAVE_GCC_PLUGINS
+ select HAVE_GENERIC_VDSO if MMU && 64BIT
+ select HAVE_IRQ_TIME_ACCOUNTING
+- select HAVE_KPROBES
+- select HAVE_KPROBES_ON_FTRACE
+- select HAVE_KRETPROBES
++ select HAVE_KPROBES if !XIP_KERNEL
++ select HAVE_KPROBES_ON_FTRACE if !XIP_KERNEL
++ select HAVE_KRETPROBES if !XIP_KERNEL
+ select HAVE_PCI
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+@@ -227,11 +227,11 @@ config ARCH_RV64I
+ bool "RV64I"
+ select 64BIT
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+- select HAVE_DYNAMIC_FTRACE if MMU && $(cc-option,-fpatchable-function-entry=8)
++ select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && $(cc-option,-fpatchable-function-entry=8)
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+- select HAVE_FTRACE_MCOUNT_RECORD
++ select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
+ select HAVE_FUNCTION_GRAPH_TRACER
+- select HAVE_FUNCTION_TRACER
++ select HAVE_FUNCTION_TRACER if !XIP_KERNEL
+ select SWIOTLB if MMU
+
+ endchoice
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 12de7a9c85b35..9cc71ca9a88f9 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -651,9 +651,9 @@ ENDPROC(stack_overflow)
+ .Lcleanup_sie_mcck:
+ larl %r13,.Lsie_entry
+ slgr %r9,%r13
+- larl %r13,.Lsie_skip
++ lghi %r13,.Lsie_skip - .Lsie_entry
+ clgr %r9,%r13
+- jh .Lcleanup_sie_int
++ jhe .Lcleanup_sie_int
+ oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
+ .Lcleanup_sie_int:
+ BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index ceeba9f631722..fdee23ea4e173 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -578,10 +578,17 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
+ * PKRU state is switched eagerly because it needs to be valid before we
+ * return to userland e.g. for a copy_to_user() operation.
+ */
+- if (current->mm) {
++ if (!(current->flags & PF_KTHREAD)) {
++ /*
++ * If the PKRU bit in xsave.header.xfeatures is not set,
++ * then the PKRU component was in init state, which means
++ * XRSTOR will set PKRU to 0. If the bit is not set then
++ * get_xsave_addr() will return NULL because the PKRU value
++ * in memory is not valid. This means pkru_val has to be
++ * set to 0 and not to init_pkru_value.
++ */
+ pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
+- if (pk)
+- pkru_val = pk->pkru;
++ pkru_val = pk ? pk->pkru : 0;
+ }
+ __write_pkru(pkru_val);
+ }
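[The decision in that hunk reduces to: use the saved PKRU value when the xsave area actually carries one, otherwise force 0 rather than the init value. A stand-alone sketch of the selection, with a hypothetical find_pkru() in place of get_xsave_addr():

#include <stdio.h>

struct pkru_state { unsigned int pkru; };

/* Hypothetical stand-in for get_xsave_addr(): returns NULL when the
 * component is in init state and the value in memory is not valid. */
static struct pkru_state *find_pkru(struct pkru_state *saved, int feature_set)
{
	return feature_set ? saved : NULL;
}

int main(void)
{
	struct pkru_state saved = { .pkru = 0x55555554 };
	struct pkru_state *pk;
	unsigned int pkru_val;

	pk = find_pkru(&saved, 0);	/* PKRU component in init state */
	pkru_val = pk ? pk->pkru : 0;	/* must be 0, not init_pkru_value */
	printf("%#x\n", pkru_val);
	return 0;
}

The PF_KTHREAD test replaces the current->mm check because kernel threads can temporarily have an mm (via kthread_use_mm) without owning user PKRU state.]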
+diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
+index a4ec65317a7fa..ec3ae30547920 100644
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -307,13 +307,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ return 0;
+ }
+
+- if (!access_ok(buf, size))
+- return -EACCES;
++ if (!access_ok(buf, size)) {
++ ret = -EACCES;
++ goto out;
++ }
+
+- if (!static_cpu_has(X86_FEATURE_FPU))
+- return fpregs_soft_set(current, NULL,
+- 0, sizeof(struct user_i387_ia32_struct),
+- NULL, buf) != 0;
++ if (!static_cpu_has(X86_FEATURE_FPU)) {
++ ret = fpregs_soft_set(current, NULL, 0,
++ sizeof(struct user_i387_ia32_struct),
++ NULL, buf);
++ goto out;
++ }
+
+ if (use_xsave()) {
+ struct _fpx_sw_bytes fx_sw_user;
+@@ -369,6 +373,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ fpregs_unlock();
+ return 0;
+ }
++
++ /*
++ * The above did an FPU restore operation, restricted to
++ * the user portion of the registers, and failed, but the
++ * microcode might have modified the FPU registers
++ * nevertheless.
++ *
++ * If the FPU registers do not belong to current, then
++ * invalidate the FPU register state otherwise the task might
++ * preempt current and return to user space with corrupted
++ * FPU registers.
++ *
++ * In case current owns the FPU registers then no further
++ * action is required. The fixup below will handle it
++ * correctly.
++ */
++ if (test_thread_flag(TIF_NEED_FPU_LOAD))
++ __cpu_invalidate_fpregs_state();
++
+ fpregs_unlock();
+ } else {
+ /*
+@@ -377,7 +400,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ */
+ ret = __copy_from_user(&env, buf, sizeof(env));
+ if (ret)
+- goto err_out;
++ goto out;
+ envp = &env;
+ }
+
+@@ -405,16 +428,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ if (use_xsave() && !fx_only) {
+ u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
+
+- if (using_compacted_format()) {
+- ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+- } else {
+- ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
+-
+- if (!ret && state_size > offsetof(struct xregs_state, header))
+- ret = validate_user_xstate_header(&fpu->state.xsave.header);
+- }
++ ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
+ if (ret)
+- goto err_out;
++ goto out;
+
+ sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
+ fx_only);
+@@ -434,7 +450,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
+ if (ret) {
+ ret = -EFAULT;
+- goto err_out;
++ goto out;
+ }
+
+ sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
+@@ -452,7 +468,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ } else {
+ ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
+ if (ret)
+- goto err_out;
++ goto out;
+
+ fpregs_lock();
+ ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
+@@ -463,7 +479,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+ fpregs_deactivate(fpu);
+ fpregs_unlock();
+
+-err_out:
++out:
+ if (ret)
+ fpu__clear_user_states(fpu);
+ return ret;
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
+index fa023f3feb25d..43013ac0fd4d9 100644
+--- a/arch/x86/kvm/lapic.c
++++ b/arch/x86/kvm/lapic.c
+@@ -1410,6 +1410,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
+ if (!apic_x2apic_mode(apic))
+ valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
+
++ if (alignment + len > 4)
++ return 1;
++
+ if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
+ return 1;
+
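[The lapic.c check rejects any read that would spill past the 4 valid bytes of an APIC register before the register mask is even consulted. A rough stand-alone model of that bounds check (the 0xf stride and all names are assumptions for illustration, not shown in the hunk itself):

#include <assert.h>

/* Registers sit on 16-byte strides but only the low 4 bytes are valid;
 * reject reads that start misaligned within a register and run past it. */
static int reg_read_ok(unsigned int offset, unsigned int len)
{
	unsigned int alignment = offset & 0xf;

	return alignment + len <= 4;
}

int main(void)
{
	assert(reg_read_ok(0x30, 4));	/* whole aligned register */
	assert(!reg_read_ok(0x33, 4));	/* crosses the register boundary */
	assert(!reg_read_ok(0x30, 8));	/* longer than one register */
	return 0;
}
]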
+diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
+index cd0faa1876743..676ec0d1e6be4 100644
+--- a/arch/x86/kvm/mmu/mmu.c
++++ b/arch/x86/kvm/mmu/mmu.c
+@@ -4726,9 +4726,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
+ context->inject_page_fault = kvm_inject_page_fault;
+ }
+
++static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
++{
++ union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
++
++ /*
++ * Nested MMUs are used only for walking L2's gva->gpa, they never have
++ * shadow pages of their own and so "direct" has no meaning. Set it
++ * to "true" to try to detect bogus usage of the nested MMU.
++ */
++ role.base.direct = true;
++
++ if (!is_paging(vcpu))
++ role.base.level = 0;
++ else if (is_long_mode(vcpu))
++ role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
++ PT64_ROOT_4LEVEL;
++ else if (is_pae(vcpu))
++ role.base.level = PT32E_ROOT_LEVEL;
++ else
++ role.base.level = PT32_ROOT_LEVEL;
++
++ return role;
++}
++
+ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
+ {
+- union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
++ union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
+ struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
+
+ if (new_role.as_u64 == g_context->mmu_role.as_u64)
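[The new helper derives the nested MMU's root level purely from the guest's paging mode. The mapping it encodes can be shown in isolation; the is_*() predicates are reduced to plain booleans here for illustration (a sketch, not the kernel code):

#include <assert.h>

enum { PT32_ROOT_LEVEL = 2, PT32E_ROOT_LEVEL = 3,
       PT64_ROOT_4LEVEL = 4, PT64_ROOT_5LEVEL = 5 };

static int nested_root_level(int paging, int long_mode, int la57, int pae)
{
	if (!paging)
		return 0;		/* no gva->gpa walk at all */
	if (long_mode)
		return la57 ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
	if (pae)
		return PT32E_ROOT_LEVEL;
	return PT32_ROOT_LEVEL;
}

int main(void)
{
	assert(nested_root_level(1, 1, 0, 0) == PT64_ROOT_4LEVEL);
	assert(nested_root_level(1, 0, 0, 1) == PT32E_ROOT_LEVEL);
	return 0;
}
]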
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index cf37205784297..a6ca7e657af27 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6991,7 +6991,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+
+ static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+ {
+- emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
++
++ vcpu->arch.hflags = emul_flags;
++ kvm_mmu_reset_context(vcpu);
+ }
+
+ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+@@ -8147,6 +8150,7 @@ void kvm_arch_exit(void)
+ kvm_x86_ops.hardware_enable = NULL;
+ kvm_mmu_module_exit();
+ free_percpu(user_return_msrs);
++ kmem_cache_destroy(x86_emulator_cache);
+ kmem_cache_destroy(x86_fpu_cache);
+ #ifdef CONFIG_KVM_XEN
+ static_key_deferred_flush(&kvm_xen_enabled);
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 9e5ccc56f8e07..356b746dfbe7a 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
+ if (!IS_ENABLED(CONFIG_EFI))
+ return;
+
+- if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
++ if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
++ (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
++ efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
+ desc->flags |= IORES_MAP_ENCRYPTED;
+ }
+
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 5eb4dc2b97dac..e94da744386f3 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
+
+ /* make sure all non-reserved blocks are inside the limits */
+ bi->start = max(bi->start, low);
+- bi->end = min(bi->end, high);
++
++ /* preserve info for non-RAM areas above 'max_pfn': */
++ if (bi->end > high) {
++ numa_add_memblk_to(bi->nid, high, bi->end,
++ &numa_reserved_meminfo);
++ bi->end = high;
++ }
+
+ /* and there's no empty block */
+ if (bi->start >= bi->end)
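[Before the numa.c change the block tail above `high` was silently cut off; now it is moved to a side list before clamping. A toy version of that clamp-and-preserve step, with a hypothetical stash() in place of numa_add_memblk_to():

#include <stdio.h>

struct blk { unsigned long start, end; };

static void stash(unsigned long start, unsigned long end)
{
	printf("preserved non-RAM range [%#lx, %#lx)\n", start, end);
}

static void clamp_block(struct blk *bi, unsigned long low, unsigned long high)
{
	if (bi->start < low)
		bi->start = low;
	/* keep, rather than drop, the part above the RAM limit */
	if (bi->end > high) {
		stash(high, bi->end);
		bi->end = high;
	}
}

int main(void)
{
	struct blk b = { 0x1000, 0x500000 };

	clamp_block(&b, 0x2000, 0x400000);
	printf("clamped to [%#lx, %#lx)\n", b.start, b.end);
	return 0;
}
]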
+diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
+index 0c2827fd8c195..03b1b03349477 100644
+--- a/drivers/dma/Kconfig
++++ b/drivers/dma/Kconfig
+@@ -59,6 +59,7 @@ config DMA_OF
+ #devices
+ config ALTERA_MSGDMA
+ tristate "Altera / Intel mSGDMA Engine"
++ depends on HAS_IOMEM
+ select DMA_ENGINE
+ help
+ Enable support for Altera / Intel mSGDMA controller.
+diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+index 4ec909e0b8106..4ae057922ef1f 100644
+--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
++++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
+@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+ }
+
+ if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
++ err = -EINVAL;
+ dev_err(dev, "DPDMAI major version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
+ }
+
+ if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
++ err = -EINVAL;
+ dev_err(dev, "DPDMAI minor version mismatch\n"
+ "Found %u.%u, supported version is %u.%u\n",
+ priv->dpdmai_attr.version.major,
+@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
+ ppriv->store =
+ dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
+ if (!ppriv->store) {
++ err = -ENOMEM;
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ goto err_store;
+ }
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 59f2104ffc771..eb41bb9df0fd9 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -218,6 +218,7 @@ static int idxd_setup_engines(struct idxd_device *idxd)
+ engine->idxd = idxd;
+ device_initialize(&engine->conf_dev);
+ engine->conf_dev.parent = &idxd->conf_dev;
++ engine->conf_dev.bus = &dsa_bus_type;
+ engine->conf_dev.type = &idxd_engine_device_type;
+ rc = dev_set_name(&engine->conf_dev, "engine%d.%d", idxd->id, engine->id);
+ if (rc < 0) {
+@@ -718,6 +719,7 @@ module_init(idxd_init_module);
+
+ static void __exit idxd_exit_module(void)
+ {
++ idxd_unregister_driver();
+ pci_unregister_driver(&idxd_pci_driver);
+ idxd_cdev_remove();
+ idxd_unregister_bus_type();
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index fd8d2bc3be9f5..110de8a600588 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2694,13 +2694,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+ for (i = 0; i < len / period_len; i++) {
+ desc = pl330_get_desc(pch);
+ if (!desc) {
++ unsigned long iflags;
++
+ dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
+
+ if (!first)
+ return NULL;
+
+- spin_lock_irqsave(&pl330->pool_lock, flags);
++ spin_lock_irqsave(&pl330->pool_lock, iflags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+@@ -2710,7 +2712,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
+
+ list_move_tail(&first->node, &pl330->desc_pool);
+
+- spin_unlock_irqrestore(&pl330->pool_lock, flags);
++ spin_unlock_irqrestore(&pl330->pool_lock, iflags);
+
+ return NULL;
+ }
+diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
+index 365f94eb3b081..3f926a653bd88 100644
+--- a/drivers/dma/qcom/Kconfig
++++ b/drivers/dma/qcom/Kconfig
+@@ -33,6 +33,7 @@ config QCOM_GPI_DMA
+
+ config QCOM_HIDMA_MGMT
+ tristate "Qualcomm Technologies HIDMA Management support"
++ depends on HAS_IOMEM
+ select DMA_ENGINE
+ help
+ Enable support for the Qualcomm Technologies HIDMA Management.
+diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
+index f8ffa02e279ff..ba46a0a15a936 100644
+--- a/drivers/dma/sf-pdma/Kconfig
++++ b/drivers/dma/sf-pdma/Kconfig
+@@ -1,5 +1,6 @@
+ config SF_PDMA
+ tristate "Sifive PDMA controller driver"
++ depends on HAS_IOMEM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
+index 265d7c07b348e..e1827393143f1 100644
+--- a/drivers/dma/ste_dma40.c
++++ b/drivers/dma/ste_dma40.c
+@@ -3675,6 +3675,9 @@ static int __init d40_probe(struct platform_device *pdev)
+
+ kfree(base->lcla_pool.base_unaligned);
+
++ if (base->lcpa_base)
++ iounmap(base->lcpa_base);
++
+ if (base->phy_lcpa)
+ release_mem_region(base->phy_lcpa,
+ base->lcpa_size);
+diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
+index 70b29bd079c9f..ff7dfb3fdeb47 100644
+--- a/drivers/dma/xilinx/xilinx_dpdma.c
++++ b/drivers/dma/xilinx/xilinx_dpdma.c
+@@ -1459,7 +1459,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
+ */
+ static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
+ {
+- dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
++ dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
+ dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
+ }
+
+@@ -1596,6 +1596,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
+ return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
+ }
+
++static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
++{
++ unsigned int i;
++ void __iomem *reg;
++
++ /* Disable all interrupts */
++ xilinx_dpdma_disable_irq(xdev);
++
++ /* Stop all channels */
++ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
++ reg = xdev->reg + XILINX_DPDMA_CH_BASE
++ + XILINX_DPDMA_CH_OFFSET * i;
++ dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
++ }
++
++ /* Clear the interrupt status registers */
++ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
++ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
++}
++
+ static int xilinx_dpdma_probe(struct platform_device *pdev)
+ {
+ struct xilinx_dpdma_device *xdev;
+@@ -1622,6 +1642,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
+ if (IS_ERR(xdev->reg))
+ return PTR_ERR(xdev->reg);
+
++ dpdma_hw_init(xdev);
++
+ xdev->irq = platform_get_irq(pdev, 0);
+ if (xdev->irq < 0) {
+ dev_err(xdev->dev, "failed to get platform irq\n");
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+index 2342c5d216f9b..72d23651501d4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+@@ -6769,8 +6769,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
+ if (ring->use_doorbell) {
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
+ (adev->doorbell_index.kiq * 2) << 2);
++ /* If GC has entered CGPG, ringing doorbell > first page doesn't
++ * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
++ * this issue.
++ */
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+- (adev->doorbell_index.userqueue_end * 2) << 2);
++ (adev->doorbell.size - 4));
+ }
+
+ WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index d2c020a91c0be..1fdfb7783404e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -3623,8 +3623,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
+ if (ring->use_doorbell) {
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
+ (adev->doorbell_index.kiq * 2) << 2);
++ /* If GC has entered CGPG, ringing doorbell > first page doesn't
++ * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
++ * this issue.
++ */
+ WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+- (adev->doorbell_index.userqueue_end * 2) << 2);
++ (adev->doorbell.size - 4));
+ }
+
+ WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
+diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
+index dfa9fdbe98da2..06bb24d7a9fee 100644
+--- a/drivers/gpu/drm/radeon/radeon_uvd.c
++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
+@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
+ if (rdev->uvd.vcpu_bo == NULL)
+ return -EINVAL;
+
+- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
++ memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
+- memset(ptr, 0, size);
++ memset_io((void __iomem *)ptr, 0, size);
+
+ return 0;
+ }
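[The radeon switch from memcpy()/memset() to memcpy_toio()/memset_io() matters because the UVD buffer can sit in device (__iomem) memory, which must not be accessed through ordinary string routines. As a user-space analogue of the idea only — the real kernel helpers are architecture-specific — the access pattern is an explicitly volatile, fixed-width copy loop rather than whatever access widths and ordering memcpy() happens to pick:

#include <stddef.h>

/* Toy memcpy_toio(): byte-wise volatile stores, no compiler-chosen
 * access widths or merging within the loop. Illustration only. */
static void toy_memcpy_toio(volatile unsigned char *dst,
			    const unsigned char *src, size_t n)
{
	while (n--)
		*dst++ = *src++;
}

static void toy_memset_io(volatile unsigned char *dst, int c, size_t n)
{
	while (n--)
		*dst++ = (unsigned char)c;
}
]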
+diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+index bbdfd5e26ec88..f75fb157f2ff7 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
+ goto err_disable_clk_tmds;
+ }
+
+- ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
++ ret = sun8i_hdmi_phy_get(hdmi, phy_node);
+ of_node_put(phy_node);
+ if (ret) {
+ dev_err(dev, "Couldn't get the HDMI PHY\n");
+@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
+
+ cleanup_encoder:
+ drm_encoder_cleanup(encoder);
+- sun8i_hdmi_phy_remove(hdmi);
+ err_disable_clk_tmds:
+ clk_disable_unprepare(hdmi->clk_tmds);
+ err_assert_ctrl_reset:
+@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
+ struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dw_hdmi_unbind(hdmi->hdmi);
+- sun8i_hdmi_phy_remove(hdmi);
+ clk_disable_unprepare(hdmi->clk_tmds);
+ reset_control_assert(hdmi->rst_ctrl);
+ gpiod_set_value(hdmi->ddc_en, 0);
+@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
+ .of_match_table = sun8i_dw_hdmi_dt_ids,
+ },
+ };
+-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
++
++static int __init sun8i_dw_hdmi_init(void)
++{
++ int ret;
++
++ ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
++ if (ret)
++ return ret;
++
++ ret = platform_driver_register(&sun8i_hdmi_phy_driver);
++ if (ret) {
++ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
++ return ret;
++ }
++
++ return ret;
++}
++
++static void __exit sun8i_dw_hdmi_exit(void)
++{
++ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
++ platform_driver_unregister(&sun8i_hdmi_phy_driver);
++}
++
++module_init(sun8i_dw_hdmi_init);
++module_exit(sun8i_dw_hdmi_exit);
+
+ MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@××××.net>");
+ MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
+diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+index d4b55af0592f8..74f6ed0e25709 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
+ struct gpio_desc *ddc_en;
+ };
+
++extern struct platform_driver sun8i_hdmi_phy_driver;
++
+ static inline struct sun8i_dw_hdmi *
+ encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
+ {
+ return container_of(encoder, struct sun8i_dw_hdmi, encoder);
+ }
+
+-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
+-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
++int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
+
+ void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
+ void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
+diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+index 9994edf675096..c9239708d398c 100644
+--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
++++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+@@ -5,6 +5,7 @@
+
+ #include <linux/delay.h>
+ #include <linux/of_address.h>
++#include <linux/of_platform.h>
+
+ #include "sun8i_dw_hdmi.h"
+
+@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
+ { /* sentinel */ }
+ };
+
+-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
++int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
++{
++ struct platform_device *pdev = of_find_device_by_node(node);
++ struct sun8i_hdmi_phy *phy;
++
++ if (!pdev)
++ return -EPROBE_DEFER;
++
++ phy = platform_get_drvdata(pdev);
++ if (!phy)
++ return -EPROBE_DEFER;
++
++ hdmi->phy = phy;
++
++ put_device(&pdev->dev);
++
++ return 0;
++}
++
++static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
+ {
+ const struct of_device_id *match;
+- struct device *dev = hdmi->dev;
++ struct device *dev = &pdev->dev;
++ struct device_node *node = dev->of_node;
+ struct sun8i_hdmi_phy *phy;
+ struct resource res;
+ void __iomem *regs;
+@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
+ clk_prepare_enable(phy->clk_phy);
+ }
+
+- hdmi->phy = phy;
++ platform_set_drvdata(pdev, phy);
+
+ return 0;
+
+@@ -728,9 +749,9 @@ err_put_clk_bus:
+ return ret;
+ }
+
+-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
++static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
+ {
+- struct sun8i_hdmi_phy *phy = hdmi->phy;
++ struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(phy->clk_mod);
+ clk_disable_unprepare(phy->clk_bus);
+@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
+ clk_put(phy->clk_pll1);
+ clk_put(phy->clk_mod);
+ clk_put(phy->clk_bus);
++ return 0;
+ }
++
++struct platform_driver sun8i_hdmi_phy_driver = {
++ .probe = sun8i_hdmi_phy_probe,
++ .remove = sun8i_hdmi_phy_remove,
++ .driver = {
++ .name = "sun8i-hdmi-phy",
++ .of_match_table = sun8i_hdmi_phy_of_table,
++ },
++};
+diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
+index 25aac40f2764a..919877970ae3b 100644
+--- a/drivers/hwmon/scpi-hwmon.c
++++ b/drivers/hwmon/scpi-hwmon.c
+@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
+
+ scpi_scale_reading(&value, sensor);
+
++ /*
++ * Temperature sensor values are treated as signed values based on
++ * observation even though that is not explicitly specified, and
++ * because an unsigned u64 temperature does not really make practical
++ * sense especially when the temperature is below zero degrees Celsius.
++ */
++ if (sensor->info.class == TEMPERATURE)
++ return sprintf(buf, "%lld\n", (s64)value);
++
+ return sprintf(buf, "%llu\n", value);
+ }
+
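[The scpi-hwmon reading is carried around as a u64, so a below-zero temperature only prints correctly once it is reinterpreted as signed; the cast is the whole fix. A tiny stand-alone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t value = (uint64_t)-5000;	/* e.g. -5.000 degrees C */

	printf("%llu\n", (unsigned long long)value);	/* huge bogus number */
	printf("%lld\n", (long long)(int64_t)value);	/* -5000, as intended */
	return 0;
}
]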
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 00404024d7cd5..fea237838bb0a 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -642,11 +642,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+ nmi_exit();
+ }
+
++static u32 do_read_iar(struct pt_regs *regs)
++{
++ u32 iar;
++
++ if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
++ u64 pmr;
++
++ /*
++ * We were in a context with IRQs disabled. However, the
++ * entry code has set PMR to a value that allows any
++ * interrupt to be acknowledged, and not just NMIs. This can
++ * lead to surprising effects if the NMI has been retired in
++ * the meantime, and that there is an IRQ pending. The IRQ
++ * would then be taken in NMI context, something that nobody
++ * wants to debug twice.
++ *
++ * Until we sort this, drop PMR again to a level that will
++ * actually only allow NMIs before reading IAR, and then
++ * restore it to what it was.
++ */
++ pmr = gic_read_pmr();
++ gic_pmr_mask_irqs();
++ isb();
++
++ iar = gic_read_iar();
++
++ gic_write_pmr(pmr);
++ } else {
++ iar = gic_read_iar();
++ }
++
++ return iar;
++}
++
+ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+ {
+ u32 irqnr;
+
+- irqnr = gic_read_iar();
++ irqnr = do_read_iar(regs);
+
+ /* Check for special IDs first */
+ if ((irqnr >= 1020 && irqnr <= 1023))
+diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
+index 1f649d1780107..4484f9c079b75 100644
+--- a/drivers/net/can/usb/mcba_usb.c
++++ b/drivers/net/can/usb/mcba_usb.c
+@@ -82,6 +82,8 @@ struct mcba_priv {
+ bool can_ka_first_pass;
+ bool can_speed_check;
+ atomic_t free_ctx_cnt;
++ void *rxbuf[MCBA_MAX_RX_URBS];
++ dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
+ };
+
+ /* CAN frame */
+@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
+ for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
+ struct urb *urb = NULL;
+ u8 *buf;
++ dma_addr_t buf_dma;
+
+ /* create a URB, and a buffer for it */
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
+ }
+
+ buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+- GFP_KERNEL, &urb->transfer_dma);
++ GFP_KERNEL, &buf_dma);
+ if (!buf) {
+ netdev_err(netdev, "No memory left for USB buffer\n");
+ usb_free_urb(urb);
+@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
+ if (err) {
+ usb_unanchor_urb(urb);
+ usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
+- buf, urb->transfer_dma);
++ buf, buf_dma);
+ usb_free_urb(urb);
+ break;
+ }
+
++ priv->rxbuf[i] = buf;
++ priv->rxbuf_dma[i] = buf_dma;
++
+ /* Drop reference, USB core will take care of freeing it */
+ usb_free_urb(urb);
+ }
+@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
+
+ static void mcba_urb_unlink(struct mcba_priv *priv)
+ {
++ int i;
++
+ usb_kill_anchored_urbs(&priv->rx_submitted);
++
++ for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
++ usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
++ priv->rxbuf[i], priv->rxbuf_dma[i]);
++
+ usb_kill_anchored_urbs(&priv->tx_submitted);
+ }
+
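[The mcba_usb leak fix works by recording every coherent buffer and its DMA handle at allocation time so teardown can free exactly what was allocated. Stripped of the USB specifics, the bookkeeping pattern looks roughly like the following sketch, with malloc()/free() standing in for usb_alloc_coherent()/usb_free_coherent() and all names hypothetical:

#include <stdlib.h>

#define MAX_RX_BUFS 20

struct priv {
	void *rxbuf[MAX_RX_BUFS];		/* CPU addresses */
	unsigned long rxbuf_dma[MAX_RX_BUFS];	/* matching DMA handles */
};

static void start(struct priv *p)
{
	for (int i = 0; i < MAX_RX_BUFS; i++) {
		void *buf = malloc(64);

		if (!buf)
			break;
		/* remember both halves of the pair for later release */
		p->rxbuf[i] = buf;
		p->rxbuf_dma[i] = (unsigned long)buf;
	}
}

static void stop(struct priv *p)
{
	for (int i = 0; i < MAX_RX_BUFS; i++)
		free(p->rxbuf[i]);	/* free(NULL) is a no-op */
}

int main(void)
{
	struct priv p = { 0 };

	start(&p);
	stop(&p);
	return 0;
}
]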
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 102f2c91fdb85..20f8012bbe04a 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -236,36 +236,48 @@ static int ena_xdp_io_poll(struct napi_struct *napi, int budget)
+ static int ena_xdp_tx_map_frame(struct ena_ring *xdp_ring,
+ struct ena_tx_buffer *tx_info,
+ struct xdp_frame *xdpf,
+- void **push_hdr,
+- u32 *push_len)
++ struct ena_com_tx_ctx *ena_tx_ctx)
+ {
+ struct ena_adapter *adapter = xdp_ring->adapter;
+ struct ena_com_buf *ena_buf;
+- dma_addr_t dma = 0;
++ int push_len = 0;
++ dma_addr_t dma;
++ void *data;
+ u32 size;
+
+ tx_info->xdpf = xdpf;
++ data = tx_info->xdpf->data;
+ size = tx_info->xdpf->len;
+- ena_buf = tx_info->bufs;
+
+- /* llq push buffer */
+- *push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
+- *push_hdr = tx_info->xdpf->data;
++ if (xdp_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
++ /* Designate part of the packet for LLQ */
++ push_len = min_t(u32, size, xdp_ring->tx_max_header_size);
++
++ ena_tx_ctx->push_header = data;
++
++ size -= push_len;
++ data += push_len;
++ }
++
++ ena_tx_ctx->header_len = push_len;
+
+- if (size - *push_len > 0) {
++ if (size > 0) {
+ dma = dma_map_single(xdp_ring->dev,
+- *push_hdr + *push_len,
+- size - *push_len,
++ data,
++ size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(xdp_ring->dev, dma)))
+ goto error_report_dma_error;
+
+- tx_info->map_linear_data = 1;
+- tx_info->num_of_bufs = 1;
+- }
++ tx_info->map_linear_data = 0;
+
+- ena_buf->paddr = dma;
+- ena_buf->len = size;
++ ena_buf = tx_info->bufs;
++ ena_buf->paddr = dma;
++ ena_buf->len = size;
++
++ ena_tx_ctx->ena_bufs = ena_buf;
++ ena_tx_ctx->num_bufs = tx_info->num_of_bufs = 1;
++ }
+
+ return 0;
+
+@@ -274,10 +286,6 @@ error_report_dma_error:
+ &xdp_ring->syncp);
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
+
+- xdp_return_frame_rx_napi(tx_info->xdpf);
+- tx_info->xdpf = NULL;
+- tx_info->num_of_bufs = 0;
+-
+ return -EINVAL;
+ }
+
+@@ -289,8 +297,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+ struct ena_com_tx_ctx ena_tx_ctx = {};
+ struct ena_tx_buffer *tx_info;
+ u16 next_to_use, req_id;
+- void *push_hdr;
+- u32 push_len;
+ int rc;
+
+ next_to_use = xdp_ring->next_to_use;
+@@ -298,15 +304,11 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
+ tx_info = &xdp_ring->tx_buffer_info[req_id];
+ tx_info->num_of_bufs = 0;
+
+- rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
++ rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &ena_tx_ctx);
+ if (unlikely(rc))
+ goto error_drop_packet;
+
+- ena_tx_ctx.ena_bufs = tx_info->bufs;
+- ena_tx_ctx.push_header = push_hdr;
+- ena_tx_ctx.num_bufs = tx_info->num_of_bufs;
+ ena_tx_ctx.req_id = req_id;
+- ena_tx_ctx.header_len = push_len;
+
+ rc = ena_xmit_common(dev,
+ xdp_ring,
+diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
+index 9e02f88645931..5e90df42b2013 100644
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -1849,6 +1849,7 @@ out_free_netdev:
+ free_netdev(netdev);
+ out_pci_release:
+ pci_release_mem_regions(pdev);
++ pci_disable_pcie_error_reporting(pdev);
+ out_pci_disable:
+ pci_disable_device(pdev);
+ return err;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 027997c711aba..c118de27bc5c3 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -7295,7 +7295,7 @@ skip_rdma:
+ entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
+ 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
+ entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
+- entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
++ entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
+ entries = roundup(entries, ctx->tqm_entries_multiple);
+ entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
+@@ -11573,6 +11573,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
+ bnxt_hwrm_coal_params_qcaps(bp);
+ }
+
++static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
++
+ static int bnxt_fw_init_one(struct bnxt *bp)
+ {
+ int rc;
+@@ -11587,6 +11589,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
+ netdev_err(bp->dev, "Firmware init phase 2 failed\n");
+ return rc;
+ }
++ rc = bnxt_probe_phy(bp, false);
++ if (rc)
++ return rc;
+ rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
+ if (rc)
+ return rc;
+@@ -12976,6 +12981,7 @@ init_err_pci_clean:
+ bnxt_hwrm_func_drv_unrgtr(bp);
+ bnxt_free_hwrm_short_cmd_req(bp);
+ bnxt_free_hwrm_resources(bp);
++ bnxt_ethtool_free(bp);
+ kfree(bp->fw_health);
+ bp->fw_health = NULL;
+ bnxt_cleanup_pci(bp);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+index 61ea3ec5c3fcc..83ed10ac86606 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
+ return ret;
+ }
+
+- spin_lock_bh(&adap->win0_lock);
++ /* We have to RESET the chip/firmware because we need the
++ * chip in uninitialized state for loading new PHY image.
++ * Otherwise, the running firmware will only store the PHY
++ * image in local RAM which will be lost after next reset.
++ */
++ ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
++ if (ret < 0) {
++ dev_err(adap->pdev_dev,
++ "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
++ ret);
++ return ret;
++ }
++
+ ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
+- spin_unlock_bh(&adap->win0_lock);
+- if (ret)
+- dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
++ if (ret < 0) {
++ dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
++ ret);
++ return ret;
++ }
+
+- return ret;
++ return 0;
+ }
+
+ static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
+@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
+ u32 ftid)
+ {
+ struct tid_info *t = &adap->tids;
+- struct filter_entry *f;
+
+- if (ftid < t->nhpftids)
+- f = &adap->tids.hpftid_tab[ftid];
+- else if (ftid < t->nftids)
+- f = &adap->tids.ftid_tab[ftid - t->nhpftids];
+- else
+- f = lookup_tid(&adap->tids, ftid);
++ if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
++ return &t->hpftid_tab[ftid - t->hpftid_base];
+
+- return f;
++ if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
++ return &t->ftid_tab[ftid - t->ftid_base];
++
++ return lookup_tid(t, ftid);
+ }
+
+ static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
+@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
+ filter_id = filter_info->loc_array[cmd->fs.location];
+ f = cxgb4_get_filter_entry(adapter, filter_id);
+
++ if (f->fs.prio)
++ filter_id -= adapter->tids.hpftid_base;
++ else if (!f->fs.hash)
++ filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
++
+ ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
+ if (ret)
+ goto err;
+@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
+
+ filter_info = &adapter->ethtool_filters->port[pi->port_id];
+
++ if (fs.prio)
++ tid += adapter->tids.hpftid_base;
++ else if (!fs.hash)
++ tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
++
+ filter_info->loc_array[cmd->fs.location] = tid;
+ set_bit(cmd->fs.location, filter_info->bmap);
+ filter_info->in_use++;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+index e664e05b9f026..5fbc087268dbe 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
+ WORD_MASK, f->fs.nat_lip[3] |
+ f->fs.nat_lip[2] << 8 |
+ f->fs.nat_lip[1] << 16 |
+- (u64)f->fs.nat_lip[0] << 25, 1);
++ (u64)f->fs.nat_lip[0] << 24, 1);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 1f601de02e706..762113a04dde6 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -4424,10 +4424,8 @@ static int adap_init0_phy(struct adapter *adap)
+
+ /* Load PHY Firmware onto adapter.
+ */
+- spin_lock_bh(&adap->win0_lock);
+ ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
+ (u8 *)phyf->data, phyf->size);
+- spin_unlock_bh(&adap->win0_lock);
+ if (ret < 0)
+ dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
+ -ret);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+index 80882cfc370f5..601853bb34c91 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+@@ -3060,16 +3060,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
+ * @addr: the start address to write
+ * @n: length of data to write in bytes
+ * @data: the data to write
++ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address. All the data must be written to the same page.
++ * If @byte_oriented is set the write data is stored as byte stream
++ * (i.e. matches what on disk), otherwise in big-endian.
+ */
+ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+- unsigned int n, const u8 *data)
++ unsigned int n, const u8 *data, bool byte_oriented)
+ {
+- int ret;
+- u32 buf[64];
+ unsigned int i, c, left, val, offset = addr & 0xff;
++ u32 buf[64];
++ int ret;
+
+ if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
+ return -EINVAL;
+@@ -3080,10 +3083,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+ (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+ goto unlock;
+
+- for (left = n; left; left -= c) {
++ for (left = n; left; left -= c, data += c) {
+ c = min(left, 4U);
+- for (val = 0, i = 0; i < c; ++i)
+- val = (val << 8) + *data++;
++ for (val = 0, i = 0; i < c; ++i) {
++ if (byte_oriented)
++ val = (val << 8) + data[i];
++ else
++ val = (val << 8) + data[c - i - 1];
++ }
+
+ ret = sf1_write(adapter, c, c != left, 1, val);
+ if (ret)
+@@ -3096,7 +3103,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+ t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
+
+ /* Read the page to verify the write succeeded */
+- ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
++ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
++ byte_oriented);
+ if (ret)
+ return ret;
+
+@@ -3692,7 +3700,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+ */
+ memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
+- ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
++ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
+ if (ret)
+ goto out;
+
+@@ -3700,14 +3708,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ addr += SF_PAGE_SIZE;
+ fw_data += SF_PAGE_SIZE;
+- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
++ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
+ if (ret)
+ goto out;
+ }
+
+- ret = t4_write_flash(adap,
+- fw_start + offsetof(struct fw_hdr, fw_ver),
+- sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
++ ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
++ sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
++ true);
+ out:
+ if (ret)
+ dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
+@@ -3812,9 +3820,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
+ /* Copy the supplied PHY Firmware image to the adapter memory location
+ * allocated by the adapter firmware.
+ */
++ spin_lock_bh(&adap->win0_lock);
+ ret = t4_memory_rw(adap, win, mtype, maddr,
+ phy_fw_size, (__be32 *)phy_fw_data,
+ T4_MEMORY_WRITE);
++ spin_unlock_bh(&adap->win0_lock);
+ if (ret)
+ return ret;
+
+@@ -10208,7 +10218,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ n = size - i;
+ else
+ n = SF_PAGE_SIZE;
+- ret = t4_write_flash(adap, addr, n, cfg_data);
++ ret = t4_write_flash(adap, addr, n, cfg_data, true);
+ if (ret)
+ goto out;
+
+@@ -10677,13 +10687,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ addr += SF_PAGE_SIZE;
+ boot_data += SF_PAGE_SIZE;
+- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
++ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
++ false);
+ if (ret)
+ goto out;
+ }
+
+ ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
+- (const u8 *)header);
++ (const u8 *)header, false);
+
+ out:
+ if (ret)
+@@ -10758,7 +10769,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ n = min_t(u32, size - i, SF_PAGE_SIZE);
+
+- ret = t4_write_flash(adap, addr, n, cfg_data);
++ ret = t4_write_flash(adap, addr, n, cfg_data, false);
+ if (ret)
+ goto out;
+
+@@ -10770,7 +10781,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
+ for (i = 0; i < npad; i++) {
+ u8 data = 0;
+
+- ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
++ ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
++ false);
+ if (ret)
+ goto out;
+ }
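[The @byte_oriented flag in t4_write_flash() decides how up to four input bytes get packed into the 32-bit word pushed to the flash controller: as the byte stream found on disk, or with the order reversed so the word lands big-endian. The two packings differ only in which index is shifted in first, as this small self-contained check shows:

#include <assert.h>
#include <stdint.h>

static uint32_t pack(const uint8_t *data, unsigned int c, int byte_oriented)
{
	uint32_t val = 0;

	for (unsigned int i = 0; i < c; i++)
		val = (val << 8) + (byte_oriented ? data[i] : data[c - i - 1]);
	return val;
}

int main(void)
{
	const uint8_t d[4] = { 0x11, 0x22, 0x33, 0x44 };

	assert(pack(d, 4, 1) == 0x11223344);	/* byte stream, as on disk */
	assert(pack(d, 4, 0) == 0x44332211);	/* word byte order swapped */
	return 0;
}

The read-back verification path passes the same flag to t4_read_flash() so the comparison sees data in the same layout it was written.]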
+diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
+index 46b0dbab8aadc..7c992172933bc 100644
+--- a/drivers/net/ethernet/ec_bhf.c
++++ b/drivers/net/ethernet/ec_bhf.c
+@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
+ struct ec_bhf_priv *priv = netdev_priv(net_dev);
+
+ unregister_netdev(net_dev);
+- free_netdev(net_dev);
+
+ pci_iounmap(dev, priv->dma_io);
+ pci_iounmap(dev, priv->io);
++
++ free_netdev(net_dev);
++
+ pci_release_regions(dev);
+ pci_clear_master(dev);
+ pci_disable_device(dev);
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index b6eba29d8e99e..7968568bbe214 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -5897,6 +5897,7 @@ drv_cleanup:
+ unmap_bars:
+ be_unmap_pci_bars(adapter);
+ free_netdev:
++ pci_disable_pcie_error_reporting(pdev);
+ free_netdev(netdev);
+ rel_reg:
+ pci_release_regions(pdev);
+diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
+index 1753807cbf97e..d71eac7e19249 100644
+--- a/drivers/net/ethernet/freescale/fec_ptp.c
++++ b/drivers/net/ethernet/freescale/fec_ptp.c
+@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
+ {
+ struct fec_enet_private *fep =
+ container_of(cc, struct fec_enet_private, cc);
+- const struct platform_device_id *id_entry =
+- platform_get_device_id(fep->pdev);
+ u32 tempval;
+
+ tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+ tempval |= FEC_T_CTRL_CAPTURE;
+ writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+- if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
++ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+ udelay(1);
+
+ return readl(fep->hwp + FEC_ATIME);
+@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
+ fep->ptp_caps.enable = fec_ptp_enable;
+
+ fep->cycle_speed = clk_get_rate(fep->clk_ptp);
++ if (!fep->cycle_speed) {
++ fep->cycle_speed = NSEC_PER_SEC;
++ dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
++ }
+ fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
+
+ spin_lock_init(&fep->tmreg_lock);
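[The fec_ptp hunk guards the divisor: if clk_get_rate() reports 0, the subsequent NSEC_PER_SEC / cycle_speed would be a divide-by-zero, so a sane 1 GHz fallback is substituted first and the problem is logged. The guard in isolation:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	unsigned long cycle_speed = 0;	/* e.g. clock rate read back as zero */

	if (!cycle_speed) {
		cycle_speed = NSEC_PER_SEC;
		fprintf(stderr, "clk_ptp clock rate is zero\n");
	}
	printf("ptp_inc = %lu ns\n", NSEC_PER_SEC / cycle_speed);
	return 0;
}
]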
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index 27e439853c3b0..55432ea360ad4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -1715,12 +1715,13 @@ setup_rings:
+ * ice_vsi_cfg_txqs - Configure the VSI for Tx
+ * @vsi: the VSI being configured
+ * @rings: Tx ring array to be configured
++ * @count: number of Tx ring array elements
+ *
+ * Return 0 on success and a negative value on error
+ * Configure the Tx VSI for operation.
+ */
+ static int
+-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
++ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
+ {
+ struct ice_aqc_add_tx_qgrp *qg_buf;
+ u16 q_idx = 0;
+@@ -1732,7 +1733,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
+
+ qg_buf->num_txqs = 1;
+
+- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
++ for (q_idx = 0; q_idx < count; q_idx++) {
+ err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
+ if (err)
+ goto err_cfg_txqs;
+@@ -1752,7 +1753,7 @@ err_cfg_txqs:
+ */
+ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
+ {
+- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
++ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
+ }
+
+ /**
+@@ -1767,7 +1768,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
+ int ret;
+ int i;
+
+- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
++ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
+ if (ret)
+ return ret;
+
+@@ -1965,17 +1966,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of VF/VM
+ * @rings: Tx ring array to be stopped
++ * @count: number of Tx ring array elements
+ */
+ static int
+ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+- u16 rel_vmvf_num, struct ice_ring **rings)
++ u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
+ {
+ u16 q_idx;
+
+ if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+ return -EINVAL;
+
+- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
++ for (q_idx = 0; q_idx < count; q_idx++) {
+ struct ice_txq_meta txq_meta = { };
+ int status;
+
+@@ -2003,7 +2005,7 @@ int
+ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ u16 rel_vmvf_num)
+ {
+- return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
++ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
+ }
+
+ /**
+@@ -2012,7 +2014,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+ */
+ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
+ {
+- return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
++ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
+ }
+
1584 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
1585 +index d821c687f239c..b61cd84be97fd 100644
1586 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
1587 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
1588 +@@ -2554,6 +2554,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
1589 + return (ret || xdp_ring_err) ? -ENOMEM : 0;
1590 + }
1591 +
1592 ++/**
1593 ++ * ice_xdp_safe_mode - XDP handler for safe mode
1594 ++ * @dev: netdevice
1595 ++ * @xdp: XDP command
1596 ++ */
1597 ++static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
1598 ++ struct netdev_bpf *xdp)
1599 ++{
1600 ++ NL_SET_ERR_MSG_MOD(xdp->extack,
1601 ++ "Please provide a working DDP firmware package in order to use XDP\n"
1602 ++ "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
1603 ++ return -EOPNOTSUPP;
1604 ++}
1605 ++
1606 + /**
1607 + * ice_xdp - implements XDP handler
1608 + * @dev: netdevice
1609 +@@ -6805,6 +6819,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
1610 + .ndo_change_mtu = ice_change_mtu,
1611 + .ndo_get_stats64 = ice_get_stats64,
1612 + .ndo_tx_timeout = ice_tx_timeout,
1613 ++ .ndo_bpf = ice_xdp_safe_mode,
1614 + };
1615 +
1616 + static const struct net_device_ops ice_netdev_ops = {
1617 +diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
1618 +index 135ba5b6ae980..072075bc60ee9 100644
1619 +--- a/drivers/net/ethernet/lantiq_xrx200.c
1620 ++++ b/drivers/net/ethernet/lantiq_xrx200.c
1621 +@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
1622 +
1623 + static int xrx200_alloc_skb(struct xrx200_chan *ch)
1624 + {
1625 ++ struct sk_buff *skb = ch->skb[ch->dma.desc];
1626 + dma_addr_t mapping;
1627 + int ret = 0;
1628 +
1629 +@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
1630 + XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
1631 + if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
1632 + dev_kfree_skb_any(ch->skb[ch->dma.desc]);
1633 ++ ch->skb[ch->dma.desc] = skb;
1634 + ret = -ENOMEM;
1635 + goto skip;
1636 + }
1637 +@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
1638 + ch->dma.desc %= LTQ_DESC_NUM;
1639 +
1640 + if (ret) {
1641 +- ch->skb[ch->dma.desc] = skb;
1642 + net_dev->stats.rx_dropped++;
1643 + netdev_err(net_dev, "failed to allocate new rx buffer\n");
1644 + return ret;
1645 +@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
1646 + struct xrx200_chan *ch = ptr;
1647 +
1648 + if (napi_schedule_prep(&ch->napi)) {
1649 +- __napi_schedule(&ch->napi);
1650 + ltq_dma_disable_irq(&ch->dma);
1651 ++ __napi_schedule(&ch->napi);
1652 + }
1653 +
1654 + ltq_dma_ack_irq(&ch->dma);
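
    [Editor's illustration, not part of the patch.] Two things change in this
    driver: the rx-refill path now restores the previous skb pointer when DMA
    mapping fails, so the ring slot never holds a freed buffer, and the IRQ is
    disabled before __napi_schedule() to close a window in which the poller could
    re-enable it. A compact model of the refill fix, with malloc standing in for
    skb allocation and mapping:

        #include <stdio.h>
        #include <stdlib.h>

        /* On allocation/mapping failure the ring slot must be restored to
         * its previous buffer, otherwise the slot keeps a pointer to
         * memory that was just freed.
         */
        struct slot { void *buf; };

        static int refill(struct slot *s, int fail)
        {
                void *old = s->buf;         /* saved up front, as in the fix */
                void *new = malloc(2048);

                if (!new)
                        return -1;          /* old buffer still in place */
                s->buf = new;
                if (fail) {                 /* dma_mapping_error() path */
                        free(s->buf);
                        s->buf = old;       /* restore instead of dangling */
                        return -1;
                }
                free(old);                  /* stands in for passing the old
                                             * buffer up the stack */
                return 0;
        }

        int main(void)
        {
                struct slot s = { .buf = malloc(2048) };

                printf("refill: %d (slot still valid: %p)\n",
                       refill(&s, 1), s.buf);
                free(s.buf);
                return 0;
        }
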
1655 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
1656 +index 9153c9bda96fa..897853a68cd03 100644
1657 +--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
1658 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
1659 +@@ -306,6 +306,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
1660 + int ret = 0, i;
1661 +
1662 + mutex_lock(&mlx5_intf_mutex);
1663 ++ priv->flags &= ~MLX5_PRIV_FLAGS_DETACH;
1664 + for (i = 0; i < ARRAY_SIZE(mlx5_adev_devices); i++) {
1665 + if (!priv->adev[i]) {
1666 + bool is_supported = false;
1667 +@@ -323,6 +324,16 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
1668 + }
1669 + } else {
1670 + adev = &priv->adev[i]->adev;
1671 ++
1672 ++ /* Note that mlx5_core_dev is connected here to an
1673 ++ * auxiliary driver, not to the PCI driver.
1674 ++ *
1675 ++ * Module unload can race here with devlink reload,
1676 ++ * but we don't need to take an extra lock because
1677 ++ * we are holding the global mlx5_intf_mutex.
1678 ++ */
1679 ++ if (!adev->dev.driver)
1680 ++ continue;
1681 + adrv = to_auxiliary_drv(adev->dev.driver);
1682 +
1683 + if (adrv->resume)
1684 +@@ -353,6 +364,10 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
1685 + continue;
1686 +
1687 + adev = &priv->adev[i]->adev;
1688 ++ /* Auxiliary driver was unbound manually through sysfs */
1689 ++ if (!adev->dev.driver)
1690 ++ goto skip_suspend;
1691 ++
1692 + adrv = to_auxiliary_drv(adev->dev.driver);
1693 +
1694 + if (adrv->suspend) {
1695 +@@ -360,9 +375,11 @@ void mlx5_detach_device(struct mlx5_core_dev *dev)
1696 + continue;
1697 + }
1698 +
1699 ++skip_suspend:
1700 + del_adev(&priv->adev[i]->adev);
1701 + priv->adev[i] = NULL;
1702 + }
1703 ++ priv->flags |= MLX5_PRIV_FLAGS_DETACH;
1704 + mutex_unlock(&mlx5_intf_mutex);
1705 + }
1706 +
1707 +@@ -451,6 +468,8 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
1708 + struct mlx5_priv *priv = &dev->priv;
1709 +
1710 + lockdep_assert_held(&mlx5_intf_mutex);
1711 ++ if (priv->flags & MLX5_PRIV_FLAGS_DETACH)
1712 ++ return 0;
1713 +
1714 + delete_drivers(dev);
1715 + if (priv->flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)
1716 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
1717 +index be0ee03de7217..2e9bee4e5209b 100644
1718 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
1719 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
1720 +@@ -129,10 +129,9 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
1721 + work);
1722 + struct mlx5e_neigh_hash_entry *nhe = update_work->nhe;
1723 + struct neighbour *n = update_work->n;
1724 ++ struct mlx5e_encap_entry *e = NULL;
1725 + bool neigh_connected, same_dev;
1726 +- struct mlx5e_encap_entry *e;
1727 + unsigned char ha[ETH_ALEN];
1728 +- struct mlx5e_priv *priv;
1729 + u8 nud_state, dead;
1730 +
1731 + rtnl_lock();
1732 +@@ -156,14 +155,12 @@ static void mlx5e_rep_neigh_update(struct work_struct *work)
1733 + if (!same_dev)
1734 + goto out;
1735 +
1736 +- list_for_each_entry(e, &nhe->encap_list, encap_list) {
1737 +- if (!mlx5e_encap_take(e))
1738 +- continue;
1739 ++ /* mlx5e_get_next_init_encap() releases the previous encap entry before
1740 ++ * returning the next one.
1741 ++ */
1742 ++ while ((e = mlx5e_get_next_init_encap(nhe, e)) != NULL)
1743 ++ mlx5e_rep_update_flows(netdev_priv(e->out_dev), e, neigh_connected, ha);
1744 +
1745 +- priv = netdev_priv(e->out_dev);
1746 +- mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
1747 +- mlx5e_encap_put(priv, e);
1748 +- }
1749 + out:
1750 + rtnl_unlock();
1751 + mlx5e_release_neigh_update_work(update_work);
1752 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
1753 +index 96ba027dbef3d..9992f94f794b6 100644
1754 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
1755 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
1756 +@@ -93,13 +93,9 @@ void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
1757 +
1758 + ASSERT_RTNL();
1759 +
1760 +- /* wait for encap to be fully initialized */
1761 +- wait_for_completion(&e->res_ready);
1762 +-
1763 + mutex_lock(&esw->offloads.encap_tbl_lock);
1764 + encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
1765 +- if (e->compl_result < 0 || (encap_connected == neigh_connected &&
1766 +- ether_addr_equal(e->h_dest, ha)))
1767 ++ if (encap_connected == neigh_connected && ether_addr_equal(e->h_dest, ha))
1768 + goto unlock;
1769 +
1770 + mlx5e_take_all_encap_flows(e, &flow_list);
1771 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1772 +index 1560fcbf4ac7c..a17d79effa273 100644
1773 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1774 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
1775 +@@ -250,9 +250,12 @@ static void mlx5e_take_all_route_decap_flows(struct mlx5e_route_entry *r,
1776 + mlx5e_take_tmp_flow(flow, flow_list, 0);
1777 + }
1778 +
1779 ++typedef bool (match_cb)(struct mlx5e_encap_entry *);
1780 ++
1781 + static struct mlx5e_encap_entry *
1782 +-mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
1783 +- struct mlx5e_encap_entry *e)
1784 ++mlx5e_get_next_matching_encap(struct mlx5e_neigh_hash_entry *nhe,
1785 ++ struct mlx5e_encap_entry *e,
1786 ++ match_cb match)
1787 + {
1788 + struct mlx5e_encap_entry *next = NULL;
1789 +
1790 +@@ -287,7 +290,7 @@ retry:
1791 + /* wait for encap to be fully initialized */
1792 + wait_for_completion(&next->res_ready);
1793 + /* continue searching if encap entry is not in valid state after completion */
1794 +- if (!(next->flags & MLX5_ENCAP_ENTRY_VALID)) {
1795 ++ if (!match(next)) {
1796 + e = next;
1797 + goto retry;
1798 + }
1799 +@@ -295,6 +298,30 @@ retry:
1800 + return next;
1801 + }
1802 +
1803 ++static bool mlx5e_encap_valid(struct mlx5e_encap_entry *e)
1804 ++{
1805 ++ return e->flags & MLX5_ENCAP_ENTRY_VALID;
1806 ++}
1807 ++
1808 ++static struct mlx5e_encap_entry *
1809 ++mlx5e_get_next_valid_encap(struct mlx5e_neigh_hash_entry *nhe,
1810 ++ struct mlx5e_encap_entry *e)
1811 ++{
1812 ++ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_valid);
1813 ++}
1814 ++
1815 ++static bool mlx5e_encap_initialized(struct mlx5e_encap_entry *e)
1816 ++{
1817 ++ return e->compl_result >= 0;
1818 ++}
1819 ++
1820 ++struct mlx5e_encap_entry *
1821 ++mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
1822 ++ struct mlx5e_encap_entry *e)
1823 ++{
1824 ++ return mlx5e_get_next_matching_encap(nhe, e, mlx5e_encap_initialized);
1825 ++}
1826 ++
1827 + void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
1828 + {
1829 + struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
1830 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
1831 +index 3d45341e2216f..26f7fab109d97 100644
1832 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
1833 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
1834 +@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
1835 + struct mlx5_core_dev *mdev = priv->mdev;
1836 + struct net_device *netdev = priv->netdev;
1837 +
1838 +- if (!priv->ipsec)
1839 +- return;
1840 +-
1841 + if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
1842 + !MLX5_CAP_ETH(mdev, swp)) {
1843 + mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
1844 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1845 +index 99dc9f2beed5b..16b8f52450329 100644
1846 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1847 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1848 +@@ -5168,22 +5168,15 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
1849 + }
1850 +
1851 + if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
1852 +- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
1853 +- NETIF_F_GSO_UDP_TUNNEL_CSUM;
1854 +- netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
1855 +- NETIF_F_GSO_UDP_TUNNEL_CSUM;
1856 +- netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
1857 +- netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
1858 +- NETIF_F_GSO_UDP_TUNNEL_CSUM;
1859 ++ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
1860 ++ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1861 ++ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
1862 + }
1863 +
1864 + if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
1865 +- netdev->hw_features |= NETIF_F_GSO_GRE |
1866 +- NETIF_F_GSO_GRE_CSUM;
1867 +- netdev->hw_enc_features |= NETIF_F_GSO_GRE |
1868 +- NETIF_F_GSO_GRE_CSUM;
1869 +- netdev->gso_partial_features |= NETIF_F_GSO_GRE |
1870 +- NETIF_F_GSO_GRE_CSUM;
1871 ++ netdev->hw_features |= NETIF_F_GSO_GRE;
1872 ++ netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1873 ++ netdev->gso_partial_features |= NETIF_F_GSO_GRE;
1874 + }
1875 +
1876 + if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
1877 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1878 +index b633f669ea57f..b3b8e44540a5d 100644
1879 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1880 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1881 +@@ -4622,7 +4622,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
1882 + list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
1883 + wait_for_completion(&hpe->res_ready);
1884 + if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
1885 +- hpe->hp->pair->peer_gone = true;
1886 ++ mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
1887 +
1888 + mlx5e_hairpin_put(priv, hpe);
1889 + }
1890 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
1891 +index 25c091795bcd8..17027536efbaa 100644
1892 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
1893 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
1894 +@@ -178,6 +178,9 @@ void mlx5e_take_all_encap_flows(struct mlx5e_encap_entry *e, struct list_head *f
1895 + void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list);
1896 +
1897 + struct mlx5e_neigh_hash_entry;
1898 ++struct mlx5e_encap_entry *
1899 ++mlx5e_get_next_init_encap(struct mlx5e_neigh_hash_entry *nhe,
1900 ++ struct mlx5e_encap_entry *e);
1901 + void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe);
1902 +
1903 + void mlx5e_tc_reoffload_flows_work(struct work_struct *work);
1904 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1905 +index 1fa9c18563da9..31c6a3b91f4a9 100644
1906 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1907 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
1908 +@@ -136,7 +136,7 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
1909 +
1910 + eqe = next_eqe_sw(eq);
1911 + if (!eqe)
1912 +- return 0;
1913 ++ goto out;
1914 +
1915 + do {
1916 + struct mlx5_core_cq *cq;
1917 +@@ -161,6 +161,8 @@ static int mlx5_eq_comp_int(struct notifier_block *nb,
1918 + ++eq->cons_index;
1919 +
1920 + } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
1921 ++
1922 ++out:
1923 + eq_update_ci(eq, 1);
1924 +
1925 + if (cqn != -1)
1926 +@@ -248,9 +250,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
1927 + ++eq->cons_index;
1928 +
1929 + } while ((++num_eqes < MLX5_EQ_POLLING_BUDGET) && (eqe = next_eqe_sw(eq)));
1930 +- eq_update_ci(eq, 1);
1931 +
1932 + out:
1933 ++ eq_update_ci(eq, 1);
1934 + mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
1935 +
1936 + return unlikely(recovery) ? num_eqes : 0;
1937 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1938 +index 2c6d95900e3c9..a3edeea4ddd78 100644
1939 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1940 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1941 +@@ -1308,6 +1308,12 @@ int mlx5_esw_vport_enable(struct mlx5_eswitch *esw, u16 vport_num,
1942 + goto err_vhca_mapping;
1943 + }
1944 +
1945 ++ /* The external controller host PF has a factory-programmed MAC.
1946 ++ * Read it from the device.
1947 ++ */
1948 ++ if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
1949 ++ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
1950 ++
1951 + esw_vport_change_handle_locked(vport);
1952 +
1953 + esw->enabled_vports++;
1954 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1955 +index efb93d63e54cb..58b8f75d7a01e 100644
1956 +--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
1957 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
1958 +@@ -1157,7 +1157,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
1959 + err = mlx5_core_set_hca_defaults(dev);
1960 + if (err) {
1961 + mlx5_core_err(dev, "Failed to set hca defaults\n");
1962 +- goto err_sriov;
1963 ++ goto err_set_hca;
1964 + }
1965 +
1966 + mlx5_vhca_event_start(dev);
1967 +@@ -1190,6 +1190,7 @@ err_ec:
1968 + mlx5_sf_hw_table_destroy(dev);
1969 + err_vhca:
1970 + mlx5_vhca_event_stop(dev);
1971 ++err_set_hca:
1972 + mlx5_cleanup_fs(dev);
1973 + err_fs:
1974 + mlx5_accel_tls_cleanup(dev);
1975 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
1976 +index 50af84e76fb6a..174f71ed52800 100644
1977 +--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
1978 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
1979 +@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
1980 + mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
1981 + mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
1982 + mkey->size = MLX5_GET64(mkc, mkc, len);
1983 +- mkey->key |= mlx5_idx_to_mkey(mkey_index);
1984 ++ mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
1985 + mkey->pd = MLX5_GET(mkc, mkc, pd);
1986 + init_waitqueue_head(&mkey->wait);
1987 +
1988 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
1989 +index 8e0dddc6383f0..2389239acadc9 100644
1990 +--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
1991 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
1992 +@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
1993 + {
1994 + int err;
1995 +
1996 ++ if (!MLX5_CAP_GEN(dev, roce))
1997 ++ return;
1998 ++
1999 + err = mlx5_nic_vport_enable_roce(dev);
2000 + if (err) {
2001 + mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
2002 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
2003 +index 90b524c59f3c3..c4139f4648bf1 100644
2004 +--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
2005 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
2006 +@@ -153,6 +153,7 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
2007 + sf_index = event->function_id - MLX5_CAP_GEN(table->dev, sf_base_id);
2008 + sf_dev = xa_load(&table->devices, sf_index);
2009 + switch (event->new_vhca_state) {
2010 ++ case MLX5_VHCA_STATE_INVALID:
2011 + case MLX5_VHCA_STATE_ALLOCATED:
2012 + if (sf_dev)
2013 + mlx5_sf_dev_del(table->dev, sf_dev, sf_index);
2014 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
2015 +index f146c618a78e7..46ef45fa91675 100644
2016 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
2017 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
2018 +@@ -712,7 +712,11 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
2019 + if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
2020 + return -EINVAL;
2021 +
2022 +- memcpy(padded_data, data, data_sz);
2023 ++ inline_data_sz =
2024 ++ MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
2025 ++
2026 ++ /* Add alignment padding */
2027 ++ memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
2028 +
2029 + /* Remove L2L3 outer headers */
2030 + MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
2031 +@@ -724,32 +728,34 @@ static int dr_ste_v1_set_action_decap_l3_list(void *data,
2032 + hw_action += DR_STE_ACTION_DOUBLE_SZ;
2033 + used_actions++; /* Remove and NOP are a single double action */
2034 +
2035 +- inline_data_sz =
2036 +- MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
2037 ++ /* Point to the last dword of the header */
2038 ++ data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
2039 +
2040 +- /* Add the new header inline + 2 extra bytes */
2041 ++ /* Add the new header using the inline action, 4 bytes at a time; the
2042 ++ * header is added in reverse order to the beginning of the packet to
2043 ++ * avoid incorrect parsing by the HW. Since the header is 14B or 18B,
2044 ++ * two extra bytes are padded and later removed.
2045 ++ */
2046 + for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
2047 + void *addr_inline;
2048 +
2049 + MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
2050 + DR_STE_V1_ACTION_ID_INSERT_INLINE);
2051 + /* The hardware expects here offset to words (2 bytes) */
2052 +- MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset,
2053 +- i * 2);
2054 ++ MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
2055 +
2056 + /* Copy bytes one by one to avoid endianness problem */
2057 + addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
2058 + hw_action, inline_data);
2059 +- memcpy(addr_inline, data_ptr, inline_data_sz);
2060 ++ memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
2061 + hw_action += DR_STE_ACTION_DOUBLE_SZ;
2062 +- data_ptr += inline_data_sz;
2063 + used_actions++;
2064 + }
2065 +
2066 +- /* Remove 2 extra bytes */
2067 ++ /* Remove the first 2 extra bytes */
2068 + MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
2069 + DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
2070 +- MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, data_sz / 2);
2071 ++ MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
2072 + /* The hardware expects here size in words (2 bytes) */
2073 + MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
2074 + used_actions++;
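
    [Editor's illustration, not part of the patch.] The rewritten loop works
    because every INSERT_INLINE action now writes its chunk at packet offset 0,
    pushing earlier inserts forward; emitting the chunks last-to-first therefore
    yields the header in the correct order, and copying the data at offset
    data_sz % inline_data_sz front-pads a 14B/18B header to whole chunks, with
    the padding stripped by the final REMOVE_BY_SIZE action. A standalone model
    of that reversed insert (chunk size and header bytes are illustrative):

        #include <stdio.h>
        #include <string.h>

        /* Every "hardware action" inserts CHUNK bytes at offset 0,
         * pushing prior inserts forward, so chunks must be emitted
         * last-first to end up in order.
         */
        #define CHUNK 4

        static char pkt[64];
        static size_t pkt_len;

        static void insert_at_front(const char *chunk)
        {
                memmove(pkt + CHUNK, pkt, pkt_len);
                memcpy(pkt, chunk, CHUNK);
                pkt_len += CHUNK;
        }

        int main(void)
        {
                const char hdr[] = "AABBCCDDEEFFGG"; /* 14-byte "L2 header" */
                size_t data_sz = 14;
                char padded[32] = { 0 };

                /* front-pad so the header ends on a CHUNK boundary
                 * (14 % 4 == 2 padding bytes) */
                memcpy(padded + data_sz % CHUNK, hdr, data_sz);

                /* emit chunks in reverse, as the fixed driver does */
                for (int i = (int)(data_sz / CHUNK); i >= 0; i--)
                        insert_at_front(padded + i * CHUNK);

                /* the leading data_sz % CHUNK bytes are the padding the
                 * driver removes with the REMOVE_BY_SIZE action */
                printf("%.*s\n", (int)data_sz, pkt + data_sz % CHUNK);
                return 0;
        }
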
2075 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
2076 +index 612b0ac31db23..9737565cd8d43 100644
2077 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
2078 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
2079 +@@ -124,10 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
2080 + static inline bool
2081 + mlx5dr_is_supported(struct mlx5_core_dev *dev)
2082 + {
2083 +- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
2084 +- (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
2085 +- (MLX5_CAP_GEN(dev, steering_format_version) <=
2086 +- MLX5_STEERING_FORMAT_CONNECTX_6DX));
2087 ++ return MLX5_CAP_GEN(dev, roce) &&
2088 ++ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
2089 ++ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
2090 ++ (MLX5_CAP_GEN(dev, steering_format_version) <=
2091 ++ MLX5_STEERING_FORMAT_CONNECTX_6DX)));
2092 + }
2093 +
2094 + /* buddy functions & structure */
2095 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
2096 +index 01cc00ad8acf2..b6931bbe52d29 100644
2097 +--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
2098 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
2099 +@@ -424,6 +424,15 @@ err_modify_sq:
2100 + return err;
2101 + }
2102 +
2103 ++static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
2104 ++{
2105 ++ int i;
2106 ++
2107 ++ for (i = 0; i < hp->num_channels; i++)
2108 ++ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
2109 ++ MLX5_SQC_STATE_RST, 0, 0);
2110 ++}
2111 ++
2112 + static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
2113 + {
2114 + int i;
2115 +@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
2116 + for (i = 0; i < hp->num_channels; i++)
2117 + mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
2118 + MLX5_RQC_STATE_RST, 0, 0);
2119 +-
2120 + /* unset peer SQs */
2121 +- if (hp->peer_gone)
2122 +- return;
2123 +- for (i = 0; i < hp->num_channels; i++)
2124 +- mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
2125 +- MLX5_SQC_STATE_RST, 0, 0);
2126 ++ if (!hp->peer_gone)
2127 ++ mlx5_hairpin_unpair_peer_sq(hp);
2128 + }
2129 +
2130 + struct mlx5_hairpin *
2131 +@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
2132 + mlx5_hairpin_destroy_queues(hp);
2133 + kfree(hp);
2134 + }
2135 ++
2136 ++void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
2137 ++{
2138 ++ int i;
2139 ++
2140 ++ mlx5_hairpin_unpair_peer_sq(hp);
2141 ++
2142 ++ /* destroy peer SQ */
2143 ++ for (i = 0; i < hp->num_channels; i++)
2144 ++ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
2145 ++
2146 ++ hp->peer_gone = true;
2147 ++}
2148 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
2149 +index e05c5c0f3ae1d..7d21fbb9192f6 100644
2150 +--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
2151 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
2152 +@@ -465,8 +465,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
2153 + void *in;
2154 + int err;
2155 +
2156 +- if (!vport)
2157 +- return -EINVAL;
2158 + if (!MLX5_CAP_GEN(mdev, vport_group_manager))
2159 + return -EACCES;
2160 +
2161 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
2162 +index bf85ce9835d7f..42e4437ac3c16 100644
2163 +--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
2164 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
2165 +@@ -708,7 +708,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
2166 + MLXSW_THERMAL_TRIP_MASK,
2167 + module_tz,
2168 + &mlxsw_thermal_module_ops,
2169 +- NULL, 0, 0);
2170 ++ NULL, 0,
2171 ++ module_tz->parent->polling_delay);
2172 + if (IS_ERR(module_tz->tzdev)) {
2173 + err = PTR_ERR(module_tz->tzdev);
2174 + return err;
2175 +@@ -830,7 +831,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
2176 + MLXSW_THERMAL_TRIP_MASK,
2177 + gearbox_tz,
2178 + &mlxsw_thermal_gearbox_ops,
2179 +- NULL, 0, 0);
2180 ++ NULL, 0,
2181 ++ gearbox_tz->parent->polling_delay);
2182 + if (IS_ERR(gearbox_tz->tzdev))
2183 + return PTR_ERR(gearbox_tz->tzdev);
2184 +
2185 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
2186 +index c4adc7f740d3e..769386971ac3b 100644
2187 +--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
2188 ++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
2189 +@@ -3863,7 +3863,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
2190 + #define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
2191 + #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
2192 + #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
2193 +-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
2194 ++#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
2195 +
2196 + static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
2197 + enum mlxsw_reg_qeec_hr hr, u8 index,
2198 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
2199 +index 46e5c9136bacd..0c4c976548c85 100644
2200 +--- a/drivers/net/ethernet/mscc/ocelot.c
2201 ++++ b/drivers/net/ethernet/mscc/ocelot.c
2202 +@@ -378,6 +378,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
2203 +
2204 + int ocelot_port_flush(struct ocelot *ocelot, int port)
2205 + {
2206 ++ unsigned int pause_ena;
2207 + int err, val;
2208 +
2209 + /* Disable dequeuing from the egress queues */
2210 +@@ -386,6 +387,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
2211 + QSYS_PORT_MODE, port);
2212 +
2213 + /* Disable flow control */
2214 ++ ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
2215 + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
2216 +
2217 + /* Disable priority flow control */
2218 +@@ -421,6 +423,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
2219 + /* Clear flushing again. */
2220 + ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
2221 +
2222 ++ /* Re-enable flow control */
2223 ++ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
2224 ++
2225 + return err;
2226 + }
2227 + EXPORT_SYMBOL(ocelot_port_flush);
2228 +diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2229 +index 7e6bac85495d3..344ea11434549 100644
2230 +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2231 ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
2232 +@@ -1602,6 +1602,8 @@ err_out_free_netdev:
2233 + free_netdev(netdev);
2234 +
2235 + err_out_free_res:
2236 ++ if (NX_IS_REVISION_P3(pdev->revision))
2237 ++ pci_disable_pcie_error_reporting(pdev);
2238 + pci_release_regions(pdev);
2239 +
2240 + err_out_disable_pdev:
2241 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
2242 +index 96b947fde646b..3beafc60747e6 100644
2243 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
2244 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
2245 +@@ -2690,6 +2690,7 @@ err_out_free_hw_res:
2246 + kfree(ahw);
2247 +
2248 + err_out_free_res:
2249 ++ pci_disable_pcie_error_reporting(pdev);
2250 + pci_release_regions(pdev);
2251 +
2252 + err_out_disable_pdev:
2253 +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
2254 +index 41fbd2ceeede4..ab1e0fcccabb6 100644
2255 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
2256 ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
2257 +@@ -126,24 +126,24 @@ static void rmnet_get_stats64(struct net_device *dev,
2258 + struct rtnl_link_stats64 *s)
2259 + {
2260 + struct rmnet_priv *priv = netdev_priv(dev);
2261 +- struct rmnet_vnd_stats total_stats;
2262 ++ struct rmnet_vnd_stats total_stats = { };
2263 + struct rmnet_pcpu_stats *pcpu_ptr;
2264 ++ struct rmnet_vnd_stats snapshot;
2265 + unsigned int cpu, start;
2266 +
2267 +- memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
2268 +-
2269 + for_each_possible_cpu(cpu) {
2270 + pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
2271 +
2272 + do {
2273 + start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
2274 +- total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
2275 +- total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
2276 +- total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
2277 +- total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
2278 ++ snapshot = pcpu_ptr->stats; /* struct assignment */
2279 + } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
2280 +
2281 +- total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
2282 ++ total_stats.rx_pkts += snapshot.rx_pkts;
2283 ++ total_stats.rx_bytes += snapshot.rx_bytes;
2284 ++ total_stats.tx_pkts += snapshot.tx_pkts;
2285 ++ total_stats.tx_bytes += snapshot.tx_bytes;
2286 ++ total_stats.tx_drops += snapshot.tx_drops;
2287 + }
2288 +
2289 + s->rx_packets = total_stats.rx_pkts;
2290 +@@ -354,4 +354,4 @@ int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
2291 + }
2292 +
2293 + return 0;
2294 +-}
2295 +\ No newline at end of file
2296 ++}
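
    [Editor's illustration, not part of the patch.] The rmnet change fixes a
    subtle retry bug: u64_stats_fetch_retry_irq() makes the loop body re-run
    whenever a writer interleaved, so sums accumulated inside the loop could be
    added twice. The fix copies a consistent snapshot inside the loop and
    accumulates once outside. The shape of the pattern, with trivial stand-ins
    for the kernel seqcount helpers (single-threaded sketch, not the real API):

        #include <stdio.h>

        struct stats { unsigned long rx_pkts, tx_pkts; };

        /* stand-ins for u64_stats_fetch_begin_irq()/retry_irq() */
        static unsigned fetch_begin(void)      { return 0; }
        static int fetch_retry(unsigned start) { (void)start; return 0; }

        static void add_cpu_stats(struct stats *total, const struct stats *pcpu)
        {
                struct stats snapshot;
                unsigned start;

                do {
                        start = fetch_begin();
                        snapshot = *pcpu;   /* struct copy, re-done on retry */
                } while (fetch_retry(start));

                /* accumulate exactly once, outside the retry loop */
                total->rx_pkts += snapshot.rx_pkts;
                total->tx_pkts += snapshot.tx_pkts;
        }

        int main(void)
        {
                struct stats total = { 0 }, cpu0 = { 3, 5 }, cpu1 = { 2, 1 };

                add_cpu_stats(&total, &cpu0);
                add_cpu_stats(&total, &cpu1);
                printf("rx=%lu tx=%lu\n", total.rx_pkts, total.tx_pkts);
                return 0;
        }
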
2297 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
2298 +index b70d44ac09906..3c73453725f94 100644
2299 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
2300 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
2301 +@@ -76,10 +76,10 @@ enum power_event {
2302 + #define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
2303 +
2304 + /* GMAC HW ADDR regs */
2305 +-#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
2306 +- (reg * 8))
2307 +-#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
2308 +- (reg * 8))
2309 ++#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
2310 ++ 0x00000040 + (reg * 8))
2311 ++#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
2312 ++ 0x00000044 + (reg * 8))
2313 + #define GMAC_MAX_PERFECT_ADDRESSES 1
2314 +
2315 + #define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
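
    [Editor's illustration, not part of the patch.] The macro fix can be checked
    by hand: entries 0..15 live at 0x40 + reg * 8, while the extended bank
    restarts at 0x800, so the index must be rebased with (reg - 16); the old form
    added the full reg * 8 on top of 0x800 and landed 128 bytes too high from
    entry 16 onward. A throwaway program comparing both forms:

        #include <stdio.h>

        #define OLD_ADDR_HIGH(reg) (((reg) > 15 ? 0x00000800 : 0x00000040) + \
                                    ((reg) * 8))
        #define NEW_ADDR_HIGH(reg) (((reg) > 15) ? \
                                    0x00000800 + ((reg) - 16) * 8 : \
                                    0x00000040 + ((reg) * 8))

        int main(void)
        {
                for (int reg = 14; reg <= 17; reg++)
                        printf("reg %2d: old 0x%08x new 0x%08x\n",
                               reg, OLD_ADDR_HIGH(reg), NEW_ADDR_HIGH(reg));
                /* reg 16: old 0x00000880, new 0x00000800 -- the old macro
                 * skipped the first 16 slots of the extended bank. */
                return 0;
        }
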
2316 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2317 +index 6dc9f10414e47..7e6bead6429c5 100644
2318 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2319 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2320 +@@ -622,6 +622,8 @@ error_pclk_get:
2321 + void stmmac_remove_config_dt(struct platform_device *pdev,
2322 + struct plat_stmmacenet_data *plat)
2323 + {
2324 ++ clk_disable_unprepare(plat->stmmac_clk);
2325 ++ clk_disable_unprepare(plat->pclk);
2326 + of_node_put(plat->phy_node);
2327 + of_node_put(plat->mdio_node);
2328 + }
2329 +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
2330 +index 030185301014c..01bb36e7cff0a 100644
2331 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
2332 ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
2333 +@@ -849,7 +849,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2334 + smp_mb();
2335 +
2336 + /* Space might have just been freed - check again */
2337 +- if (temac_check_tx_bd_space(lp, num_frag))
2338 ++ if (temac_check_tx_bd_space(lp, num_frag + 1))
2339 + return NETDEV_TX_BUSY;
2340 +
2341 + netif_wake_queue(ndev);
2342 +@@ -876,7 +876,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2343 + return NETDEV_TX_OK;
2344 + }
2345 + cur_p->phys = cpu_to_be32(skb_dma_addr);
2346 +- ptr_to_txbd((void *)skb, cur_p);
2347 +
2348 + for (ii = 0; ii < num_frag; ii++) {
2349 + if (++lp->tx_bd_tail >= lp->tx_bd_num)
2350 +@@ -915,6 +914,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2351 + }
2352 + cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
2353 +
2354 ++ /* Mark the last fragment with the skb address, so it can be consumed
2355 ++ * in temac_start_xmit_done()
2356 ++ */
2357 ++ ptr_to_txbd((void *)skb, cur_p);
2358 ++
2359 + tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
2360 + lp->tx_bd_tail++;
2361 + if (lp->tx_bd_tail >= lp->tx_bd_num)
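
    [Editor's illustration, not part of the patch.] The +1 above reflects how a
    frame is laid out in the ring: one buffer descriptor for the linear data plus
    one per fragment, so the recheck after the barrier must ask for num_frag + 1
    free BDs, matching the check at the top of the function; the skb pointer is
    likewise stored only in the last descriptor so completion handling frees it
    once the whole chain is done. A toy version of the accounting (ring fields
    are illustrative):

        #include <stdbool.h>
        #include <stdio.h>

        struct ring { int head, tail, num; };

        static bool ring_full_for(const struct ring *r, int needed)
        {
                int free = (r->head - r->tail - 1 + r->num) % r->num;

                return free < needed;
        }

        int main(void)
        {
                struct ring r = { .head = 0, .tail = 61, .num = 64 };
                int num_frag = 2;

                /* 2 free slots: enough for num_frag, not for the
                 * num_frag + 1 descriptors the frame actually needs */
                printf("busy (wrong check): %d\n",
                       ring_full_for(&r, num_frag));
                printf("busy (fixed check): %d\n",
                       ring_full_for(&r, num_frag + 1));
                return 0;
        }
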
2362 +diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
2363 +index 17be2bb2985cd..920e9f888cc35 100644
2364 +--- a/drivers/net/hamradio/mkiss.c
2365 ++++ b/drivers/net/hamradio/mkiss.c
2366 +@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
2367 + ax->tty = NULL;
2368 +
2369 + unregister_netdev(ax->dev);
2370 ++ free_netdev(ax->dev);
2371 + }
2372 +
2373 + /* Perform I/O control on an active ax25 channel. */
2374 +diff --git a/drivers/net/mhi/net.c b/drivers/net/mhi/net.c
2375 +index f59960876083f..8e7f8728998f1 100644
2376 +--- a/drivers/net/mhi/net.c
2377 ++++ b/drivers/net/mhi/net.c
2378 +@@ -49,7 +49,7 @@ static int mhi_ndo_stop(struct net_device *ndev)
2379 + return 0;
2380 + }
2381 +
2382 +-static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
2383 ++static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
2384 + {
2385 + struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
2386 + const struct mhi_net_proto *proto = mhi_netdev->proto;
2387 +diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
2388 +index 0eeec80bec311..e4a5703666461 100644
2389 +--- a/drivers/net/usb/cdc_eem.c
2390 ++++ b/drivers/net/usb/cdc_eem.c
2391 +@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
2392 + }
2393 +
2394 + skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
2395 ++ dev_kfree_skb_any(skb);
2396 + if (!skb2)
2397 + return NULL;
2398 +
2399 +- dev_kfree_skb_any(skb);
2400 + skb = skb2;
2401 +
2402 + done:
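
    [Editor's illustration, not part of the patch.] skb_copy_expand() allocates a
    new buffer and never consumes its input, so the old placement of
    dev_kfree_skb_any() leaked the original skb whenever the copy failed; freeing
    it unconditionally right after the copy attempt covers both paths. The same
    shape with plain malloc:

        #include <stdlib.h>
        #include <string.h>

        /* copy_expand() returns a new buffer (or NULL) and never frees
         * its input, so the caller owns both buffers afterwards.
         */
        static char *copy_expand(const char *src, size_t len, size_t extra)
        {
                char *dst = malloc(len + extra);

                if (dst)
                        memcpy(dst, src, len);
                return dst;
        }

        static char *tx_fixup(char *buf, size_t len)
        {
                char *buf2 = copy_expand(buf, len, 8);

                free(buf);          /* fixed: original freed on both paths */
                if (!buf2)
                        return NULL; /* old code returned here, leaking buf */
                return buf2;
        }

        int main(void)
        {
                char *b = tx_fixup(strdup("frame"), 6);

                free(b);
                return 0;
        }
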
2403 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2404 +index 8acf301154282..dc3d84b43e4e8 100644
2405 +--- a/drivers/net/usb/cdc_ncm.c
2406 ++++ b/drivers/net/usb/cdc_ncm.c
2407 +@@ -1902,7 +1902,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
2408 + static const struct driver_info cdc_ncm_info = {
2409 + .description = "CDC NCM",
2410 + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
2411 +- | FLAG_LINK_INTR,
2412 ++ | FLAG_LINK_INTR | FLAG_ETHER,
2413 + .bind = cdc_ncm_bind,
2414 + .unbind = cdc_ncm_unbind,
2415 + .manage_power = usbnet_manage_power,
2416 +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
2417 +index 76ed79bb1e3f1..5281291711aff 100644
2418 +--- a/drivers/net/usb/smsc75xx.c
2419 ++++ b/drivers/net/usb/smsc75xx.c
2420 +@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
2421 + ret = smsc75xx_wait_ready(dev, 0);
2422 + if (ret < 0) {
2423 + netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
2424 +- goto err;
2425 ++ goto free_pdata;
2426 + }
2427 +
2428 + smsc75xx_init_mac_address(dev);
2429 +@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
2430 + ret = smsc75xx_reset(dev);
2431 + if (ret < 0) {
2432 + netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
2433 +- goto err;
2434 ++ goto cancel_work;
2435 + }
2436 +
2437 + dev->net->netdev_ops = &smsc75xx_netdev_ops;
2438 +@@ -1503,8 +1503,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
2439 + dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
2440 + return 0;
2441 +
2442 +-err:
2443 ++cancel_work:
2444 ++ cancel_work_sync(&pdata->set_multicast);
2445 ++free_pdata:
2446 + kfree(pdata);
2447 ++ dev->data[0] = 0;
2448 + return ret;
2449 + }
2450 +
2451 +@@ -1515,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
2452 + cancel_work_sync(&pdata->set_multicast);
2453 + netif_dbg(dev, ifdown, dev->net, "free pdata\n");
2454 + kfree(pdata);
2455 +- pdata = NULL;
2456 + dev->data[0] = 0;
2457 + }
2458 + }
2459 +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
2460 +index 503e2fd7ce518..28a6c4cfe9b8c 100644
2461 +--- a/drivers/net/vrf.c
2462 ++++ b/drivers/net/vrf.c
2463 +@@ -1183,9 +1183,6 @@ static int vrf_dev_init(struct net_device *dev)
2464 +
2465 + dev->flags = IFF_MASTER | IFF_NOARP;
2466 +
2467 +- /* MTU is irrelevant for VRF device; set to 64k similar to lo */
2468 +- dev->mtu = 64 * 1024;
2469 +-
2470 + /* similarly, oper state is irrelevant; set to up to avoid confusion */
2471 + dev->operstate = IF_OPER_UP;
2472 + netdev_lockdep_set_classes(dev);
2473 +@@ -1685,7 +1682,8 @@ static void vrf_setup(struct net_device *dev)
2474 + * which breaks networking.
2475 + */
2476 + dev->min_mtu = IPV6_MIN_MTU;
2477 +- dev->max_mtu = ETH_MAX_MTU;
2478 ++ dev->max_mtu = IP6_MAX_MTU;
2479 ++ dev->mtu = dev->max_mtu;
2480 + }
2481 +
2482 + static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
2483 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
2484 +index 051b48bd7985d..e3f5e7ab76063 100644
2485 +--- a/drivers/pci/controller/pci-aardvark.c
2486 ++++ b/drivers/pci/controller/pci-aardvark.c
2487 +@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2488 + udelay(PIO_RETRY_DELAY);
2489 + }
2490 +
2491 +- dev_err(dev, "config read/write timed out\n");
2492 ++ dev_err(dev, "PIO read/write transfer timed out\n");
2493 + return -ETIMEDOUT;
2494 + }
2495 +
2496 +@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
2497 + return true;
2498 + }
2499 +
2500 ++static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
2501 ++{
2502 ++ struct device *dev = &pcie->pdev->dev;
2503 ++
2504 ++ /*
2505 ++ * Starting a new PIO transfer before the previous one has completed
2506 ++ * causes an External Abort on the CPU, which results in a kernel panic:
2507 ++ *
2508 ++ * SError Interrupt on CPU0, code 0xbf000002 -- SError
2509 ++ * Kernel panic - not syncing: Asynchronous SError Interrupt
2510 ++ *
2511 ++ * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
2512 ++ * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
2513 ++ * concurrent calls at the same time. But because a PIO transfer may take
2514 ++ * about 1.5s when the link is down or the card is disconnected,
2515 ++ * advk_pcie_wait_pio() may time out before the transfer has completed.
2516 ++ *
2517 ++ * Some versions of ARM Trusted Firmware handle this External Abort at
2518 ++ * EL3 level and mask it to prevent a kernel panic. Relevant TF-A commit:
2519 ++ * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
2520 ++ */
2521 ++ if (advk_readl(pcie, PIO_START)) {
2522 ++ dev_err(dev, "Previous PIO read/write transfer is still running\n");
2523 ++ return true;
2524 ++ }
2525 ++
2526 ++ return false;
2527 ++}
2528 ++
2529 + static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2530 + int where, int size, u32 *val)
2531 + {
2532 +@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2533 + return pci_bridge_emul_conf_read(&pcie->bridge, where,
2534 + size, val);
2535 +
2536 +- /* Start PIO */
2537 +- advk_writel(pcie, 0, PIO_START);
2538 +- advk_writel(pcie, 1, PIO_ISR);
2539 ++ if (advk_pcie_pio_is_running(pcie)) {
2540 ++ *val = 0xffffffff;
2541 ++ return PCIBIOS_SET_FAILED;
2542 ++ }
2543 +
2544 + /* Program the control register */
2545 + reg = advk_readl(pcie, PIO_CTRL);
2546 +@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2547 + /* Program the data strobe */
2548 + advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
2549 +
2550 +- /* Start the transfer */
2551 ++ /* Clear PIO DONE ISR and start the transfer */
2552 ++ advk_writel(pcie, 1, PIO_ISR);
2553 + advk_writel(pcie, 1, PIO_START);
2554 +
2555 + ret = advk_pcie_wait_pio(pcie);
2556 +@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2557 + if (where % size)
2558 + return PCIBIOS_SET_FAILED;
2559 +
2560 +- /* Start PIO */
2561 +- advk_writel(pcie, 0, PIO_START);
2562 +- advk_writel(pcie, 1, PIO_ISR);
2563 ++ if (advk_pcie_pio_is_running(pcie))
2564 ++ return PCIBIOS_SET_FAILED;
2565 +
2566 + /* Program the control register */
2567 + reg = advk_readl(pcie, PIO_CTRL);
2568 +@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2569 + /* Program the data strobe */
2570 + advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
2571 +
2572 +- /* Start the transfer */
2573 ++ /* Clear PIO DONE ISR and start the transfer */
2574 ++ advk_writel(pcie, 1, PIO_ISR);
2575 + advk_writel(pcie, 1, PIO_START);
2576 +
2577 + ret = advk_pcie_wait_pio(pcie);
2578 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2579 +index 653660e3ba9ef..7bf76bca888da 100644
2580 +--- a/drivers/pci/quirks.c
2581 ++++ b/drivers/pci/quirks.c
2582 +@@ -3558,6 +3558,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
2583 + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
2584 + }
2585 +
2586 ++/*
2587 ++ * Some NVIDIA GPU devices do not work with bus reset; SBR needs to be
2588 ++ * prevented for the affected devices.
2589 ++ */
2590 ++static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
2591 ++{
2592 ++ if ((dev->device & 0xffc0) == 0x2340)
2593 ++ quirk_no_bus_reset(dev);
2594 ++}
2595 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
2596 ++ quirk_nvidia_no_bus_reset);
2597 ++
2598 + /*
2599 + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
2600 + * The device will throw a Link Down error on AER-capable systems and
2601 +@@ -3578,6 +3590,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
2602 + */
2603 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
2604 +
2605 ++/*
2606 ++ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
2607 ++ * automatically disables LTSSM when Secondary Bus Reset is received and
2608 ++ * the device stops working. Prevent bus reset for these devices. With
2609 ++ * this change, the device can be assigned to VMs with VFIO, but it will
2610 ++ * leak state between VMs. Reference:
2611 ++ * https://e2e.ti.com/support/processors/f/791/t/954382
2612 ++ */
2613 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
2614 ++
2615 + static void quirk_no_pm_reset(struct pci_dev *dev)
2616 + {
2617 + /*
2618 +@@ -3913,6 +3935,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
2619 + return 0;
2620 + }
2621 +
2622 ++#define PCI_DEVICE_ID_HINIC_VF 0x375E
2623 ++#define HINIC_VF_FLR_TYPE 0x1000
2624 ++#define HINIC_VF_FLR_CAP_BIT (1UL << 30)
2625 ++#define HINIC_VF_OP 0xE80
2626 ++#define HINIC_VF_FLR_PROC_BIT (1UL << 18)
2627 ++#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
2628 ++
2629 ++/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
2630 ++static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
2631 ++{
2632 ++ unsigned long timeout;
2633 ++ void __iomem *bar;
2634 ++ u32 val;
2635 ++
2636 ++ if (probe)
2637 ++ return 0;
2638 ++
2639 ++ bar = pci_iomap(pdev, 0, 0);
2640 ++ if (!bar)
2641 ++ return -ENOTTY;
2642 ++
2643 ++ /* Get and check firmware capabilities */
2644 ++ val = ioread32be(bar + HINIC_VF_FLR_TYPE);
2645 ++ if (!(val & HINIC_VF_FLR_CAP_BIT)) {
2646 ++ pci_iounmap(pdev, bar);
2647 ++ return -ENOTTY;
2648 ++ }
2649 ++
2650 ++ /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
2651 ++ val = ioread32be(bar + HINIC_VF_OP);
2652 ++ val = val | HINIC_VF_FLR_PROC_BIT;
2653 ++ iowrite32be(val, bar + HINIC_VF_OP);
2654 ++
2655 ++ pcie_flr(pdev);
2656 ++
2657 ++ /*
2658 ++ * The device must recapture its Bus and Device Numbers after FLR
2659 ++ * in order to generate Completions. Issue a config write to let the
2660 ++ * device capture this information.
2661 ++ */
2662 ++ pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
2663 ++
2664 ++ /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
2665 ++ timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
2666 ++ do {
2667 ++ val = ioread32be(bar + HINIC_VF_OP);
2668 ++ if (!(val & HINIC_VF_FLR_PROC_BIT))
2669 ++ goto reset_complete;
2670 ++ msleep(20);
2671 ++ } while (time_before(jiffies, timeout));
2672 ++
2673 ++ val = ioread32be(bar + HINIC_VF_OP);
2674 ++ if (!(val & HINIC_VF_FLR_PROC_BIT))
2675 ++ goto reset_complete;
2676 ++
2677 ++ pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
2678 ++
2679 ++reset_complete:
2680 ++ pci_iounmap(pdev, bar);
2681 ++
2682 ++ return 0;
2683 ++}
2684 ++
2685 + static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
2686 + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
2687 + reset_intel_82599_sfp_virtfn },
2688 +@@ -3924,6 +4009,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
2689 + { PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
2690 + { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
2691 + reset_chelsio_generic_dev },
2692 ++ { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
2693 ++ reset_hinic_vf_dev },
2694 + { 0 }
2695 + };
2696 +
2697 +@@ -4764,6 +4851,8 @@ static const struct pci_dev_acs_enabled {
2698 + { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
2699 + { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
2700 + { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
2701 ++ /* Broadcom multi-function device */
2702 ++ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
2703 + { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
2704 + /* Amazon Annapurna Labs */
2705 + { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
2706 +@@ -5165,7 +5254,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
2707 + static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
2708 + {
2709 + if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
2710 +- (pdev->device == 0x7340 && pdev->revision != 0xc5))
2711 ++ (pdev->device == 0x7340 && pdev->revision != 0xc5) ||
2712 ++ (pdev->device == 0x7341 && pdev->revision != 0x00))
2713 + return;
2714 +
2715 + if (pdev->device == 0x15d8) {
2716 +@@ -5192,6 +5282,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
2717 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
2718 + /* AMD Navi14 dGPU */
2719 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
2720 ++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats);
2721 + /* AMD Raven platform iGPU */
2722 + DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats);
2723 + #endif /* CONFIG_PCI_ATS */
2724 +diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
2725 +index cdbcc49f71152..731c483a04dea 100644
2726 +--- a/drivers/phy/mediatek/phy-mtk-tphy.c
2727 ++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
2728 +@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
2729 + break;
2730 + default:
2731 + dev_err(tphy->dev, "incompatible PHY type\n");
2732 ++ clk_disable_unprepare(instance->ref_clk);
2733 ++ clk_disable_unprepare(instance->da_ref_clk);
2734 + return -EINVAL;
2735 + }
2736 +
2737 +diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c b/drivers/pinctrl/ralink/pinctrl-rt2880.c
2738 +index 1f4bca854add5..a9b511c7e8500 100644
2739 +--- a/drivers/pinctrl/ralink/pinctrl-rt2880.c
2740 ++++ b/drivers/pinctrl/ralink/pinctrl-rt2880.c
2741 +@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
2742 + if (p->groups[group].enabled) {
2743 + dev_err(p->dev, "%s is already enabled\n",
2744 + p->groups[group].name);
2745 +- return -EBUSY;
2746 ++ return 0;
2747 + }
2748 +
2749 + p->groups[group].enabled = 1;
2750 +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
2751 +index 61f1c91c62de2..3390168ac0793 100644
2752 +--- a/drivers/platform/x86/thinkpad_acpi.c
2753 ++++ b/drivers/platform/x86/thinkpad_acpi.c
2754 +@@ -8808,6 +8808,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
2755 + TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
2756 + TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */
2757 + TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
2758 ++ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
2759 + };
2760 +
2761 + static int __init fan_init(struct ibm_init_struct *iibm)
2762 +diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
2763 +index 03a246e60fd98..21c4c34c52d8d 100644
2764 +--- a/drivers/ptp/ptp_clock.c
2765 ++++ b/drivers/ptp/ptp_clock.c
2766 +@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
2767 + spin_unlock_irqrestore(&queue->lock, flags);
2768 + }
2769 +
2770 +-s32 scaled_ppm_to_ppb(long ppm)
2771 ++long scaled_ppm_to_ppb(long ppm)
2772 + {
2773 + /*
2774 + * The 'freq' field in the 'struct timex' is in parts per
2775 +@@ -80,7 +80,7 @@ s32 scaled_ppm_to_ppb(long ppm)
2776 + s64 ppb = 1 + ppm;
2777 + ppb *= 125;
2778 + ppb >>= 13;
2779 +- return (s32) ppb;
2780 ++ return (long) ppb;
2781 + }
2782 + EXPORT_SYMBOL(scaled_ppm_to_ppb);
2783 +
2784 +@@ -138,7 +138,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
2785 + delta = ktime_to_ns(kt);
2786 + err = ops->adjtime(ops, delta);
2787 + } else if (tx->modes & ADJ_FREQUENCY) {
2788 +- s32 ppb = scaled_ppm_to_ppb(tx->freq);
2789 ++ long ppb = scaled_ppm_to_ppb(tx->freq);
2790 + if (ppb > ops->max_adj || ppb < -ops->max_adj)
2791 + return -ERANGE;
2792 + if (ops->adjfine)
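
    [Editor's illustration, not part of the patch.] The hunks above widen the
    conversion result from s32 to long. For reference, "scaled ppm" carries a
    16-bit binary fraction, so ppb = scaled_ppm * 1000 / 2^16, and 1000 / 2^16
    reduces to 125 / 2^13, which is exactly the (ppm * 125) >> 13 computed in 64
    bits by the helper. A standalone sketch of the same arithmetic (the overflow
    demo assumes an LP64 system, where long is 64 bits):

        #include <stdio.h>
        #include <stdint.h>

        /* Userspace model of scaled_ppm_to_ppb(); returning long instead
         * of s32 keeps results above 2^31 - 1 ppb from being truncated
         * on 64-bit kernels.
         */
        static long scaled_ppm_to_ppb(long ppm)
        {
                int64_t ppb = 1 + ppm;  /* same +1 bias as the kernel helper */

                ppb *= 125;
                ppb >>= 13;
                return (long)ppb;
        }

        int main(void)
        {
                /* 1 ppm expressed in scaled ppm is 1 << 16 = 65536 */
                printf("%ld ppb\n", scaled_ppm_to_ppb(65536L));   /* 1000 */
                /* a large adjustment whose result exceeds INT32_MAX */
                printf("%ld ppb\n", scaled_ppm_to_ppb(1L << 38));
                return 0;
        }
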
2793 +diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
2794 +index eb3fc1db4edc8..c4754f3cf2337 100644
2795 +--- a/drivers/regulator/cros-ec-regulator.c
2796 ++++ b/drivers/regulator/cros-ec-regulator.c
2797 +@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
2798 +
2799 + drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
2800 + if (IS_ERR(drvdata->dev)) {
2801 ++ ret = PTR_ERR(drvdata->dev);
2802 + dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
2803 +- return PTR_ERR(drvdata->dev);
2804 ++ return ret;
2805 + }
2806 +
2807 + platform_set_drvdata(pdev, drvdata);
2808 +diff --git a/drivers/regulator/mt6315-regulator.c b/drivers/regulator/mt6315-regulator.c
2809 +index 9edc34981ee0a..6b8be52c3772a 100644
2810 +--- a/drivers/regulator/mt6315-regulator.c
2811 ++++ b/drivers/regulator/mt6315-regulator.c
2812 +@@ -59,7 +59,7 @@ static const struct linear_range mt_volt_range1[] = {
2813 + REGULATOR_LINEAR_RANGE(0, 0, 0xbf, 6250),
2814 + };
2815 +
2816 +-static unsigned int mt6315_map_mode(u32 mode)
2817 ++static unsigned int mt6315_map_mode(unsigned int mode)
2818 + {
2819 + switch (mode) {
2820 + case MT6315_BUCK_MODE_AUTO:
2821 +diff --git a/drivers/regulator/rt4801-regulator.c b/drivers/regulator/rt4801-regulator.c
2822 +index 2055a9cb13ba5..7a87788d3f092 100644
2823 +--- a/drivers/regulator/rt4801-regulator.c
2824 ++++ b/drivers/regulator/rt4801-regulator.c
2825 +@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
2826 + struct gpio_descs *gpios = priv->enable_gpios;
2827 + int id = rdev_get_id(rdev), ret;
2828 +
2829 +- if (gpios->ndescs <= id) {
2830 ++ if (!gpios || gpios->ndescs <= id) {
2831 + dev_warn(&rdev->dev, "no dedicated gpio can control\n");
2832 + goto bypass_gpio;
2833 + }
2834 +@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
2835 + struct gpio_descs *gpios = priv->enable_gpios;
2836 + int id = rdev_get_id(rdev);
2837 +
2838 +- if (gpios->ndescs <= id) {
2839 ++ if (!gpios || gpios->ndescs <= id) {
2840 + dev_warn(&rdev->dev, "no dedicated gpio can control\n");
2841 + goto bypass_gpio;
2842 + }
2843 +diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
2844 +index 5adc552dffd58..4bca64de0f672 100644
2845 +--- a/drivers/regulator/rtmv20-regulator.c
2846 ++++ b/drivers/regulator/rtmv20-regulator.c
2847 +@@ -27,6 +27,7 @@
2848 + #define RTMV20_REG_LDIRQ 0x30
2849 + #define RTMV20_REG_LDSTAT 0x40
2850 + #define RTMV20_REG_LDMASK 0x50
2851 ++#define RTMV20_MAX_REGS (RTMV20_REG_LDMASK + 1)
2852 +
2853 + #define RTMV20_VID_MASK GENMASK(7, 4)
2854 + #define RICHTEK_VID 0x80
2855 +@@ -313,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
2856 + .val_bits = 8,
2857 + .cache_type = REGCACHE_RBTREE,
2858 + .max_register = RTMV20_REG_LDMASK,
2859 ++ .num_reg_defaults_raw = RTMV20_MAX_REGS,
2860 +
2861 + .writeable_reg = rtmv20_is_accessible_reg,
2862 + .readable_reg = rtmv20_is_accessible_reg,
2863 +diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
2864 +index ecefc25eff0c0..337353c9655ed 100644
2865 +--- a/drivers/s390/crypto/ap_queue.c
2866 ++++ b/drivers/s390/crypto/ap_queue.c
2867 +@@ -135,12 +135,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
2868 + {
2869 + struct ap_queue_status status;
2870 + struct ap_message *ap_msg;
2871 ++ bool found = false;
2872 +
2873 + status = ap_dqap(aq->qid, &aq->reply->psmid,
2874 + aq->reply->msg, aq->reply->len);
2875 + switch (status.response_code) {
2876 + case AP_RESPONSE_NORMAL:
2877 +- aq->queue_count--;
2878 ++ aq->queue_count = max_t(int, 0, aq->queue_count - 1);
2879 + if (aq->queue_count > 0)
2880 + mod_timer(&aq->timeout,
2881 + jiffies + aq->request_timeout);
2882 +@@ -150,8 +151,14 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
2883 + list_del_init(&ap_msg->list);
2884 + aq->pendingq_count--;
2885 + ap_msg->receive(aq, ap_msg, aq->reply);
2886 ++ found = true;
2887 + break;
2888 + }
2889 ++ if (!found) {
2890 ++ AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
2891 ++ __func__, aq->reply->psmid,
2892 ++ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
2893 ++ }
2894 + fallthrough;
2895 + case AP_RESPONSE_NO_PENDING_REPLY:
2896 + if (!status.queue_empty || aq->queue_count <= 0)
2897 +@@ -232,7 +239,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
2898 + ap_msg->flags & AP_MSG_FLAG_SPECIAL);
2899 + switch (status.response_code) {
2900 + case AP_RESPONSE_NORMAL:
2901 +- aq->queue_count++;
2902 ++ aq->queue_count = max_t(int, 1, aq->queue_count + 1);
2903 + if (aq->queue_count == 1)
2904 + mod_timer(&aq->timeout, jiffies + aq->request_timeout);
2905 + list_move_tail(&ap_msg->list, &aq->pendingq);
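Both counter updates are clamped so a lost or spurious interrupt can never push queue_count out of range: a reply never drives it below 0, and a freshly queued request always reports at least 1 so the timeout timer gets armed. max_t() just takes the larger of two values in the given type; a stand-alone illustration (the macro here is a simplified stand-in for the kernel's):

    #include <stdio.h>

    /* simplified stand-in for the kernel's max_t() */
    #define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
            int queue_count = 0;

            /* a stray reply with nothing queued: clamps to 0, not -1 */
            queue_count = max_t(int, 0, queue_count - 1);
            printf("%d\n", queue_count);
            return 0;
    }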
2906 +diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
2907 +index 2786470a52011..4f24f63922126 100644
2908 +--- a/drivers/spi/spi-stm32-qspi.c
2909 ++++ b/drivers/spi/spi-stm32-qspi.c
2910 +@@ -293,7 +293,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
2911 + int err = 0;
2912 +
2913 + if (!op->data.nbytes)
2914 +- return stm32_qspi_wait_nobusy(qspi);
2915 ++ goto wait_nobusy;
2916 +
2917 + if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
2918 + goto out;
2919 +@@ -314,6 +314,9 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
2920 + out:
2921 + /* clear flags */
2922 + writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
2923 ++wait_nobusy:
2924 ++ if (!err)
2925 ++ err = stm32_qspi_wait_nobusy(qspi);
2926 +
2927 + return err;
2928 + }
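The rework funnels every exit through one final busy-wait: the no-data case jumps straight to it, the normal path falls into it after clearing the flags, and an earlier transfer error is preserved rather than overwritten by the wait's result. A compact sketch of that control flow (stub functions, purely illustrative):

    #include <stdio.h>

    static int wait_nobusy(void) { return 0; } /* stub: poll until !BUSY */

    static int wait_cmd(int nbytes, int xfer_err)
    {
            int err = 0;

            if (!nbytes)
                    goto wait_nobusy_out; /* no data phase: only wait !BUSY */

            err = xfer_err; /* stands in for the transfer-complete wait */

    wait_nobusy_out:
            if (!err) /* keep a real transfer error, don't mask it */
                    err = wait_nobusy();
            return err;
    }

    int main(void)
    {
            printf("%d %d\n", wait_cmd(0, 0), wait_cmd(8, -110));
            return 0; /* prints: 0 -110 */
    }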
2929 +diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
2930 +index 2765289028fae..68193db8b2e3c 100644
2931 +--- a/drivers/spi/spi-zynq-qspi.c
2932 ++++ b/drivers/spi/spi-zynq-qspi.c
2933 +@@ -678,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
2934 + xqspi->irq = platform_get_irq(pdev, 0);
2935 + if (xqspi->irq <= 0) {
2936 + ret = -ENXIO;
2937 +- goto remove_master;
2938 ++ goto clk_dis_all;
2939 + }
2940 + ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
2941 + 0, pdev->name, xqspi);
2942 + if (ret != 0) {
2943 + ret = -ENXIO;
2944 + dev_err(&pdev->dev, "request_irq failed\n");
2945 +- goto remove_master;
2946 ++ goto clk_dis_all;
2947 + }
2948 +
2949 + ret = of_property_read_u32(np, "num-cs",
2950 +@@ -693,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
2951 + if (ret < 0) {
2952 + ctlr->num_chipselect = 1;
2953 + } else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
2954 ++ ret = -EINVAL;
2955 + dev_err(&pdev->dev, "only 2 chip selects are available\n");
2956 +- goto remove_master;
2957 ++ goto clk_dis_all;
2958 + } else {
2959 + ctlr->num_chipselect = num_cs;
2960 + }
2961 +diff --git a/drivers/staging/hikey9xx/hi6421v600-regulator.c b/drivers/staging/hikey9xx/hi6421v600-regulator.c
2962 +index f6a14e9c3cbfe..e10fe3058176d 100644
2963 +--- a/drivers/staging/hikey9xx/hi6421v600-regulator.c
2964 ++++ b/drivers/staging/hikey9xx/hi6421v600-regulator.c
2965 +@@ -83,7 +83,7 @@ static const unsigned int ldo34_voltages[] = {
2966 + .owner = THIS_MODULE, \
2967 + .volt_table = vtable, \
2968 + .n_voltages = ARRAY_SIZE(vtable), \
2969 +- .vsel_mask = (1 << (ARRAY_SIZE(vtable) - 1)) - 1, \
2970 ++ .vsel_mask = ARRAY_SIZE(vtable) - 1, \
2971 + .vsel_reg = vreg, \
2972 + .enable_reg = ereg, \
2973 + .enable_mask = emask, \
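The mask fix is plain arithmetic: a table of N voltages (N a power of two) needs log2(N) selector bits, i.e. mask N - 1; the old expression shifted by the value N - 1 instead. For a hypothetical 16-entry table:

    #include <stdio.h>

    int main(void)
    {
            unsigned int n = 16; /* e.g. ARRAY_SIZE(vtable), illustrative */

            printf("old: %#x\n", (1u << (n - 1)) - 1); /* 0x7fff: 15 bits */
            printf("new: %#x\n", n - 1);               /* 0xf: 4 real bits */
            return 0;
    }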
2974 +diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
2975 +index cbec65e5a4645..62ea47f9fee5e 100644
2976 +--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
2977 ++++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
2978 +@@ -2579,7 +2579,7 @@ static int rtw_cfg80211_add_monitor_if(struct adapter *padapter, char *name, str
2979 + mon_wdev->iftype = NL80211_IFTYPE_MONITOR;
2980 + mon_ndev->ieee80211_ptr = mon_wdev;
2981 +
2982 +- ret = register_netdevice(mon_ndev);
2983 ++ ret = cfg80211_register_netdevice(mon_ndev);
2984 + if (ret) {
2985 + goto out;
2986 + }
2987 +@@ -2661,7 +2661,7 @@ static int cfg80211_rtw_del_virtual_intf(struct wiphy *wiphy,
2988 + adapter = rtw_netdev_priv(ndev);
2989 + pwdev_priv = adapter_wdev_data(adapter);
2990 +
2991 +- unregister_netdevice(ndev);
2992 ++ cfg80211_unregister_netdevice(ndev);
2993 +
2994 + if (ndev == pwdev_priv->pmon_ndev) {
2995 + pwdev_priv->pmon_ndev = NULL;
2996 +diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
2997 +index 4545b23bda3f1..bac0f5458cab9 100644
2998 +--- a/drivers/usb/chipidea/usbmisc_imx.c
2999 ++++ b/drivers/usb/chipidea/usbmisc_imx.c
3000 +@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
3001 + int val;
3002 + unsigned long flags;
3003 +
3004 ++ /* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
3005 ++ spin_lock_irqsave(&usbmisc->lock, flags);
3006 ++ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
3007 ++ val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
3008 ++ writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
3009 ++ spin_unlock_irqrestore(&usbmisc->lock, flags);
3010 ++
3011 ++ /* TVDMSRC_DIS */
3012 ++ msleep(20);
3013 ++
3014 + /* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
3015 + spin_lock_irqsave(&usbmisc->lock, flags);
3016 + val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
3017 +@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
3018 + usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
3019 + spin_unlock_irqrestore(&usbmisc->lock, flags);
3020 +
3021 +- usleep_range(1000, 2000);
3022 ++ /* TVDMSRC_ON */
3023 ++ msleep(40);
3024 +
3025 + /*
3026 + * Per BC 1.2, check voltage of D+:
3027 +@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
3028 + usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
3029 + spin_unlock_irqrestore(&usbmisc->lock, flags);
3030 +
3031 +- usleep_range(1000, 2000);
3032 ++ /* TVDPSRC_ON */
3033 ++ msleep(40);
3034 +
3035 + /* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
3036 + val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
3037 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
3038 +index 13fe37fbbd2c8..6ebb8bd92e9df 100644
3039 +--- a/drivers/usb/core/hub.c
3040 ++++ b/drivers/usb/core/hub.c
3041 +@@ -40,6 +40,8 @@
3042 + #define USB_VENDOR_GENESYS_LOGIC 0x05e3
3043 + #define USB_VENDOR_SMSC 0x0424
3044 + #define USB_PRODUCT_USB5534B 0x5534
3045 ++#define USB_VENDOR_CYPRESS 0x04b4
3046 ++#define USB_PRODUCT_CY7C65632 0x6570
3047 + #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
3048 + #define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
3049 +
3050 +@@ -5644,6 +5646,11 @@ static const struct usb_device_id hub_id_table[] = {
3051 + .idProduct = USB_PRODUCT_USB5534B,
3052 + .bInterfaceClass = USB_CLASS_HUB,
3053 + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
3054 ++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
3055 ++ | USB_DEVICE_ID_MATCH_PRODUCT,
3056 ++ .idVendor = USB_VENDOR_CYPRESS,
3057 ++ .idProduct = USB_PRODUCT_CY7C65632,
3058 ++ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
3059 + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
3060 + | USB_DEVICE_ID_MATCH_INT_CLASS,
3061 + .idVendor = USB_VENDOR_GENESYS_LOGIC,
3062 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
3063 +index 126f0e10b3ef4..0022039bc2355 100644
3064 +--- a/drivers/usb/dwc3/core.c
3065 ++++ b/drivers/usb/dwc3/core.c
3066 +@@ -1657,8 +1657,8 @@ static int dwc3_remove(struct platform_device *pdev)
3067 +
3068 + pm_runtime_get_sync(&pdev->dev);
3069 +
3070 +- dwc3_debugfs_exit(dwc);
3071 + dwc3_core_exit_mode(dwc);
3072 ++ dwc3_debugfs_exit(dwc);
3073 +
3074 + dwc3_core_exit(dwc);
3075 + dwc3_ulpi_exit(dwc);
3076 +diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
3077 +index 8ab3949423604..74d9c2c38193d 100644
3078 +--- a/drivers/usb/dwc3/debug.h
3079 ++++ b/drivers/usb/dwc3/debug.h
3080 +@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
3081 +
3082 +
3083 + #ifdef CONFIG_DEBUG_FS
3084 ++extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
3085 + extern void dwc3_debugfs_init(struct dwc3 *d);
3086 + extern void dwc3_debugfs_exit(struct dwc3 *d);
3087 + #else
3088 ++static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
3089 ++{ }
3090 + static inline void dwc3_debugfs_init(struct dwc3 *d)
3091 + { }
3092 + static inline void dwc3_debugfs_exit(struct dwc3 *d)
3093 +diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
3094 +index 5da4f6082d930..3ebe3e6c284d2 100644
3095 +--- a/drivers/usb/dwc3/debugfs.c
3096 ++++ b/drivers/usb/dwc3/debugfs.c
3097 +@@ -890,30 +890,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
3098 + }
3099 + }
3100 +
3101 +-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
3102 +- struct dentry *parent)
3103 ++void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
3104 + {
3105 + struct dentry *dir;
3106 +
3107 +- dir = debugfs_create_dir(dep->name, parent);
3108 ++ dir = debugfs_create_dir(dep->name, dep->dwc->root);
3109 + dwc3_debugfs_create_endpoint_files(dep, dir);
3110 + }
3111 +
3112 +-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
3113 +- struct dentry *parent)
3114 +-{
3115 +- int i;
3116 +-
3117 +- for (i = 0; i < dwc->num_eps; i++) {
3118 +- struct dwc3_ep *dep = dwc->eps[i];
3119 +-
3120 +- if (!dep)
3121 +- continue;
3122 +-
3123 +- dwc3_debugfs_create_endpoint_dir(dep, parent);
3124 +- }
3125 +-}
3126 +-
3127 + void dwc3_debugfs_init(struct dwc3 *dwc)
3128 + {
3129 + struct dentry *root;
3130 +@@ -944,7 +928,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
3131 + &dwc3_testmode_fops);
3132 + debugfs_create_file("link_state", 0644, root, dwc,
3133 + &dwc3_link_state_fops);
3134 +- dwc3_debugfs_create_endpoint_dirs(dwc, root);
3135 + }
3136 + }
3137 +
3138 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3139 +index 1f9454e0d447b..755ab6fc0791f 100644
3140 +--- a/drivers/usb/dwc3/gadget.c
3141 ++++ b/drivers/usb/dwc3/gadget.c
3142 +@@ -2719,6 +2719,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
3143 + INIT_LIST_HEAD(&dep->started_list);
3144 + INIT_LIST_HEAD(&dep->cancelled_list);
3145 +
3146 ++ dwc3_debugfs_create_endpoint_dir(dep);
3147 ++
3148 + return 0;
3149 + }
3150 +
3151 +@@ -2762,6 +2764,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
3152 + list_del(&dep->endpoint.ep_list);
3153 + }
3154 +
3155 ++ debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
3156 + kfree(dep);
3157 + }
3158 + }
3159 +diff --git a/fs/afs/main.c b/fs/afs/main.c
3160 +index b2975256dadbd..179004b15566d 100644
3161 +--- a/fs/afs/main.c
3162 ++++ b/fs/afs/main.c
3163 +@@ -203,8 +203,8 @@ static int __init afs_init(void)
3164 + goto error_fs;
3165 +
3166 + afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
3167 +- if (IS_ERR(afs_proc_symlink)) {
3168 +- ret = PTR_ERR(afs_proc_symlink);
3169 ++ if (!afs_proc_symlink) {
3170 ++ ret = -ENOMEM;
3171 + goto error_proc;
3172 + }
3173 +
3174 +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
3175 +index a7d9e147dee62..595fd083c4ad1 100644
3176 +--- a/fs/btrfs/block-group.c
3177 ++++ b/fs/btrfs/block-group.c
3178 +@@ -2347,16 +2347,16 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
3179 + spin_lock(&sinfo->lock);
3180 + spin_lock(&cache->lock);
3181 + if (!--cache->ro) {
3182 +- num_bytes = cache->length - cache->reserved -
3183 +- cache->pinned - cache->bytes_super -
3184 +- cache->zone_unusable - cache->used;
3185 +- sinfo->bytes_readonly -= num_bytes;
3186 + if (btrfs_is_zoned(cache->fs_info)) {
3187 + /* Migrate zone_unusable bytes back */
3188 + cache->zone_unusable = cache->alloc_offset - cache->used;
3189 + sinfo->bytes_zone_unusable += cache->zone_unusable;
3190 + sinfo->bytes_readonly -= cache->zone_unusable;
3191 + }
3192 ++ num_bytes = cache->length - cache->reserved -
3193 ++ cache->pinned - cache->bytes_super -
3194 ++ cache->zone_unusable - cache->used;
3195 ++ sinfo->bytes_readonly -= num_bytes;
3196 + list_del_init(&cache->ro_list);
3197 + }
3198 + spin_unlock(&cache->lock);
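The reorder matters because num_bytes must be computed with the freshly migrated zone_unusable. With toy numbers (length 100, used 10, alloc_offset 40, everything else 0, stale zone_unusable 0): migrating first sets zone_unusable = 40 - 10 = 30, so bytes_readonly drops by 30 + (100 - 30 - 10) = 90, matching what was added when the group went read-only. The old order subtracted (100 - 0 - 10) = 90 using the stale value and then another 30 in the zoned branch, 120 in total, skewing the read-only accounting by exactly the migrated amount. (Numbers purely illustrative.)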
3199 +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
3200 +index c63d0a7f7ba4f..527c972b562dd 100644
3201 +--- a/fs/hugetlbfs/inode.c
3202 ++++ b/fs/hugetlbfs/inode.c
3203 +@@ -738,6 +738,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
3204 + __SetPageUptodate(page);
3205 + error = huge_add_to_page_cache(page, mapping, index);
3206 + if (unlikely(error)) {
3207 ++ restore_reserve_on_error(h, &pseudo_vma, addr, page);
3208 + put_page(page);
3209 + mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3210 + goto out;
3211 +diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
3212 +index 9e0c1afac8bdf..c175523b0a2c1 100644
3213 +--- a/fs/notify/fanotify/fanotify_user.c
3214 ++++ b/fs/notify/fanotify/fanotify_user.c
3215 +@@ -378,7 +378,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
3216 + info_type, fanotify_info_name(info),
3217 + info->name_len, buf, count);
3218 + if (ret < 0)
3219 +- return ret;
3220 ++ goto out_close_fd;
3221 +
3222 + buf += ret;
3223 + count -= ret;
3224 +@@ -426,7 +426,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
3225 + fanotify_event_object_fh(event),
3226 + info_type, dot, dot_len, buf, count);
3227 + if (ret < 0)
3228 +- return ret;
3229 ++ goto out_close_fd;
3230 +
3231 + buf += ret;
3232 + count -= ret;
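Both hunks restore the usual goto-cleanup discipline: once the event's fd exists, every later error has to unwind through out_close_fd rather than returning directly, or the descriptor leaks. The general shape, as a stand-alone sketch with malloc standing in for the installed fd:

    #include <stdio.h>
    #include <stdlib.h>

    static int copy_step(int fail) { return fail ? -14 : 0; } /* -EFAULT-ish */

    static int do_copy(int fail_late, char **out)
    {
            char *res = malloc(64); /* the resource; stands in for the fd */
            int ret;

            if (!res)
                    return -12; /* -ENOMEM-ish */

            ret = copy_step(fail_late);
            if (ret < 0)
                    goto out_release; /* was "return ret": would leak res */

            *out = res; /* success: ownership passes to the caller */
            return 0;

    out_release:
            free(res);
            return ret;
    }

    int main(void)
    {
            char *p = NULL;

            printf("%d\n", do_copy(1, &p)); /* error path releases */
            if (do_copy(0, &p) == 0)
                    free(p);                /* success path hands it over */
            return 0;
    }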
3233 +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
3234 +index cccd1aab69dd1..5dae4187210d9 100644
3235 +--- a/include/linux/hugetlb.h
3236 ++++ b/include/linux/hugetlb.h
3237 +@@ -145,6 +145,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
3238 + long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
3239 + long freed);
3240 + bool isolate_huge_page(struct page *page, struct list_head *list);
3241 ++int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
3242 + void putback_active_hugepage(struct page *page);
3243 + void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
3244 + void free_huge_page(struct page *page);
3245 +@@ -330,6 +331,11 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
3246 + return false;
3247 + }
3248 +
3249 ++static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
3250 ++{
3251 ++ return 0;
3252 ++}
3253 ++
3254 + static inline void putback_active_hugepage(struct page *page)
3255 + {
3256 + }
3257 +@@ -591,6 +597,8 @@ struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
3258 + unsigned long address);
3259 + int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3260 + pgoff_t idx);
3261 ++void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
3262 ++ unsigned long address, struct page *page);
3263 +
3264 + /* arch callback */
3265 + int __init __alloc_bootmem_huge_page(struct hstate *h);
3266 +diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
3267 +index a57af878fd0cd..4a5966475a35a 100644
3268 +--- a/include/linux/mfd/rohm-bd70528.h
3269 ++++ b/include/linux/mfd/rohm-bd70528.h
3270 +@@ -26,9 +26,7 @@ struct bd70528_data {
3271 + struct mutex rtc_timer_lock;
3272 + };
3273 +
3274 +-#define BD70528_BUCK_VOLTS 17
3275 +-#define BD70528_BUCK_VOLTS 17
3276 +-#define BD70528_BUCK_VOLTS 17
3277 ++#define BD70528_BUCK_VOLTS 0x10
3278 + #define BD70528_LDO_VOLTS 0x20
3279 +
3280 + #define BD70528_REG_BUCK1_EN 0x0F
3281 +diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
3282 +index 133967c40214b..6a31bbba1b6f1 100644
3283 +--- a/include/linux/mlx5/driver.h
3284 ++++ b/include/linux/mlx5/driver.h
3285 +@@ -541,6 +541,10 @@ struct mlx5_core_roce {
3286 + enum {
3287 + MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
3288 + MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
3289 ++ /* Set during device detach to block any further devices
3290 ++ * creation/deletion on drivers rescan. Unset during device attach.
3291 ++ */
3292 ++ MLX5_PRIV_FLAGS_DETACH = 1 << 2,
3293 + };
3294 +
3295 + struct mlx5_adev {
3296 +diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
3297 +index 028f442530cf5..60ffeb6b67ae7 100644
3298 +--- a/include/linux/mlx5/transobj.h
3299 ++++ b/include/linux/mlx5/transobj.h
3300 +@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
3301 + struct mlx5_hairpin_params *params);
3302 +
3303 + void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
3304 ++void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
3305 + #endif /* __TRANSOBJ_H__ */
3306 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
3307 +index 5aacc1c10a45a..8f0fb62e8975c 100644
3308 +--- a/include/linux/mm_types.h
3309 ++++ b/include/linux/mm_types.h
3310 +@@ -445,13 +445,6 @@ struct mm_struct {
3311 + */
3312 + atomic_t has_pinned;
3313 +
3314 +- /**
3315 +- * @write_protect_seq: Locked when any thread is write
3316 +- * protecting pages mapped by this mm to enforce a later COW,
3317 +- * for instance during page table copying for fork().
3318 +- */
3319 +- seqcount_t write_protect_seq;
3320 +-
3321 + #ifdef CONFIG_MMU
3322 + atomic_long_t pgtables_bytes; /* PTE page table pages */
3323 + #endif
3324 +@@ -460,6 +453,18 @@ struct mm_struct {
3325 + spinlock_t page_table_lock; /* Protects page tables and some
3326 + * counters
3327 + */
3328 ++ /*
3329 ++ * With some kernel config, the current mmap_lock's offset
3330 ++ * inside 'mm_struct' is at 0x120, which is very optimal, as
3331 ++ * its two hot fields 'count' and 'owner' sit in 2 different
3332 ++ * cachelines, and when mmap_lock is highly contended, both
3333 ++ * of the 2 fields will be accessed frequently, current layout
3334 ++ * will help to reduce cache bouncing.
3335 ++ *
3336 ++ * So please be careful with adding new fields before
3337 ++ * mmap_lock, which can easily push the 2 fields into one
3338 ++ * cacheline.
3339 ++ */
3340 + struct rw_semaphore mmap_lock;
3341 +
3342 + struct list_head mmlist; /* List of maybe swapped mm's. These
3343 +@@ -480,7 +485,15 @@ struct mm_struct {
3344 + unsigned long stack_vm; /* VM_STACK */
3345 + unsigned long def_flags;
3346 +
3347 ++ /**
3348 ++ * @write_protect_seq: Locked when any thread is write
3349 ++ * protecting pages mapped by this mm to enforce a later COW,
3350 ++ * for instance during page table copying for fork().
3351 ++ */
3352 ++ seqcount_t write_protect_seq;
3353 ++
3354 + spinlock_t arg_lock; /* protect the below fields */
3355 ++
3356 + unsigned long start_code, end_code, start_data, end_data;
3357 + unsigned long start_brk, brk, start_stack;
3358 + unsigned long arg_start, arg_end, env_start, env_end;
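The new comment is about field layout, and moving write_protect_seq down beside arg_lock is what keeps it from pushing mmap_lock's hot 'count' and 'owner' words into one cache line. Offsets like the 0x120 mentioned above can be checked mechanically with offsetof(); a toy model (names and sizes illustrative, not the real mm_struct):

    #include <stdio.h>
    #include <stddef.h>

    struct toy_mm {
            char before[0x120]; /* everything laid out ahead of the lock */
            long mmap_lock[8];  /* stands in for struct rw_semaphore */
    };

    int main(void)
    {
            printf("mmap_lock at %#zx\n", offsetof(struct toy_mm, mmap_lock));
            return 0; /* prints 0x120 */
    }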
3359 +diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
3360 +index 0d47fd33b2285..51d7f1b8b32aa 100644
3361 +--- a/include/linux/ptp_clock_kernel.h
3362 ++++ b/include/linux/ptp_clock_kernel.h
3363 +@@ -235,7 +235,7 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
3364 + * @ppm: Parts per million, but with a 16 bit binary fractional field
3365 + */
3366 +
3367 +-extern s32 scaled_ppm_to_ppb(long ppm);
3368 ++extern long scaled_ppm_to_ppb(long ppm);
3369 +
3370 + /**
3371 + * ptp_find_pin() - obtain the pin index of a given auxiliary function
3372 +diff --git a/include/linux/socket.h b/include/linux/socket.h
3373 +index 385894b4a8bba..42222a84167f3 100644
3374 +--- a/include/linux/socket.h
3375 ++++ b/include/linux/socket.h
3376 +@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
3377 + int __user *usockvec);
3378 + extern int __sys_shutdown_sock(struct socket *sock, int how);
3379 + extern int __sys_shutdown(int fd, int how);
3380 +-
3381 +-extern struct ns_common *get_net_ns(struct ns_common *ns);
3382 + #endif /* _LINUX_SOCKET_H */
3383 +diff --git a/include/linux/swapops.h b/include/linux/swapops.h
3384 +index d9b7c9132c2f6..6430a94c69818 100644
3385 +--- a/include/linux/swapops.h
3386 ++++ b/include/linux/swapops.h
3387 +@@ -23,6 +23,16 @@
3388 + #define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
3389 + #define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
3390 +
3391 ++/* Clear all flags but only keep swp_entry_t related information */
3392 ++static inline pte_t pte_swp_clear_flags(pte_t pte)
3393 ++{
3394 ++ if (pte_swp_soft_dirty(pte))
3395 ++ pte = pte_swp_clear_soft_dirty(pte);
3396 ++ if (pte_swp_uffd_wp(pte))
3397 ++ pte = pte_swp_clear_uffd_wp(pte);
3398 ++ return pte;
3399 ++}
3400 ++
3401 + /*
3402 + * Store a type+offset into a swp_entry_t in an arch-independent format
3403 + */
3404 +@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
3405 + {
3406 + swp_entry_t arch_entry;
3407 +
3408 +- if (pte_swp_soft_dirty(pte))
3409 +- pte = pte_swp_clear_soft_dirty(pte);
3410 +- if (pte_swp_uffd_wp(pte))
3411 +- pte = pte_swp_clear_uffd_wp(pte);
3412 ++ pte = pte_swp_clear_flags(pte);
3413 + arch_entry = __pte_to_swp_entry(pte);
3414 + return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
3415 + }
3416 +diff --git a/include/net/mac80211.h b/include/net/mac80211.h
3417 +index 2d1d629e5d14b..a5ca18cfdb6fb 100644
3418 +--- a/include/net/mac80211.h
3419 ++++ b/include/net/mac80211.h
3420 +@@ -6388,7 +6388,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
3421 +
3422 + /**
3423 + * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
3424 +- * of injected frames
3425 ++ * of injected frames.
3426 ++ *
3427 ++ * To accurately parse and take into account rate and retransmission fields,
3428 ++ * you must initialize the chandef field in the ieee80211_tx_info structure
3429 ++ * of the skb before calling this function.
3430 ++ *
3431 + * @skb: packet injected by userspace
3432 + * @dev: the &struct device of this 802.11 device
3433 + */
3434 +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
3435 +index dcaee24a4d877..14b6f7f445322 100644
3436 +--- a/include/net/net_namespace.h
3437 ++++ b/include/net/net_namespace.h
3438 +@@ -197,6 +197,8 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
3439 + void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
3440 +
3441 + void net_ns_barrier(void);
3442 ++
3443 ++struct ns_common *get_net_ns(struct ns_common *ns);
3444 + #else /* CONFIG_NET_NS */
3445 + #include <linux/sched.h>
3446 + #include <linux/nsproxy.h>
3447 +@@ -216,6 +218,11 @@ static inline void net_ns_get_ownership(const struct net *net,
3448 + }
3449 +
3450 + static inline void net_ns_barrier(void) {}
3451 ++
3452 ++static inline struct ns_common *get_net_ns(struct ns_common *ns)
3453 ++{
3454 ++ return ERR_PTR(-EINVAL);
3455 ++}
3456 + #endif /* CONFIG_NET_NS */
3457 +
3458 +
3459 +diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
3460 +index 7d6687618d808..d1b327036ae43 100644
3461 +--- a/include/uapi/linux/in.h
3462 ++++ b/include/uapi/linux/in.h
3463 +@@ -289,6 +289,9 @@ struct sockaddr_in {
3464 + /* Address indicating an error return. */
3465 + #define INADDR_NONE ((unsigned long int) 0xffffffff)
3466 +
3467 ++/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
3468 ++#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
3469 ++
3470 + /* Network number for local host loopback. */
3471 + #define IN_LOOPBACKNET 127
3472 +
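Per the comment above the define, INADDR_DUMMY is the source for ICMP replies when no real address is set; numerically it is 192.0.0.8, the single address RFC 7600 sets aside. The hex-to-dotted-quad reading is easy to verify:

    #include <stdio.h>

    int main(void)
    {
            unsigned long a = 0xc0000008UL; /* INADDR_DUMMY */

            printf("%lu.%lu.%lu.%lu\n", (a >> 24) & 0xff, (a >> 16) & 0xff,
                   (a >> 8) & 0xff, a & 0xff); /* prints 192.0.0.8 */
            return 0;
    }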
3473 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3474 +index 9e600767803b5..2423b4e918b90 100644
3475 +--- a/kernel/bpf/verifier.c
3476 ++++ b/kernel/bpf/verifier.c
3477 +@@ -5937,6 +5937,27 @@ struct bpf_sanitize_info {
3478 + bool mask_to_left;
3479 + };
3480 +
3481 ++static struct bpf_verifier_state *
3482 ++sanitize_speculative_path(struct bpf_verifier_env *env,
3483 ++ const struct bpf_insn *insn,
3484 ++ u32 next_idx, u32 curr_idx)
3485 ++{
3486 ++ struct bpf_verifier_state *branch;
3487 ++ struct bpf_reg_state *regs;
3488 ++
3489 ++ branch = push_stack(env, next_idx, curr_idx, true);
3490 ++ if (branch && insn) {
3491 ++ regs = branch->frame[branch->curframe]->regs;
3492 ++ if (BPF_SRC(insn->code) == BPF_K) {
3493 ++ mark_reg_unknown(env, regs, insn->dst_reg);
3494 ++ } else if (BPF_SRC(insn->code) == BPF_X) {
3495 ++ mark_reg_unknown(env, regs, insn->dst_reg);
3496 ++ mark_reg_unknown(env, regs, insn->src_reg);
3497 ++ }
3498 ++ }
3499 ++ return branch;
3500 ++}
3501 ++
3502 + static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3503 + struct bpf_insn *insn,
3504 + const struct bpf_reg_state *ptr_reg,
3505 +@@ -6020,12 +6041,26 @@ do_sim:
3506 + tmp = *dst_reg;
3507 + *dst_reg = *ptr_reg;
3508 + }
3509 +- ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3510 ++ ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
3511 ++ env->insn_idx);
3512 + if (!ptr_is_dst_reg && ret)
3513 + *dst_reg = tmp;
3514 + return !ret ? REASON_STACK : 0;
3515 + }
3516 +
3517 ++static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
3518 ++{
3519 ++ struct bpf_verifier_state *vstate = env->cur_state;
3520 ++
3521 ++ /* If we simulate paths under speculation, we don't update the
3522 ++ * insn as 'seen' such that when we verify unreachable paths in
3523 ++ * the non-speculative domain, sanitize_dead_code() can still
3524 ++ * rewrite/sanitize them.
3525 ++ */
3526 ++ if (!vstate->speculative)
3527 ++ env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
3528 ++}
3529 ++
3530 + static int sanitize_err(struct bpf_verifier_env *env,
3531 + const struct bpf_insn *insn, int reason,
3532 + const struct bpf_reg_state *off_reg,
3533 +@@ -8204,14 +8239,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
3534 + if (err)
3535 + return err;
3536 + }
3537 ++
3538 + if (pred == 1) {
3539 +- /* only follow the goto, ignore fall-through */
3540 ++ /* Only follow the goto, ignore fall-through. If needed, push
3541 ++ * the fall-through branch for simulation under speculative
3542 ++ * execution.
3543 ++ */
3544 ++ if (!env->bypass_spec_v1 &&
3545 ++ !sanitize_speculative_path(env, insn, *insn_idx + 1,
3546 ++ *insn_idx))
3547 ++ return -EFAULT;
3548 + *insn_idx += insn->off;
3549 + return 0;
3550 + } else if (pred == 0) {
3551 +- /* only follow fall-through branch, since
3552 +- * that's where the program will go
3553 ++ /* Only follow the fall-through branch, since that's where the
3554 ++ * program will go. If needed, push the goto branch for
3555 ++ * simulation under speculative execution.
3556 + */
3557 ++ if (!env->bypass_spec_v1 &&
3558 ++ !sanitize_speculative_path(env, insn,
3559 ++ *insn_idx + insn->off + 1,
3560 ++ *insn_idx))
3561 ++ return -EFAULT;
3562 + return 0;
3563 + }
3564 +
3565 +@@ -10060,7 +10109,7 @@ static int do_check(struct bpf_verifier_env *env)
3566 + }
3567 +
3568 + regs = cur_regs(env);
3569 +- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
3570 ++ sanitize_mark_insn_seen(env);
3571 + prev_insn_idx = env->insn_idx;
3572 +
3573 + if (class == BPF_ALU || class == BPF_ALU64) {
3574 +@@ -10285,7 +10334,7 @@ process_bpf_exit:
3575 + return err;
3576 +
3577 + env->insn_idx++;
3578 +- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
3579 ++ sanitize_mark_insn_seen(env);
3580 + } else {
3581 + verbose(env, "invalid BPF_LD mode\n");
3582 + return -EINVAL;
3583 +@@ -10784,6 +10833,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
3584 + {
3585 + struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
3586 + struct bpf_insn *insn = new_prog->insnsi;
3587 ++ u32 old_seen = old_data[off].seen;
3588 + u32 prog_len;
3589 + int i;
3590 +
3591 +@@ -10804,7 +10854,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
3592 + memcpy(new_data + off + cnt - 1, old_data + off,
3593 + sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
3594 + for (i = off; i < off + cnt - 1; i++) {
3595 +- new_data[i].seen = env->pass_cnt;
3596 ++ /* Expand insni[off]'s seen count to the patched range. */
3597 ++ new_data[i].seen = old_seen;
3598 + new_data[i].zext_dst = insn_has_def32(env, insn + i);
3599 + }
3600 + env->insn_aux_data = new_data;
3601 +@@ -12060,6 +12111,9 @@ static void free_states(struct bpf_verifier_env *env)
3602 + * insn_aux_data was touched. These variables are compared to clear temporary
3603 + * data from failed pass. For testing and experiments do_check_common() can be
3604 + * run multiple times even when prior attempt to verify is unsuccessful.
3605 ++ *
3606 ++ * Note that special handling is needed on !env->bypass_spec_v1 if this is
3607 ++ * ever called outside of error path with subsequent program rejection.
3608 + */
3609 + static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
3610 + {
3611 +diff --git a/kernel/crash_core.c b/kernel/crash_core.c
3612 +index 825284baaf466..684a6061a13a4 100644
3613 +--- a/kernel/crash_core.c
3614 ++++ b/kernel/crash_core.c
3615 +@@ -464,6 +464,7 @@ static int __init crash_save_vmcoreinfo_init(void)
3616 + VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
3617 + VMCOREINFO_STRUCT_SIZE(mem_section);
3618 + VMCOREINFO_OFFSET(mem_section, section_mem_map);
3619 ++ VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
3620 + VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
3621 + #endif
3622 + VMCOREINFO_STRUCT_SIZE(page);
3623 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3624 +index 487312a5ceabb..47fcc3fe9dc5a 100644
3625 +--- a/kernel/sched/fair.c
3626 ++++ b/kernel/sched/fair.c
3627 +@@ -3760,11 +3760,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
3628 + */
3629 + static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3630 + {
3631 ++ /*
3632 ++ * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3633 ++ * See ___update_load_avg() for details.
3634 ++ */
3635 ++ u32 divider = get_pelt_divider(&cfs_rq->avg);
3636 ++
3637 + dequeue_load_avg(cfs_rq, se);
3638 + sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3639 +- sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3640 ++ cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
3641 + sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
3642 +- sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
3643 ++ cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
3644 +
3645 + add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
3646 +
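Rather than subtracting the detached entity's sums (which, after rounding, can leave a sum out of step with its own average), the cfs_rq sums are rebuilt from the averages: sum = avg * divider, with the divider taken from the cfs_rq's own period_contrib. Schematically, after the two sub_positive() calls on the averages:

    cfs_rq->avg.util_sum     = cfs_rq->avg.util_avg     * divider;
    cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;

so each sum is pinned to exactly the value its average implies.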
3647 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3648 +index f2d4ee80feb34..7c8151d74faf0 100644
3649 +--- a/kernel/trace/trace.c
3650 ++++ b/kernel/trace/trace.c
3651 +@@ -2198,9 +2198,6 @@ struct saved_cmdlines_buffer {
3652 + };
3653 + static struct saved_cmdlines_buffer *savedcmd;
3654 +
3655 +-/* temporary disable recording */
3656 +-static atomic_t trace_record_taskinfo_disabled __read_mostly;
3657 +-
3658 + static inline char *get_saved_cmdlines(int idx)
3659 + {
3660 + return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
3661 +@@ -2486,8 +2483,6 @@ static bool tracing_record_taskinfo_skip(int flags)
3662 + {
3663 + if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
3664 + return true;
3665 +- if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
3666 +- return true;
3667 + if (!__this_cpu_read(trace_taskinfo_save))
3668 + return true;
3669 + return false;
3670 +@@ -3742,9 +3737,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
3671 + return ERR_PTR(-EBUSY);
3672 + #endif
3673 +
3674 +- if (!iter->snapshot)
3675 +- atomic_inc(&trace_record_taskinfo_disabled);
3676 +-
3677 + if (*pos != iter->pos) {
3678 + iter->ent = NULL;
3679 + iter->cpu = 0;
3680 +@@ -3787,9 +3779,6 @@ static void s_stop(struct seq_file *m, void *p)
3681 + return;
3682 + #endif
3683 +
3684 +- if (!iter->snapshot)
3685 +- atomic_dec(&trace_record_taskinfo_disabled);
3686 +-
3687 + trace_access_unlock(iter->cpu_file);
3688 + trace_event_read_unlock();
3689 + }
3690 +diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
3691 +index c1637f90c8a38..4702efb00ff21 100644
3692 +--- a/kernel/trace/trace_clock.c
3693 ++++ b/kernel/trace/trace_clock.c
3694 +@@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void)
3695 + prev_time = READ_ONCE(trace_clock_struct.prev_time);
3696 + now = sched_clock_cpu(this_cpu);
3697 +
3698 +- /* Make sure that now is always greater than prev_time */
3699 ++ /* Make sure that now is always greater than or equal to prev_time */
3700 + if ((s64)(now - prev_time) < 0)
3701 +- now = prev_time + 1;
3702 ++ now = prev_time;
3703 +
3704 + /*
3705 + * If in an NMI context then dont risk lockups and simply return
3706 +@@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void)
3707 + /* Reread prev_time in case it was already updated */
3708 + prev_time = READ_ONCE(trace_clock_struct.prev_time);
3709 + if ((s64)(now - prev_time) < 0)
3710 +- now = prev_time + 1;
3711 ++ now = prev_time;
3712 +
3713 + trace_clock_struct.prev_time = now;
3714 +
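Both clamps now hold the clock at prev_time instead of inventing prev_time + 1, so a cross-CPU read that appears to go backwards returns an equal timestamp rather than one that can leapfrog the next real update. The clamp in isolation (a minimal sketch):

    #include <stdio.h>

    /* never let the global clock run backwards; standing still is fine */
    static long long clamp_now(long long now, long long prev)
    {
            return ((long long)(now - prev) < 0) ? prev : now;
    }

    int main(void)
    {
            printf("%lld %lld\n", clamp_now(99, 100), clamp_now(101, 100));
            return 0; /* prints: 100 101 */
    }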
3715 +diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3716 +index ce63ec0187c55..3da4817190f3d 100644
3717 +--- a/mm/hugetlb.c
3718 ++++ b/mm/hugetlb.c
3719 +@@ -2127,12 +2127,18 @@ out:
3720 + * be restored when a newly allocated huge page must be freed. It is
3721 + * to be called after calling vma_needs_reservation to determine if a
3722 + * reservation exists.
3723 ++ *
3724 ++ * vma_del_reservation is used in error paths where an entry in the reserve
3725 ++ * map was created during huge page allocation and must be removed. It is to
3726 ++ * be called after calling vma_needs_reservation to determine if a reservation
3727 ++ * exists.
3728 + */
3729 + enum vma_resv_mode {
3730 + VMA_NEEDS_RESV,
3731 + VMA_COMMIT_RESV,
3732 + VMA_END_RESV,
3733 + VMA_ADD_RESV,
3734 ++ VMA_DEL_RESV,
3735 + };
3736 + static long __vma_reservation_common(struct hstate *h,
3737 + struct vm_area_struct *vma, unsigned long addr,
3738 +@@ -2176,11 +2182,21 @@ static long __vma_reservation_common(struct hstate *h,
3739 + ret = region_del(resv, idx, idx + 1);
3740 + }
3741 + break;
3742 ++ case VMA_DEL_RESV:
3743 ++ if (vma->vm_flags & VM_MAYSHARE) {
3744 ++ region_abort(resv, idx, idx + 1, 1);
3745 ++ ret = region_del(resv, idx, idx + 1);
3746 ++ } else {
3747 ++ ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
3748 ++ /* region_add calls of range 1 should never fail. */
3749 ++ VM_BUG_ON(ret < 0);
3750 ++ }
3751 ++ break;
3752 + default:
3753 + BUG();
3754 + }
3755 +
3756 +- if (vma->vm_flags & VM_MAYSHARE)
3757 ++ if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
3758 + return ret;
3759 + else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
3760 + /*
3761 +@@ -2229,25 +2245,39 @@ static long vma_add_reservation(struct hstate *h,
3762 + return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
3763 + }
3764 +
3765 ++static long vma_del_reservation(struct hstate *h,
3766 ++ struct vm_area_struct *vma, unsigned long addr)
3767 ++{
3768 ++ return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
3769 ++}
3770 ++
3771 + /*
3772 +- * This routine is called to restore a reservation on error paths. In the
3773 +- * specific error paths, a huge page was allocated (via alloc_huge_page)
3774 +- * and is about to be freed. If a reservation for the page existed,
3775 +- * alloc_huge_page would have consumed the reservation and set
3776 +- * HPageRestoreReserve in the newly allocated page. When the page is freed
3777 +- * via free_huge_page, the global reservation count will be incremented if
3778 +- * HPageRestoreReserve is set. However, free_huge_page can not adjust the
3779 +- * reserve map. Adjust the reserve map here to be consistent with global
3780 +- * reserve count adjustments to be made by free_huge_page.
3781 ++ * This routine is called to restore reservation information on error paths.
3782 ++ * It should ONLY be called for pages allocated via alloc_huge_page(), and
3783 ++ * the hugetlb mutex should remain held when calling this routine.
3784 ++ *
3785 ++ * It handles two specific cases:
3786 ++ * 1) A reservation was in place and the page consumed the reservation.
3787 ++ * HPageRestoreReserve is set in the page.
3788 ++ * 2) No reservation was in place for the page, so HPageRestoreReserve is
3789 ++ * not set. However, alloc_huge_page always updates the reserve map.
3790 ++ *
3791 ++ * In case 1, free_huge_page later in the error path will increment the
3792 ++ * global reserve count. But, free_huge_page does not have enough context
3793 ++ * to adjust the reservation map. This case deals primarily with private
3794 ++ * mappings. Adjust the reserve map here to be consistent with global
3795 ++ * reserve count adjustments to be made by free_huge_page. Make sure the
3796 ++ * reserve map indicates there is a reservation present.
3797 ++ *
3798 ++ * In case 2, simply undo reserve map modifications done by alloc_huge_page.
3799 + */
3800 +-static void restore_reserve_on_error(struct hstate *h,
3801 +- struct vm_area_struct *vma, unsigned long address,
3802 +- struct page *page)
3803 ++void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
3804 ++ unsigned long address, struct page *page)
3805 + {
3806 +- if (unlikely(HPageRestoreReserve(page))) {
3807 +- long rc = vma_needs_reservation(h, vma, address);
3808 ++ long rc = vma_needs_reservation(h, vma, address);
3809 +
3810 +- if (unlikely(rc < 0)) {
3811 ++ if (HPageRestoreReserve(page)) {
3812 ++ if (unlikely(rc < 0))
3813 + /*
3814 + * Rare out of memory condition in reserve map
3815 + * manipulation. Clear HPageRestoreReserve so that
3816 +@@ -2260,16 +2290,57 @@ static void restore_reserve_on_error(struct hstate *h,
3817 + * accounting of reserve counts.
3818 + */
3819 + ClearHPageRestoreReserve(page);
3820 +- } else if (rc) {
3821 +- rc = vma_add_reservation(h, vma, address);
3822 +- if (unlikely(rc < 0))
3823 ++ else if (rc)
3824 ++ (void)vma_add_reservation(h, vma, address);
3825 ++ else
3826 ++ vma_end_reservation(h, vma, address);
3827 ++ } else {
3828 ++ if (!rc) {
3829 ++ /*
3830 ++ * This indicates there is an entry in the reserve map
3831 ++ * added by alloc_huge_page. We know it was added
3832 ++ * before the alloc_huge_page call, otherwise
3833 ++ * HPageRestoreReserve would be set on the page.
3834 ++ * Remove the entry so that a subsequent allocation
3835 ++ * does not consume a reservation.
3836 ++ */
3837 ++ rc = vma_del_reservation(h, vma, address);
3838 ++ if (rc < 0)
3839 + /*
3840 +- * See above comment about rare out of
3841 +- * memory condition.
3842 ++ * VERY rare out of memory condition. Since
3843 ++ * we can not delete the entry, set
3844 ++ * HPageRestoreReserve so that the reserve
3845 ++ * count will be incremented when the page
3846 ++ * is freed. This reserve will be consumed
3847 ++ * on a subsequent allocation.
3848 + */
3849 +- ClearHPageRestoreReserve(page);
3850 ++ SetHPageRestoreReserve(page);
3851 ++ } else if (rc < 0) {
3852 ++ /*
3853 ++ * Rare out of memory condition from
3854 ++ * vma_needs_reservation call. Memory allocation is
3855 ++ * only attempted if a new entry is needed. Therefore,
3856 ++ * this implies there is not an entry in the
3857 ++ * reserve map.
3858 ++ *
3859 ++ * For shared mappings, no entry in the map indicates
3860 ++ * no reservation. We are done.
3861 ++ */
3862 ++ if (!(vma->vm_flags & VM_MAYSHARE))
3863 ++ /*
3864 ++ * For private mappings, no entry indicates
3865 ++ * a reservation is present. Since we can
3866 ++ * not add an entry, set SetHPageRestoreReserve
3867 ++ * on the page so reserve count will be
3868 ++ * incremented when freed. This reserve will
3869 ++ * be consumed on a subsequent allocation.
3870 ++ */
3871 ++ SetHPageRestoreReserve(page);
3872 + } else
3873 +- vma_end_reservation(h, vma, address);
3874 ++ /*
3875 ++ * No reservation present, do nothing
3876 ++ */
3877 ++ vma_end_reservation(h, vma, address);
3878 + }
3879 + }
3880 +
3881 +@@ -3886,6 +3957,8 @@ again:
3882 + spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3883 + entry = huge_ptep_get(src_pte);
3884 + if (!pte_same(src_pte_old, entry)) {
3885 ++ restore_reserve_on_error(h, vma, addr,
3886 ++ new);
3887 + put_page(new);
3888 + /* dst_entry won't change as in child */
3889 + goto again;
3890 +@@ -4820,6 +4893,7 @@ out_release_unlock:
3891 + if (vm_shared)
3892 + unlock_page(page);
3893 + out_release_nounlock:
3894 ++ restore_reserve_on_error(h, dst_vma, dst_addr, page);
3895 + put_page(page);
3896 + goto out;
3897 + }
3898 +@@ -5664,6 +5738,21 @@ unlock:
3899 + return ret;
3900 + }
3901 +
3902 ++int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
3903 ++{
3904 ++ int ret = 0;
3905 ++
3906 ++ *hugetlb = false;
3907 ++ spin_lock_irq(&hugetlb_lock);
3908 ++ if (PageHeadHuge(page)) {
3909 ++ *hugetlb = true;
3910 ++ if (HPageFreed(page) || HPageMigratable(page))
3911 ++ ret = get_page_unless_zero(page);
3912 ++ }
3913 ++ spin_unlock_irq(&hugetlb_lock);
3914 ++ return ret;
3915 ++}
3916 ++
3917 + void putback_active_hugepage(struct page *page)
3918 + {
3919 + spin_lock(&hugetlb_lock);
3920 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
3921 +index bd3945446d47e..704d05057d8c3 100644
3922 +--- a/mm/memory-failure.c
3923 ++++ b/mm/memory-failure.c
3924 +@@ -949,6 +949,17 @@ static int page_action(struct page_state *ps, struct page *p,
3925 + return (result == MF_RECOVERED || result == MF_DELAYED) ? 0 : -EBUSY;
3926 + }
3927 +
3928 ++/*
3929 ++ * Return true if a page type of a given page is supported by hwpoison
3930 ++ * mechanism (while handling could fail), otherwise false. This function
3931 ++ * does not return true for hugetlb or device memory pages, so it's assumed
3932 ++ * to be called only in the context where we never have such pages.
3933 ++ */
3934 ++static inline bool HWPoisonHandlable(struct page *page)
3935 ++{
3936 ++ return PageLRU(page) || __PageMovable(page);
3937 ++}
3938 ++
3939 + /**
3940 + * __get_hwpoison_page() - Get refcount for memory error handling:
3941 + * @page: raw error page (hit by memory error)
3942 +@@ -959,8 +970,22 @@ static int page_action(struct page_state *ps, struct page *p,
3943 + static int __get_hwpoison_page(struct page *page)
3944 + {
3945 + struct page *head = compound_head(page);
3946 ++ int ret = 0;
3947 ++ bool hugetlb = false;
3948 ++
3949 ++ ret = get_hwpoison_huge_page(head, &hugetlb);
3950 ++ if (hugetlb)
3951 ++ return ret;
3952 +
3953 +- if (!PageHuge(head) && PageTransHuge(head)) {
3954 ++ /*
3955 ++ * This check prevents from calling get_hwpoison_unless_zero()
3956 ++ * for any unsupported type of page in order to reduce the risk of
3957 ++ * unexpected races caused by taking a page refcount.
3958 ++ */
3959 ++ if (!HWPoisonHandlable(head))
3960 ++ return 0;
3961 ++
3962 ++ if (PageTransHuge(head)) {
3963 + /*
3964 + * Non anonymous thp exists only in allocation/free time. We
3965 + * can't handle such a case correctly, so let's give it up.
3966 +@@ -1017,7 +1042,7 @@ try_again:
3967 + ret = -EIO;
3968 + }
3969 + } else {
3970 +- if (PageHuge(p) || PageLRU(p) || __PageMovable(p)) {
3971 ++ if (PageHuge(p) || HWPoisonHandlable(p)) {
3972 + ret = 1;
3973 + } else {
3974 + /*
3975 +@@ -1527,7 +1552,12 @@ try_again:
3976 + return 0;
3977 + }
3978 +
3979 +- if (!PageTransTail(p) && !PageLRU(p))
3980 ++ /*
3981 ++ * __munlock_pagevec may clear a writeback page's LRU flag without
3982 ++ * page_lock. We need wait writeback completion for this page or it
3983 ++ * may trigger vfs BUG while evict inode.
3984 ++ */
3985 ++ if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
3986 + goto identify_page_state;
3987 +
3988 + /*
3989 +diff --git a/mm/slab_common.c b/mm/slab_common.c
3990 +index 88e833986332e..ba2f4b01920fd 100644
3991 +--- a/mm/slab_common.c
3992 ++++ b/mm/slab_common.c
3993 +@@ -89,8 +89,7 @@ EXPORT_SYMBOL(kmem_cache_size);
3994 + #ifdef CONFIG_DEBUG_VM
3995 + static int kmem_cache_sanity_check(const char *name, unsigned int size)
3996 + {
3997 +- if (!name || in_interrupt() || size < sizeof(void *) ||
3998 +- size > KMALLOC_MAX_SIZE) {
3999 ++ if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
4000 + pr_err("kmem_cache_create(%s) integrity check failed\n", name);
4001 + return -EINVAL;
4002 + }
4003 +diff --git a/mm/slub.c b/mm/slub.c
4004 +index 3021ce9bf1b3d..602f9712ab53d 100644
4005 +--- a/mm/slub.c
4006 ++++ b/mm/slub.c
4007 +@@ -15,6 +15,7 @@
4008 + #include <linux/module.h>
4009 + #include <linux/bit_spinlock.h>
4010 + #include <linux/interrupt.h>
4011 ++#include <linux/swab.h>
4012 + #include <linux/bitops.h>
4013 + #include <linux/slab.h>
4014 + #include "slab.h"
4015 +@@ -710,15 +711,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
4016 + p, p - addr, get_freepointer(s, p));
4017 +
4018 + if (s->flags & SLAB_RED_ZONE)
4019 +- print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
4020 ++ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
4021 + s->red_left_pad);
4022 + else if (p > addr + 16)
4023 + print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
4024 +
4025 +- print_section(KERN_ERR, "Object ", p,
4026 ++ print_section(KERN_ERR, "Object ", p,
4027 + min_t(unsigned int, s->object_size, PAGE_SIZE));
4028 + if (s->flags & SLAB_RED_ZONE)
4029 +- print_section(KERN_ERR, "Redzone ", p + s->object_size,
4030 ++ print_section(KERN_ERR, "Redzone ", p + s->object_size,
4031 + s->inuse - s->object_size);
4032 +
4033 + off = get_info_end(s);
4034 +@@ -730,7 +731,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
4035 +
4036 + if (off != size_from_object(s))
4037 + /* Beginning of the filler is the free pointer */
4038 +- print_section(KERN_ERR, "Padding ", p + off,
4039 ++ print_section(KERN_ERR, "Padding ", p + off,
4040 + size_from_object(s) - off);
4041 +
4042 + dump_stack();
4043 +@@ -907,11 +908,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
4044 + u8 *endobject = object + s->object_size;
4045 +
4046 + if (s->flags & SLAB_RED_ZONE) {
4047 +- if (!check_bytes_and_report(s, page, object, "Redzone",
4048 ++ if (!check_bytes_and_report(s, page, object, "Left Redzone",
4049 + object - s->red_left_pad, val, s->red_left_pad))
4050 + return 0;
4051 +
4052 +- if (!check_bytes_and_report(s, page, object, "Redzone",
4053 ++ if (!check_bytes_and_report(s, page, object, "Right Redzone",
4054 + endobject, val, s->inuse - s->object_size))
4055 + return 0;
4056 + } else {
4057 +@@ -926,7 +927,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
4058 + if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
4059 + (!check_bytes_and_report(s, page, p, "Poison", p,
4060 + POISON_FREE, s->object_size - 1) ||
4061 +- !check_bytes_and_report(s, page, p, "Poison",
4062 ++ !check_bytes_and_report(s, page, p, "End Poison",
4063 + p + s->object_size - 1, POISON_END, 1)))
4064 + return 0;
4065 + /*
4066 +@@ -3687,7 +3688,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
4067 + {
4068 + slab_flags_t flags = s->flags;
4069 + unsigned int size = s->object_size;
4070 +- unsigned int freepointer_area;
4071 + unsigned int order;
4072 +
4073 + /*
4074 +@@ -3696,13 +3696,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
4075 + * the possible location of the free pointer.
4076 + */
4077 + size = ALIGN(size, sizeof(void *));
4078 +- /*
4079 +- * This is the area of the object where a freepointer can be
4080 +- * safely written. If redzoning adds more to the inuse size, we
4081 +- * can't use that portion for writing the freepointer, so
4082 +- * s->offset must be limited within this for the general case.
4083 +- */
4084 +- freepointer_area = size;
4085 +
4086 + #ifdef CONFIG_SLUB_DEBUG
4087 + /*
4088 +@@ -3728,19 +3721,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
4089 +
4090 + /*
4091 + * With that we have determined the number of bytes in actual use
4092 +- * by the object. This is the potential offset to the free pointer.
4093 ++ * by the object and redzoning.
4094 + */
4095 + s->inuse = size;
4096 +
4097 +- if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
4098 +- s->ctor)) {
4099 ++ if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
4100 ++ ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
4101 ++ s->ctor) {
4102 + /*
4103 + * Relocate free pointer after the object if it is not
4104 + * permitted to overwrite the first word of the object on
4105 + * kmem_cache_free.
4106 + *
4107 + * This is the case if we do RCU, have a constructor or
4108 +- * destructor or are poisoning the objects.
4109 ++ * destructor, are poisoning the objects, or are
4110 ++ * redzoning an object smaller than sizeof(void *).
4111 + *
4112 + * The assumption that s->offset >= s->inuse means free
4113 + * pointer is outside of the object is used in the
4114 +@@ -3749,13 +3744,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
4115 + */
4116 + s->offset = size;
4117 + size += sizeof(void *);
4118 +- } else if (freepointer_area > sizeof(void *)) {
4119 ++ } else {
4120 + /*
4121 + * Store freelist pointer near middle of object to keep
4122 + * it away from the edges of the object to avoid small
4123 + * sized over/underflows from neighboring allocations.
4124 + */
4125 +- s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
4126 ++ s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
4127 + }
4128 +
4129 + #ifdef CONFIG_SLUB_DEBUG
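With freepointer_area gone, the midpoint placement works directly off object_size, rounded down to pointer alignment, which dovetails with dropping the size < sizeof(void *) test from kmem_cache_sanity_check() earlier in this patch. For a hypothetical 24-byte object on a 64-bit build:

    #include <stdio.h>
    #include <stddef.h>

    /* simplified stand-in for the kernel macro; power-of-two align only */
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    int main(void)
    {
            size_t object_size = 24; /* hypothetical small cache */
            size_t off = ALIGN_DOWN(object_size / 2, sizeof(void *));

            printf("free pointer at %zu of %zu\n", off, object_size);
            return 0; /* prints: free pointer at 8 of 24 */
    }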
4130 +diff --git a/mm/swapfile.c b/mm/swapfile.c
4131 +index 084a5b9a18e5c..2097648df212d 100644
4132 +--- a/mm/swapfile.c
4133 ++++ b/mm/swapfile.c
4134 +@@ -1900,7 +1900,7 @@ unsigned int count_swap_pages(int type, int free)
4135 +
4136 + static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
4137 + {
4138 +- return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
4139 ++ return pte_same(pte_swp_clear_flags(pte), swp_pte);
4140 + }
4141 +
4142 + /*
4143 +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
4144 +index a5e313cd6f447..b9dd150f6f01d 100644
4145 +--- a/net/batman-adv/bat_iv_ogm.c
4146 ++++ b/net/batman-adv/bat_iv_ogm.c
4147 +@@ -409,8 +409,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
4148 + if (WARN_ON(!forw_packet->if_outgoing))
4149 + return;
4150 +
4151 +- if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
4152 ++ if (forw_packet->if_outgoing->soft_iface != soft_iface) {
4153 ++ pr_warn("%s: soft interface switch for queued OGM\n", __func__);
4154 + return;
4155 ++ }
4156 +
4157 + if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
4158 + return;
4159 +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
4160 +index af3430c2d6ea8..660dec6785ad9 100644
4161 +--- a/net/bridge/br_private.h
4162 ++++ b/net/bridge/br_private.h
4163 +@@ -90,8 +90,8 @@ struct bridge_mcast_stats {
4164 + #endif
4165 +
4166 + struct br_tunnel_info {
4167 +- __be64 tunnel_id;
4168 +- struct metadata_dst *tunnel_dst;
4169 ++ __be64 tunnel_id;
4170 ++ struct metadata_dst __rcu *tunnel_dst;
4171 + };
4172 +
4173 + /* private vlan flags */
4174 +diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
4175 +index 169e005fbda29..debe167202782 100644
4176 +--- a/net/bridge/br_vlan_tunnel.c
4177 ++++ b/net/bridge/br_vlan_tunnel.c
4178 +@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
4179 + br_vlan_tunnel_rht_params);
4180 + }
4181 +
4182 ++static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
4183 ++{
4184 ++ struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
4185 ++
4186 ++ WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
4187 ++ RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
4188 ++ dst_release(&tdst->dst);
4189 ++}
4190 ++
4191 + void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
4192 + struct net_bridge_vlan *vlan)
4193 + {
4194 +- if (!vlan->tinfo.tunnel_dst)
4195 ++ if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
4196 + return;
4197 + rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
4198 + br_vlan_tunnel_rht_params);
4199 +- vlan->tinfo.tunnel_id = 0;
4200 +- dst_release(&vlan->tinfo.tunnel_dst->dst);
4201 +- vlan->tinfo.tunnel_dst = NULL;
4202 ++ vlan_tunnel_info_release(vlan);
4203 + }
4204 +
4205 + static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
4206 + struct net_bridge_vlan *vlan, u32 tun_id)
4207 + {
4208 +- struct metadata_dst *metadata = NULL;
4209 ++ struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
4210 + __be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
4211 + int err;
4212 +
4213 +- if (vlan->tinfo.tunnel_dst)
4214 ++ if (metadata)
4215 + return -EEXIST;
4216 +
4217 + metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
4218 +@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
4219 + return -EINVAL;
4220 +
4221 + metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
4222 +- vlan->tinfo.tunnel_dst = metadata;
4223 +- vlan->tinfo.tunnel_id = key;
4224 ++ rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
4225 ++ WRITE_ONCE(vlan->tinfo.tunnel_id, key);
4226 +
4227 + err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
4228 + br_vlan_tunnel_rht_params);
4229 +@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
4230 +
4231 + return 0;
4232 + out:
4233 +- dst_release(&vlan->tinfo.tunnel_dst->dst);
4234 +- vlan->tinfo.tunnel_dst = NULL;
4235 +- vlan->tinfo.tunnel_id = 0;
4236 ++ vlan_tunnel_info_release(vlan);
4237 +
4238 + return err;
4239 + }
4240 +@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
4241 + int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
4242 + struct net_bridge_vlan *vlan)
4243 + {
4244 ++ struct metadata_dst *tunnel_dst;
4245 ++ __be64 tunnel_id;
4246 + int err;
4247 +
4248 +- if (!vlan || !vlan->tinfo.tunnel_id)
4249 ++ if (!vlan)
4250 + return 0;
4251 +
4252 +- if (unlikely(!skb_vlan_tag_present(skb)))
4253 ++ tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
4254 ++ if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
4255 + return 0;
4256 +
4257 + skb_dst_drop(skb);
4258 +@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
4259 + if (err)
4260 + return err;
4261 +
4262 +- skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
4263 ++ tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
4264 ++ if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
4265 ++ skb_dst_set(skb, &tunnel_dst->dst);
4266 +
4267 + return 0;
4268 + }
4269 +diff --git a/net/can/bcm.c b/net/can/bcm.c
4270 +index 909b9e684e043..f3e4d9528fa38 100644
4271 +--- a/net/can/bcm.c
4272 ++++ b/net/can/bcm.c
4273 +@@ -125,7 +125,7 @@ struct bcm_sock {
4274 + struct sock sk;
4275 + int bound;
4276 + int ifindex;
4277 +- struct notifier_block notifier;
4278 ++ struct list_head notifier;
4279 + struct list_head rx_ops;
4280 + struct list_head tx_ops;
4281 + unsigned long dropped_usr_msgs;
4282 +@@ -133,6 +133,10 @@ struct bcm_sock {
4283 + char procname [32]; /* inode number in decimal with \0 */
4284 + };
4285 +
4286 ++static LIST_HEAD(bcm_notifier_list);
4287 ++static DEFINE_SPINLOCK(bcm_notifier_lock);
4288 ++static struct bcm_sock *bcm_busy_notifier;
4289 ++
4290 + static inline struct bcm_sock *bcm_sk(const struct sock *sk)
4291 + {
4292 + return (struct bcm_sock *)sk;
4293 +@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
4294 + if (!op->count && (op->flags & TX_COUNTEVT)) {
4295 +
4296 + /* create notification to user */
4297 ++ memset(&msg_head, 0, sizeof(msg_head));
4298 + msg_head.opcode = TX_EXPIRED;
4299 + msg_head.flags = op->flags;
4300 + msg_head.count = op->count;
4301 +@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
4302 + /* this element is not throttled anymore */
4303 + data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
4304 +
4305 ++ memset(&head, 0, sizeof(head));
4306 + head.opcode = RX_CHANGED;
4307 + head.flags = op->flags;
4308 + head.count = op->count;
4309 +@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
4310 + }
4311 +
4312 + /* create notification to user */
4313 ++ memset(&msg_head, 0, sizeof(msg_head));
4314 + msg_head.opcode = RX_TIMEOUT;
4315 + msg_head.flags = op->flags;
4316 + msg_head.count = op->count;
4317 +@@ -1378,20 +1385,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
4318 + /*
4319 + * notification handler for netdevice status changes
4320 + */
4321 +-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
4322 +- void *ptr)
4323 ++static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
4324 ++ struct net_device *dev)
4325 + {
4326 +- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4327 +- struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
4328 + struct sock *sk = &bo->sk;
4329 + struct bcm_op *op;
4330 + int notify_enodev = 0;
4331 +
4332 + if (!net_eq(dev_net(dev), sock_net(sk)))
4333 +- return NOTIFY_DONE;
4334 +-
4335 +- if (dev->type != ARPHRD_CAN)
4336 +- return NOTIFY_DONE;
4337 ++ return;
4338 +
4339 + switch (msg) {
4340 +
4341 +@@ -1426,7 +1428,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
4342 + sk->sk_error_report(sk);
4343 + }
4344 + }
4345 ++}
4346 +
4347 ++static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
4348 ++ void *ptr)
4349 ++{
4350 ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4351 ++
4352 ++ if (dev->type != ARPHRD_CAN)
4353 ++ return NOTIFY_DONE;
4354 ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
4355 ++ return NOTIFY_DONE;
4356 ++ if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
4357 ++ return NOTIFY_DONE;
4358 ++
4359 ++ spin_lock(&bcm_notifier_lock);
4360 ++ list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
4361 ++ spin_unlock(&bcm_notifier_lock);
4362 ++ bcm_notify(bcm_busy_notifier, msg, dev);
4363 ++ spin_lock(&bcm_notifier_lock);
4364 ++ }
4365 ++ bcm_busy_notifier = NULL;
4366 ++ spin_unlock(&bcm_notifier_lock);
4367 + return NOTIFY_DONE;
4368 + }
4369 +
4370 +@@ -1446,9 +1469,9 @@ static int bcm_init(struct sock *sk)
4371 + INIT_LIST_HEAD(&bo->rx_ops);
4372 +
4373 + /* set notifier */
4374 +- bo->notifier.notifier_call = bcm_notifier;
4375 +-
4376 +- register_netdevice_notifier(&bo->notifier);
4377 ++ spin_lock(&bcm_notifier_lock);
4378 ++ list_add_tail(&bo->notifier, &bcm_notifier_list);
4379 ++ spin_unlock(&bcm_notifier_lock);
4380 +
4381 + return 0;
4382 + }
4383 +@@ -1471,7 +1494,14 @@ static int bcm_release(struct socket *sock)
4384 +
4385 + /* remove bcm_ops, timer, rx_unregister(), etc. */
4386 +
4387 +- unregister_netdevice_notifier(&bo->notifier);
4388 ++ spin_lock(&bcm_notifier_lock);
4389 ++ while (bcm_busy_notifier == bo) {
4390 ++ spin_unlock(&bcm_notifier_lock);
4391 ++ schedule_timeout_uninterruptible(1);
4392 ++ spin_lock(&bcm_notifier_lock);
4393 ++ }
4394 ++ list_del(&bo->notifier);
4395 ++ spin_unlock(&bcm_notifier_lock);
4396 +
4397 + lock_sock(sk);
4398 +
4399 +@@ -1692,6 +1722,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
4400 + .exit = canbcm_pernet_exit,
4401 + };
4402 +
4403 ++static struct notifier_block canbcm_notifier = {
4404 ++ .notifier_call = bcm_notifier
4405 ++};
4406 ++
4407 + static int __init bcm_module_init(void)
4408 + {
4409 + int err;
4410 +@@ -1705,12 +1739,14 @@ static int __init bcm_module_init(void)
4411 + }
4412 +
4413 + register_pernet_subsys(&canbcm_pernet_ops);
4414 ++ register_netdevice_notifier(&canbcm_notifier);
4415 + return 0;
4416 + }
4417 +
4418 + static void __exit bcm_module_exit(void)
4419 + {
4420 + can_proto_unregister(&bcm_can_proto);
4421 ++ unregister_netdevice_notifier(&canbcm_notifier);
4422 + unregister_pernet_subsys(&canbcm_pernet_ops);
4423 + }
4424 +
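
The bcm.c changes do two things: the added memset() calls zero bcm_msg_head before it is copied to user space, closing an infoleak through uninitialized struct padding, and the per-socket netdevice notifier is replaced by one global notifier that walks bcm_notifier_list under a spinlock, dropping the lock around each callback and parking the bcm_busy_notifier cursor so a racing bcm_release() can wait for its entry to go idle. A compilable userspace sketch of that cursor pattern (hypothetical names, a pthread mutex standing in for the spinlock):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    struct sub { struct sub *next; int id; };

    static struct sub b = { NULL, 2 };
    static struct sub a = { &b, 1 };
    static struct sub *subs = &a;
    static struct sub *busy;         /* entry currently being notified */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void notify_all(void)
    {
        pthread_mutex_lock(&lock);
        for (busy = subs; busy; busy = busy->next) {
            pthread_mutex_unlock(&lock);  /* the callback may sleep */
            printf("notify %d\n", busy->id);
            pthread_mutex_lock(&lock);    /* relock before advancing */
        }
        busy = NULL;
        pthread_mutex_unlock(&lock);
    }

    static void release(struct sub *s)
    {
        pthread_mutex_lock(&lock);
        while (busy == s) {          /* wait until the walk moves past us */
            pthread_mutex_unlock(&lock);
            usleep(100);
            pthread_mutex_lock(&lock);
        }
        if (subs == s)               /* unlink (head case only, for brevity) */
            subs = s->next;
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        notify_all();      /* notify 1, notify 2 */
        release(&a);
        notify_all();      /* notify 2 */
        return 0;
    }
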
4425 +diff --git a/net/can/isotp.c b/net/can/isotp.c
4426 +index 253b24417c8e5..be6183f8ca110 100644
4427 +--- a/net/can/isotp.c
4428 ++++ b/net/can/isotp.c
4429 +@@ -143,10 +143,14 @@ struct isotp_sock {
4430 + u32 force_tx_stmin;
4431 + u32 force_rx_stmin;
4432 + struct tpcon rx, tx;
4433 +- struct notifier_block notifier;
4434 ++ struct list_head notifier;
4435 + wait_queue_head_t wait;
4436 + };
4437 +
4438 ++static LIST_HEAD(isotp_notifier_list);
4439 ++static DEFINE_SPINLOCK(isotp_notifier_lock);
4440 ++static struct isotp_sock *isotp_busy_notifier;
4441 ++
4442 + static inline struct isotp_sock *isotp_sk(const struct sock *sk)
4443 + {
4444 + return (struct isotp_sock *)sk;
4445 +@@ -1013,7 +1017,14 @@ static int isotp_release(struct socket *sock)
4446 + /* wait for complete transmission of current pdu */
4447 + wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
4448 +
4449 +- unregister_netdevice_notifier(&so->notifier);
4450 ++ spin_lock(&isotp_notifier_lock);
4451 ++ while (isotp_busy_notifier == so) {
4452 ++ spin_unlock(&isotp_notifier_lock);
4453 ++ schedule_timeout_uninterruptible(1);
4454 ++ spin_lock(&isotp_notifier_lock);
4455 ++ }
4456 ++ list_del(&so->notifier);
4457 ++ spin_unlock(&isotp_notifier_lock);
4458 +
4459 + lock_sock(sk);
4460 +
4461 +@@ -1317,21 +1328,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
4462 + return 0;
4463 + }
4464 +
4465 +-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
4466 +- void *ptr)
4467 ++static void isotp_notify(struct isotp_sock *so, unsigned long msg,
4468 ++ struct net_device *dev)
4469 + {
4470 +- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4471 +- struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
4472 + struct sock *sk = &so->sk;
4473 +
4474 + if (!net_eq(dev_net(dev), sock_net(sk)))
4475 +- return NOTIFY_DONE;
4476 +-
4477 +- if (dev->type != ARPHRD_CAN)
4478 +- return NOTIFY_DONE;
4479 ++ return;
4480 +
4481 + if (so->ifindex != dev->ifindex)
4482 +- return NOTIFY_DONE;
4483 ++ return;
4484 +
4485 + switch (msg) {
4486 + case NETDEV_UNREGISTER:
4487 +@@ -1357,7 +1363,28 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
4488 + sk->sk_error_report(sk);
4489 + break;
4490 + }
4491 ++}
4492 +
4493 ++static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
4494 ++ void *ptr)
4495 ++{
4496 ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4497 ++
4498 ++ if (dev->type != ARPHRD_CAN)
4499 ++ return NOTIFY_DONE;
4500 ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
4501 ++ return NOTIFY_DONE;
4502 ++ if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
4503 ++ return NOTIFY_DONE;
4504 ++
4505 ++ spin_lock(&isotp_notifier_lock);
4506 ++ list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
4507 ++ spin_unlock(&isotp_notifier_lock);
4508 ++ isotp_notify(isotp_busy_notifier, msg, dev);
4509 ++ spin_lock(&isotp_notifier_lock);
4510 ++ }
4511 ++ isotp_busy_notifier = NULL;
4512 ++ spin_unlock(&isotp_notifier_lock);
4513 + return NOTIFY_DONE;
4514 + }
4515 +
4516 +@@ -1394,8 +1421,9 @@ static int isotp_init(struct sock *sk)
4517 +
4518 + init_waitqueue_head(&so->wait);
4519 +
4520 +- so->notifier.notifier_call = isotp_notifier;
4521 +- register_netdevice_notifier(&so->notifier);
4522 ++ spin_lock(&isotp_notifier_lock);
4523 ++ list_add_tail(&so->notifier, &isotp_notifier_list);
4524 ++ spin_unlock(&isotp_notifier_lock);
4525 +
4526 + return 0;
4527 + }
4528 +@@ -1442,6 +1470,10 @@ static const struct can_proto isotp_can_proto = {
4529 + .prot = &isotp_proto,
4530 + };
4531 +
4532 ++static struct notifier_block canisotp_notifier = {
4533 ++ .notifier_call = isotp_notifier
4534 ++};
4535 ++
4536 + static __init int isotp_module_init(void)
4537 + {
4538 + int err;
4539 +@@ -1451,6 +1483,8 @@ static __init int isotp_module_init(void)
4540 + err = can_proto_register(&isotp_can_proto);
4541 + if (err < 0)
4542 + pr_err("can: registration of isotp protocol failed\n");
4543 ++ else
4544 ++ register_netdevice_notifier(&canisotp_notifier);
4545 +
4546 + return err;
4547 + }
4548 +@@ -1458,6 +1492,7 @@ static __init int isotp_module_init(void)
4549 + static __exit void isotp_module_exit(void)
4550 + {
4551 + can_proto_unregister(&isotp_can_proto);
4552 ++ unregister_netdevice_notifier(&canisotp_notifier);
4553 + }
4554 +
4555 + module_init(isotp_module_init);
4556 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
4557 +index e09d087ba2409..c3946c3558826 100644
4558 +--- a/net/can/j1939/transport.c
4559 ++++ b/net/can/j1939/transport.c
4560 +@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
4561 +
4562 + if ((do_skcb->offset + do_skb->len) < offset_start) {
4563 + __skb_unlink(do_skb, &session->skb_queue);
4564 ++ /* drop ref taken in j1939_session_skb_queue() */
4565 ++ skb_unref(do_skb);
4566 ++
4567 + kfree_skb(do_skb);
4568 + }
4569 + spin_unlock_irqrestore(&session->skb_queue.lock, flags);
4570 +@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
4571 +
4572 + skcb->flags |= J1939_ECU_LOCAL_SRC;
4573 +
4574 ++ skb_get(skb);
4575 + skb_queue_tail(&session->skb_queue, skb);
4576 + }
4577 +
4578 + static struct
4579 +-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
4580 +- unsigned int offset_start)
4581 ++sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
4582 ++ unsigned int offset_start)
4583 + {
4584 + struct j1939_priv *priv = session->priv;
4585 + struct j1939_sk_buff_cb *do_skcb;
4586 +@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
4587 + skb = do_skb;
4588 + }
4589 + }
4590 ++
4591 ++ if (skb)
4592 ++ skb_get(skb);
4593 ++
4594 + spin_unlock_irqrestore(&session->skb_queue.lock, flags);
4595 +
4596 + if (!skb)
4597 +@@ -381,12 +389,12 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
4598 + return skb;
4599 + }
4600 +
4601 +-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
4602 ++static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
4603 + {
4604 + unsigned int offset_start;
4605 +
4606 + offset_start = session->pkt.dpo * 7;
4607 +- return j1939_session_skb_find_by_offset(session, offset_start);
4608 ++ return j1939_session_skb_get_by_offset(session, offset_start);
4609 + }
4610 +
4611 + /* see if we are receiver
4612 +@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
4613 + int ret = 0;
4614 + u8 dat[8];
4615 +
4616 +- se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
4617 ++ se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
4618 + if (!se_skb)
4619 + return -ENOBUFS;
4620 +
4621 +@@ -801,7 +809,8 @@ static int j1939_session_tx_dat(struct j1939_session *session)
4622 + netdev_err_once(priv->ndev,
4623 + "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
4624 +				__func__, session, skcb->offset, se_skb->len, session->pkt.tx);
4625 +- return -EOVERFLOW;
4626 ++ ret = -EOVERFLOW;
4627 ++ goto out_free;
4628 + }
4629 +
4630 + if (!len) {
4631 +@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
4632 + if (pkt_done)
4633 + j1939_tp_set_rxtimeout(session, 250);
4634 +
4635 ++ out_free:
4636 ++ if (ret)
4637 ++ kfree_skb(se_skb);
4638 ++ else
4639 ++ consume_skb(se_skb);
4640 ++
4641 + return ret;
4642 + }
4643 +
4644 +@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
4645 + static int j1939_simple_txnext(struct j1939_session *session)
4646 + {
4647 + struct j1939_priv *priv = session->priv;
4648 +- struct sk_buff *se_skb = j1939_session_skb_find(session);
4649 ++ struct sk_buff *se_skb = j1939_session_skb_get(session);
4650 + struct sk_buff *skb;
4651 + int ret;
4652 +
4653 +@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
4654 + return 0;
4655 +
4656 + skb = skb_clone(se_skb, GFP_ATOMIC);
4657 +- if (!skb)
4658 +- return -ENOMEM;
4659 ++ if (!skb) {
4660 ++ ret = -ENOMEM;
4661 ++ goto out_free;
4662 ++ }
4663 +
4664 + can_skb_set_owner(skb, se_skb->sk);
4665 +
4666 +@@ -1024,12 +1041,18 @@ static int j1939_simple_txnext(struct j1939_session *session)
4667 +
4668 + ret = j1939_send_one(priv, skb);
4669 + if (ret)
4670 +- return ret;
4671 ++ goto out_free;
4672 +
4673 + j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
4674 + j1939_sk_queue_activate_next(session);
4675 +
4676 +- return 0;
4677 ++ out_free:
4678 ++ if (ret)
4679 ++ kfree_skb(se_skb);
4680 ++ else
4681 ++ consume_skb(se_skb);
4682 ++
4683 ++ return ret;
4684 + }
4685 +
4686 + static bool j1939_session_deactivate_locked(struct j1939_session *session)
4687 +@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
4688 + struct sk_buff *skb;
4689 +
4690 + if (!session->transmission) {
4691 +- skb = j1939_session_skb_find(session);
4692 ++ skb = j1939_session_skb_get(session);
4693 + /* distribute among j1939 receivers */
4694 + j1939_sk_recv(session->priv, skb);
4695 ++ consume_skb(skb);
4696 + }
4697 +
4698 + j1939_session_deactivate_activate_next(session);
4699 +@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4700 + {
4701 + struct j1939_priv *priv = session->priv;
4702 + struct j1939_sk_buff_cb *skcb;
4703 +- struct sk_buff *se_skb;
4704 ++ struct sk_buff *se_skb = NULL;
4705 + const u8 *dat;
4706 + u8 *tpdat;
4707 + int offset;
4708 +@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4709 + goto out_session_cancel;
4710 + }
4711 +
4712 +- se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
4713 ++ se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
4714 + if (!se_skb) {
4715 + netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
4716 + session);
4717 +@@ -1848,11 +1872,13 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4718 + j1939_tp_set_rxtimeout(session, 250);
4719 + }
4720 + session->last_cmd = 0xff;
4721 ++ consume_skb(se_skb);
4722 + j1939_session_put(session);
4723 +
4724 + return;
4725 +
4726 + out_session_cancel:
4727 ++ kfree_skb(se_skb);
4728 + j1939_session_timers_cancel(session);
4729 + j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
4730 + j1939_session_put(session);
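
The j1939 hunks rename the skb lookup helpers from _find to _get and make them take a reference (skb_get()) while the queue lock is held, with every caller now dropping that reference via consume_skb() on success or kfree_skb() on error, so the session skb cannot be freed while still in use. A minimal sketch of the take-a-ref/drop-on-every-path discipline (plain C with a hypothetical buf type, not the kernel skb API):

    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
        int refs;
        int data;
    };

    static struct buf *buf_get(struct buf *b)   /* like skb_get() */
    {
        b->refs++;
        return b;
    }

    static void buf_put(struct buf *b)          /* like consume_skb() */
    {
        if (--b->refs == 0) {
            printf("freeing buf %d\n", b->data);
            free(b);
        }
    }

    static int use_buf(struct buf *queued)
    {
        struct buf *b = buf_get(queued);  /* reference for this caller */
        int err = (b->data < 0) ? -1 : 0;

        buf_put(b);                       /* dropped on success and error alike */
        return err;
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));

        b->refs = 1;                      /* the queue's reference */
        b->data = 7;
        use_buf(b);
        buf_put(b);                       /* queue drops its reference: freed */
        return 0;
    }
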
4731 +diff --git a/net/can/raw.c b/net/can/raw.c
4732 +index 139d9471ddcf4..ac96fc2100253 100644
4733 +--- a/net/can/raw.c
4734 ++++ b/net/can/raw.c
4735 +@@ -83,7 +83,7 @@ struct raw_sock {
4736 + struct sock sk;
4737 + int bound;
4738 + int ifindex;
4739 +- struct notifier_block notifier;
4740 ++ struct list_head notifier;
4741 + int loopback;
4742 + int recv_own_msgs;
4743 + int fd_frames;
4744 +@@ -95,6 +95,10 @@ struct raw_sock {
4745 + struct uniqframe __percpu *uniq;
4746 + };
4747 +
4748 ++static LIST_HEAD(raw_notifier_list);
4749 ++static DEFINE_SPINLOCK(raw_notifier_lock);
4750 ++static struct raw_sock *raw_busy_notifier;
4751 ++
4752 + /* Return pointer to store the extra msg flags for raw_recvmsg().
4753 + * We use the space of one unsigned int beyond the 'struct sockaddr_can'
4754 + * in skb->cb.
4755 +@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
4756 + return err;
4757 + }
4758 +
4759 +-static int raw_notifier(struct notifier_block *nb,
4760 +- unsigned long msg, void *ptr)
4761 ++static void raw_notify(struct raw_sock *ro, unsigned long msg,
4762 ++ struct net_device *dev)
4763 + {
4764 +- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4765 +- struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
4766 + struct sock *sk = &ro->sk;
4767 +
4768 + if (!net_eq(dev_net(dev), sock_net(sk)))
4769 +- return NOTIFY_DONE;
4770 +-
4771 +- if (dev->type != ARPHRD_CAN)
4772 +- return NOTIFY_DONE;
4773 ++ return;
4774 +
4775 + if (ro->ifindex != dev->ifindex)
4776 +- return NOTIFY_DONE;
4777 ++ return;
4778 +
4779 + switch (msg) {
4780 + case NETDEV_UNREGISTER:
4781 +@@ -305,7 +304,28 @@ static int raw_notifier(struct notifier_block *nb,
4782 + sk->sk_error_report(sk);
4783 + break;
4784 + }
4785 ++}
4786 ++
4787 ++static int raw_notifier(struct notifier_block *nb, unsigned long msg,
4788 ++ void *ptr)
4789 ++{
4790 ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4791 ++
4792 ++ if (dev->type != ARPHRD_CAN)
4793 ++ return NOTIFY_DONE;
4794 ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
4795 ++ return NOTIFY_DONE;
4796 ++ if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
4797 ++ return NOTIFY_DONE;
4798 +
4799 ++ spin_lock(&raw_notifier_lock);
4800 ++ list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
4801 ++ spin_unlock(&raw_notifier_lock);
4802 ++ raw_notify(raw_busy_notifier, msg, dev);
4803 ++ spin_lock(&raw_notifier_lock);
4804 ++ }
4805 ++ raw_busy_notifier = NULL;
4806 ++ spin_unlock(&raw_notifier_lock);
4807 + return NOTIFY_DONE;
4808 + }
4809 +
4810 +@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
4811 + return -ENOMEM;
4812 +
4813 + /* set notifier */
4814 +- ro->notifier.notifier_call = raw_notifier;
4815 +-
4816 +- register_netdevice_notifier(&ro->notifier);
4817 ++ spin_lock(&raw_notifier_lock);
4818 ++ list_add_tail(&ro->notifier, &raw_notifier_list);
4819 ++ spin_unlock(&raw_notifier_lock);
4820 +
4821 + return 0;
4822 + }
4823 +@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
4824 +
4825 + ro = raw_sk(sk);
4826 +
4827 +- unregister_netdevice_notifier(&ro->notifier);
4828 ++ spin_lock(&raw_notifier_lock);
4829 ++ while (raw_busy_notifier == ro) {
4830 ++ spin_unlock(&raw_notifier_lock);
4831 ++ schedule_timeout_uninterruptible(1);
4832 ++ spin_lock(&raw_notifier_lock);
4833 ++ }
4834 ++ list_del(&ro->notifier);
4835 ++ spin_unlock(&raw_notifier_lock);
4836 +
4837 + lock_sock(sk);
4838 +
4839 +@@ -889,6 +916,10 @@ static const struct can_proto raw_can_proto = {
4840 + .prot = &raw_proto,
4841 + };
4842 +
4843 ++static struct notifier_block canraw_notifier = {
4844 ++ .notifier_call = raw_notifier
4845 ++};
4846 ++
4847 + static __init int raw_module_init(void)
4848 + {
4849 + int err;
4850 +@@ -898,6 +929,8 @@ static __init int raw_module_init(void)
4851 + err = can_proto_register(&raw_can_proto);
4852 + if (err < 0)
4853 + pr_err("can: registration of raw protocol failed\n");
4854 ++ else
4855 ++ register_netdevice_notifier(&canraw_notifier);
4856 +
4857 + return err;
4858 + }
4859 +@@ -905,6 +938,7 @@ static __init int raw_module_init(void)
4860 + static __exit void raw_module_exit(void)
4861 + {
4862 + can_proto_unregister(&raw_can_proto);
4863 ++ unregister_netdevice_notifier(&canraw_notifier);
4864 + }
4865 +
4866 + module_init(raw_module_init);
4867 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
4868 +index 43b6ac4c44395..cc8dafb25d612 100644
4869 +--- a/net/core/net_namespace.c
4870 ++++ b/net/core/net_namespace.c
4871 +@@ -641,6 +641,18 @@ void __put_net(struct net *net)
4872 + }
4873 + EXPORT_SYMBOL_GPL(__put_net);
4874 +
4875 ++/**
4876 ++ * get_net_ns - increment the refcount of the network namespace
4877 ++ * @ns: common namespace (net)
4878 ++ *
4879 ++ * Returns the net's common namespace.
4880 ++ */
4881 ++struct ns_common *get_net_ns(struct ns_common *ns)
4882 ++{
4883 ++ return &get_net(container_of(ns, struct net, ns))->ns;
4884 ++}
4885 ++EXPORT_SYMBOL_GPL(get_net_ns);
4886 ++
4887 + struct net *get_net_ns_by_fd(int fd)
4888 + {
4889 + struct file *file;
4890 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4891 +index 9ad046917b340..2123427883baa 100644
4892 +--- a/net/core/rtnetlink.c
4893 ++++ b/net/core/rtnetlink.c
4894 +@@ -4833,10 +4833,12 @@ static int rtnl_bridge_notify(struct net_device *dev)
4895 + if (err < 0)
4896 + goto errout;
4897 +
4898 +- if (!skb->len) {
4899 +- err = -EINVAL;
4900 ++ /* Notification info is only filled for bridge ports, not the bridge
4901 ++ * device itself. Therefore, a zero notification length is valid and
4902 ++ * should not result in an error.
4903 ++ */
4904 ++ if (!skb->len)
4905 + goto errout;
4906 +- }
4907 +
4908 + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
4909 + return 0;
4910 +diff --git a/net/core/skbuff.c b/net/core/skbuff.c
4911 +index c421c8f809256..7997d99afbd8e 100644
4912 +--- a/net/core/skbuff.c
4913 ++++ b/net/core/skbuff.c
4914 +@@ -1252,6 +1252,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
4915 + struct sock *sk = skb->sk;
4916 + struct sk_buff_head *q;
4917 + unsigned long flags;
4918 ++ bool is_zerocopy;
4919 + u32 lo, hi;
4920 + u16 len;
4921 +
4922 +@@ -1266,6 +1267,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
4923 + len = uarg->len;
4924 + lo = uarg->id;
4925 + hi = uarg->id + len - 1;
4926 ++ is_zerocopy = uarg->zerocopy;
4927 +
4928 + serr = SKB_EXT_ERR(skb);
4929 + memset(serr, 0, sizeof(*serr));
4930 +@@ -1273,7 +1275,7 @@ static void __msg_zerocopy_callback(struct ubuf_info *uarg)
4931 + serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
4932 + serr->ee.ee_data = hi;
4933 + serr->ee.ee_info = lo;
4934 +- if (!uarg->zerocopy)
4935 ++ if (!is_zerocopy)
4936 + serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
4937 +
4938 + q = &sk->sk_error_queue;
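
The skbuff.c fix snapshots uarg->zerocopy into a local before the notification skb is queued; once it is on the error queue, the skb (and the uarg living inside it) can be freed by a concurrent reader, so the later read of uarg->zerocopy was a potential use-after-free. The pattern, reduced to a runnable sketch with a hypothetical notif type:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct notif {
        bool zerocopy;
    };

    static void hand_off(struct notif *n)
    {
        /* after this call, another context may free n */
        free(n);
    }

    int main(void)
    {
        struct notif *n = malloc(sizeof(*n));

        n->zerocopy = true;

        bool is_zerocopy = n->zerocopy;  /* snapshot first ... */
        hand_off(n);                     /* ... then give the object away */

        if (!is_zerocopy)                /* safe: reads the local copy */
            printf("copied, not zerocopied\n");
        return 0;
    }
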
4939 +diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
4940 +index c3a5489964cde..9908b922cce8d 100644
4941 +--- a/net/ethtool/strset.c
4942 ++++ b/net/ethtool/strset.c
4943 +@@ -328,6 +328,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
4944 + int len = 0;
4945 + int ret;
4946 +
4947 ++ len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
4948 ++
4949 + for (i = 0; i < ETH_SS_COUNT; i++) {
4950 + const struct strset_info *set_info = &data->sets[i];
4951 +
4952 +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
4953 +index bfaf327e9d121..e0480c6cebaad 100644
4954 +--- a/net/ipv4/cipso_ipv4.c
4955 ++++ b/net/ipv4/cipso_ipv4.c
4956 +@@ -472,6 +472,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
4957 + kfree(doi_def->map.std->lvl.local);
4958 + kfree(doi_def->map.std->cat.cipso);
4959 + kfree(doi_def->map.std->cat.local);
4960 ++ kfree(doi_def->map.std);
4961 + break;
4962 + }
4963 + kfree(doi_def);
4964 +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
4965 +index 616e2dc1c8fa4..cd65d3146c300 100644
4966 +--- a/net/ipv4/icmp.c
4967 ++++ b/net/ipv4/icmp.c
4968 +@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
4969 + icmp_param.data_len = room;
4970 + icmp_param.head_len = sizeof(struct icmphdr);
4971 +
4972 ++ /* if we don't have a source address at this point, fall back to the
4973 ++ * dummy address instead of sending out a packet with a source address
4974 ++ * of 0.0.0.0
4975 ++ */
4976 ++ if (!fl4.saddr)
4977 ++ fl4.saddr = htonl(INADDR_DUMMY);
4978 ++
4979 + icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
4980 + ende:
4981 + ip_rt_put(rt);
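
The icmp.c hunk substitutes a dummy source address when the flow still has none, instead of emitting a packet from 0.0.0.0. A sketch of the fallback; the INADDR_DUMMY value below (0xc0000008, i.e. 192.0.0.8) is an assumption based on the constant this fix introduces, defined locally since userspace headers lack it:

    #include <arpa/inet.h>
    #include <stdio.h>

    #ifndef INADDR_DUMMY
    #define INADDR_DUMMY 0xc0000008UL    /* assumed: 192.0.0.8 */
    #endif

    int main(void)
    {
        struct in_addr saddr = { .s_addr = htonl(0) }; /* unresolved source */

        if (!saddr.s_addr)                   /* no usable source address */
            saddr.s_addr = htonl(INADDR_DUMMY);

        printf("icmp source: %s\n", inet_ntoa(saddr));
        return 0;
    }
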
4982 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
4983 +index 7b272bbed2b43..6b3c558a4f232 100644
4984 +--- a/net/ipv4/igmp.c
4985 ++++ b/net/ipv4/igmp.c
4986 +@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
4987 + while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
4988 + in_dev->mc_list = i->next_rcu;
4989 + in_dev->mc_count--;
4990 ++ ip_mc_clear_src(i);
4991 + ip_ma_put(i);
4992 + }
4993 + }
4994 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
4995 +index d635b4f32d348..09506203156d1 100644
4996 +--- a/net/ipv4/route.c
4997 ++++ b/net/ipv4/route.c
4998 +@@ -2081,6 +2081,19 @@ martian_source:
4999 + return err;
5000 + }
5001 +
5002 ++/* get device for dst_alloc with local routes */
5003 ++static struct net_device *ip_rt_get_dev(struct net *net,
5004 ++ const struct fib_result *res)
5005 ++{
5006 ++ struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
5007 ++ struct net_device *dev = NULL;
5008 ++
5009 ++ if (nhc)
5010 ++ dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
5011 ++
5012 ++ return dev ? : net->loopback_dev;
5013 ++}
5014 ++
5015 + /*
5016 + * NOTE. We drop all the packets that has local source
5017 + * addresses, because every properly looped back packet
5018 +@@ -2237,7 +2250,7 @@ local_input:
5019 + }
5020 + }
5021 +
5022 +- rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
5023 ++ rth = rt_dst_alloc(ip_rt_get_dev(net, res),
5024 + flags | RTCF_LOCAL, res->type,
5025 + IN_DEV_ORCONF(in_dev, NOPOLICY), false);
5026 + if (!rth)
5027 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
5028 +index c586a6bb8c6d0..3dd340679d096 100644
5029 +--- a/net/ipv4/udp.c
5030 ++++ b/net/ipv4/udp.c
5031 +@@ -2576,6 +2576,9 @@ void udp_destroy_sock(struct sock *sk)
5032 + {
5033 + struct udp_sock *up = udp_sk(sk);
5034 + bool slow = lock_sock_fast(sk);
5035 ++
5036 ++ /* protects from races with udp_abort() */
5037 ++ sock_set_flag(sk, SOCK_DEAD);
5038 + udp_flush_pending_frames(sk);
5039 + unlock_sock_fast(sk, slow);
5040 + if (static_branch_unlikely(&udp_encap_needed_key)) {
5041 +@@ -2826,10 +2829,17 @@ int udp_abort(struct sock *sk, int err)
5042 + {
5043 + lock_sock(sk);
5044 +
5045 ++ /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
5046 ++ * with close()
5047 ++ */
5048 ++ if (sock_flag(sk, SOCK_DEAD))
5049 ++ goto out;
5050 ++
5051 + sk->sk_err = err;
5052 + sk->sk_error_report(sk);
5053 + __udp_disconnect(sk, 0);
5054 +
5055 ++out:
5056 + release_sock(sk);
5057 +
5058 + return 0;
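
The udp.c hunks close a race between close() and udp_abort(): destroy sets SOCK_DEAD under the socket lock, and abort re-checks the flag under the same lock before touching the socket (the ipv6/udp.c hunk below applies the same guard). A userspace sketch of the handshake, with a mutex standing in for the socket lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool sock_dead;

    static void destroy_sock(void)
    {
        pthread_mutex_lock(&sk_lock);
        sock_dead = true;            /* visible to any later abort */
        /* ...flush pending frames... */
        pthread_mutex_unlock(&sk_lock);
    }

    static int abort_sock(void)
    {
        int ran = 0;

        pthread_mutex_lock(&sk_lock);
        if (sock_dead)               /* lost the race with close() */
            goto out;
        /* ...report error, disconnect... */
        ran = 1;
    out:
        pthread_mutex_unlock(&sk_lock);
        return ran;
    }

    int main(void)
    {
        destroy_sock();
        printf("abort %s\n", abort_sock() ? "ran" : "skipped: socket dead");
        return 0;
    }
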
5059 +diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
5060 +index e204163c7036c..92f3235fa2874 100644
5061 +--- a/net/ipv6/netfilter/nft_fib_ipv6.c
5062 ++++ b/net/ipv6/netfilter/nft_fib_ipv6.c
5063 +@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
5064 + }
5065 + EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
5066 +
5067 ++static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
5068 ++{
5069 ++ if (likely(next != IPPROTO_ICMPV6))
5070 ++ return false;
5071 ++
5072 ++ if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
5073 ++ return false;
5074 ++
5075 ++ return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
5076 ++}
5077 ++
5078 + void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
5079 + const struct nft_pktinfo *pkt)
5080 + {
5081 +@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
5082 +
5083 + lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
5084 +
5085 +- if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
5086 +- nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
5087 +- nft_fib_store_result(dest, priv, nft_in(pkt));
5088 +- return;
5089 ++ if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
5090 ++ nft_hook(pkt) == NF_INET_INGRESS) {
5091 ++ if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
5092 ++ nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
5093 ++ nft_fib_store_result(dest, priv, nft_in(pkt));
5094 ++ return;
5095 ++ }
5096 + }
5097 +
5098 + *dest = 0;
5099 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
5100 +index d25e5a9252fdb..29288f134d7ac 100644
5101 +--- a/net/ipv6/udp.c
5102 ++++ b/net/ipv6/udp.c
5103 +@@ -1597,6 +1597,9 @@ void udpv6_destroy_sock(struct sock *sk)
5104 + {
5105 + struct udp_sock *up = udp_sk(sk);
5106 + lock_sock(sk);
5107 ++
5108 ++ /* protects from races with udp_abort() */
5109 ++ sock_set_flag(sk, SOCK_DEAD);
5110 + udp_v6_flush_pending_frames(sk);
5111 + release_sock(sk);
5112 +
5113 +diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
5114 +index 5296898875ffb..223fbcafd6fce 100644
5115 +--- a/net/mac80211/debugfs.c
5116 ++++ b/net/mac80211/debugfs.c
5117 +@@ -4,7 +4,7 @@
5118 + *
5119 + * Copyright 2007 Johannes Berg <johannes@××××××××××××.net>
5120 + * Copyright 2013-2014 Intel Mobile Communications GmbH
5121 +- * Copyright (C) 2018 - 2019 Intel Corporation
5122 ++ * Copyright (C) 2018 - 2019, 2021 Intel Corporation
5123 + */
5124 +
5125 + #include <linux/debugfs.h>
5126 +@@ -387,10 +387,17 @@ static ssize_t reset_write(struct file *file, const char __user *user_buf,
5127 + size_t count, loff_t *ppos)
5128 + {
5129 + struct ieee80211_local *local = file->private_data;
5130 ++ int ret;
5131 +
5132 + rtnl_lock();
5133 ++ wiphy_lock(local->hw.wiphy);
5134 + __ieee80211_suspend(&local->hw, NULL);
5135 +- __ieee80211_resume(&local->hw);
5136 ++ ret = __ieee80211_resume(&local->hw);
5137 ++ wiphy_unlock(local->hw.wiphy);
5138 ++
5139 ++ if (ret)
5140 ++ cfg80211_shutdown_all_interfaces(local->hw.wiphy);
5141 ++
5142 + rtnl_unlock();
5143 +
5144 + return count;
5145 +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
5146 +index 6f8885766cbaa..6ebfd484e61d2 100644
5147 +--- a/net/mac80211/iface.c
5148 ++++ b/net/mac80211/iface.c
5149 +@@ -475,14 +475,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
5150 + GFP_KERNEL);
5151 + }
5152 +
5153 +- /* APs need special treatment */
5154 + if (sdata->vif.type == NL80211_IFTYPE_AP) {
5155 +- struct ieee80211_sub_if_data *vlan, *tmpsdata;
5156 +-
5157 +- /* down all dependent devices, that is VLANs */
5158 +- list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
5159 +- u.vlan.list)
5160 +- dev_close(vlan->dev);
5161 + WARN_ON(!list_empty(&sdata->u.ap.vlans));
5162 + } else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
5163 + /* remove all packets in parent bc_buf pointing to this dev */
5164 +@@ -640,6 +633,15 @@ static int ieee80211_stop(struct net_device *dev)
5165 + {
5166 + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
5167 +
5168 ++ /* close all dependent VLAN interfaces before locking wiphy */
5169 ++ if (sdata->vif.type == NL80211_IFTYPE_AP) {
5170 ++ struct ieee80211_sub_if_data *vlan, *tmpsdata;
5171 ++
5172 ++ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
5173 ++ u.vlan.list)
5174 ++ dev_close(vlan->dev);
5175 ++ }
5176 ++
5177 + wiphy_lock(sdata->local->hw.wiphy);
5178 + ieee80211_do_stop(sdata, true);
5179 + wiphy_unlock(sdata->local->hw.wiphy);
5180 +@@ -1589,6 +1591,9 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
5181 +
5182 + switch (sdata->vif.type) {
5183 + case NL80211_IFTYPE_AP:
5184 ++ if (!list_empty(&sdata->u.ap.vlans))
5185 ++ return -EBUSY;
5186 ++ break;
5187 + case NL80211_IFTYPE_STATION:
5188 + case NL80211_IFTYPE_ADHOC:
5189 + case NL80211_IFTYPE_OCB:
5190 +diff --git a/net/mac80211/main.c b/net/mac80211/main.c
5191 +index 0331f3a3c40e0..9dd741b68f268 100644
5192 +--- a/net/mac80211/main.c
5193 ++++ b/net/mac80211/main.c
5194 +@@ -252,6 +252,7 @@ static void ieee80211_restart_work(struct work_struct *work)
5195 + struct ieee80211_local *local =
5196 + container_of(work, struct ieee80211_local, restart_work);
5197 + struct ieee80211_sub_if_data *sdata;
5198 ++ int ret;
5199 +
5200 + /* wait for scan work complete */
5201 + flush_workqueue(local->workqueue);
5202 +@@ -294,8 +295,12 @@ static void ieee80211_restart_work(struct work_struct *work)
5203 + /* wait for all packet processing to be done */
5204 + synchronize_net();
5205 +
5206 +- ieee80211_reconfig(local);
5207 ++ ret = ieee80211_reconfig(local);
5208 + wiphy_unlock(local->hw.wiphy);
5209 ++
5210 ++ if (ret)
5211 ++ cfg80211_shutdown_all_interfaces(local->hw.wiphy);
5212 ++
5213 + rtnl_unlock();
5214 + }
5215 +
5216 +diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
5217 +index ecad9b10984ff..e627a11844a9b 100644
5218 +--- a/net/mac80211/rc80211_minstrel_ht.c
5219 ++++ b/net/mac80211/rc80211_minstrel_ht.c
5220 +@@ -1516,7 +1516,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
5221 + (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
5222 + return;
5223 +
5224 +- if (time_is_before_jiffies(mi->sample_time))
5225 ++ if (time_is_after_jiffies(mi->sample_time))
5226 + return;
5227 +
5228 + mi->sample_time = jiffies + MINSTREL_SAMPLE_INTERVAL;
5229 +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
5230 +index d4cc9ac2d7033..6b50cb5e0e3cc 100644
5231 +--- a/net/mac80211/scan.c
5232 ++++ b/net/mac80211/scan.c
5233 +@@ -251,13 +251,24 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
5234 + struct ieee80211_mgmt *mgmt = (void *)skb->data;
5235 + struct ieee80211_bss *bss;
5236 + struct ieee80211_channel *channel;
5237 ++ size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
5238 ++ u.probe_resp.variable);
5239 ++
5240 ++ if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
5241 ++ !ieee80211_is_beacon(mgmt->frame_control) &&
5242 ++ !ieee80211_is_s1g_beacon(mgmt->frame_control))
5243 ++ return;
5244 +
5245 + if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
5246 +- if (skb->len < 15)
5247 +- return;
5248 +- } else if (skb->len < 24 ||
5249 +- (!ieee80211_is_probe_resp(mgmt->frame_control) &&
5250 +- !ieee80211_is_beacon(mgmt->frame_control)))
5251 ++ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
5252 ++ min_hdr_len = offsetof(struct ieee80211_ext,
5253 ++ u.s1g_short_beacon.variable);
5254 ++ else
5255 ++ min_hdr_len = offsetof(struct ieee80211_ext,
5256 ++ u.s1g_beacon);
5257 ++ }
5258 ++
5259 ++ if (skb->len < min_hdr_len)
5260 + return;
5261 +
5262 + sdata1 = rcu_dereference(local->scan_sdata);
5263 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5264 +index 28422d6870967..d33dc4e023715 100644
5265 +--- a/net/mac80211/tx.c
5266 ++++ b/net/mac80211/tx.c
5267 +@@ -2002,6 +2002,26 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
5268 + ieee80211_tx(sdata, sta, skb, false);
5269 + }
5270 +
5271 ++static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
5272 ++{
5273 ++ struct ieee80211_radiotap_header *rthdr =
5274 ++ (struct ieee80211_radiotap_header *)skb->data;
5275 ++
5276 ++ /* check for not even having the fixed radiotap header part */
5277 ++ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
5278 ++ return false; /* too short to be possibly valid */
5279 ++
5280 ++ /* is it a header version we can trust to find length from? */
5281 ++ if (unlikely(rthdr->it_version))
5282 ++ return false; /* only version 0 is supported */
5283 ++
5284 ++ /* does the skb contain enough to deliver on the alleged length? */
5285 ++ if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
5286 ++ return false; /* skb too short for claimed rt header extent */
5287 ++
5288 ++ return true;
5289 ++}
5290 ++
5291 + bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
5292 + struct net_device *dev)
5293 + {
5294 +@@ -2010,8 +2030,6 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
5295 + struct ieee80211_radiotap_header *rthdr =
5296 + (struct ieee80211_radiotap_header *) skb->data;
5297 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
5298 +- struct ieee80211_supported_band *sband =
5299 +- local->hw.wiphy->bands[info->band];
5300 + int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
5301 + NULL);
5302 + u16 txflags;
5303 +@@ -2024,17 +2042,8 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
5304 + u8 vht_mcs = 0, vht_nss = 0;
5305 + int i;
5306 +
5307 +- /* check for not even having the fixed radiotap header part */
5308 +- if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
5309 +- return false; /* too short to be possibly valid */
5310 +-
5311 +- /* is it a header version we can trust to find length from? */
5312 +- if (unlikely(rthdr->it_version))
5313 +- return false; /* only version 0 is supported */
5314 +-
5315 +- /* does the skb contain enough to deliver on the alleged length? */
5316 +- if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
5317 +- return false; /* skb too short for claimed rt header extent */
5318 ++ if (!ieee80211_validate_radiotap_len(skb))
5319 ++ return false;
5320 +
5321 + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
5322 + IEEE80211_TX_CTL_DONTFRAG;
5323 +@@ -2174,6 +2183,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
5324 + return false;
5325 +
5326 + if (rate_found) {
5327 ++ struct ieee80211_supported_band *sband =
5328 ++ local->hw.wiphy->bands[info->band];
5329 ++
5330 + info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
5331 +
5332 + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
5333 +@@ -2187,7 +2199,7 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
5334 + } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
5335 + ieee80211_rate_set_vht(info->control.rates, vht_mcs,
5336 + vht_nss);
5337 +- } else {
5338 ++ } else if (sband) {
5339 + for (i = 0; i < sband->n_bitrates; i++) {
5340 + if (rate * 5 != sband->bitrates[i].bitrate)
5341 + continue;
5342 +@@ -2224,8 +2236,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
5343 + info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
5344 + IEEE80211_TX_CTL_INJECTED;
5345 +
5346 +- /* Sanity-check and process the injection radiotap header */
5347 +- if (!ieee80211_parse_tx_radiotap(skb, dev))
5348 ++ /* Sanity-check the length of the radiotap header */
5349 ++ if (!ieee80211_validate_radiotap_len(skb))
5350 + goto fail;
5351 +
5352 + /* we now know there is a radiotap header with a length we can use */
5353 +@@ -2339,6 +2351,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
5354 + ieee80211_select_queue_80211(sdata, skb, hdr);
5355 + skb_set_queue_mapping(skb, ieee80211_ac_from_tid(skb->priority));
5356 +
5357 ++ /*
5358 ++ * Process the radiotap header. This will now take into account the
5359 ++ * selected chandef above to accurately set injection rates and
5360 ++ * retransmissions.
5361 ++ */
5362 ++ if (!ieee80211_parse_tx_radiotap(skb, dev))
5363 ++ goto fail_rcu;
5364 ++
5365 + /* remove the injection radiotap header */
5366 + skb_pull(skb, len_rthdr);
5367 +
5368 +diff --git a/net/mac80211/util.c b/net/mac80211/util.c
5369 +index c0fa526a45b4d..53755a05f73b5 100644
5370 +--- a/net/mac80211/util.c
5371 ++++ b/net/mac80211/util.c
5372 +@@ -2186,8 +2186,6 @@ static void ieee80211_handle_reconfig_failure(struct ieee80211_local *local)
5373 + list_for_each_entry(ctx, &local->chanctx_list, list)
5374 + ctx->driver_present = false;
5375 + mutex_unlock(&local->chanctx_mtx);
5376 +-
5377 +- cfg80211_shutdown_all_interfaces(local->hw.wiphy);
5378 + }
5379 +
5380 + static void ieee80211_assign_chanctx(struct ieee80211_local *local,
5381 +diff --git a/net/mptcp/options.c b/net/mptcp/options.c
5382 +index 8848a9e2a95b1..47d90cf31f125 100644
5383 +--- a/net/mptcp/options.c
5384 ++++ b/net/mptcp/options.c
5385 +@@ -337,6 +337,8 @@ void mptcp_get_options(const struct sk_buff *skb,
5386 + length--;
5387 + continue;
5388 + default:
5389 ++ if (length < 2)
5390 ++ return;
5391 + opsize = *ptr++;
5392 + if (opsize < 2) /* "silly options" */
5393 + return;
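
The options.c hunk adds a bounds check before reading an option's size byte: without it, a TCP option truncated after its kind byte would read past the buffer. The same one-line "length < 2" guard recurs below in nf_synproxy_core.c and (twice) in sch_cake.c. A self-contained sketch of the hardened TLV walk:

    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_EOL 0
    #define TCPOPT_NOP 1

    static void parse_options(const uint8_t *ptr, int length)
    {
        while (length > 0) {
            uint8_t kind = *ptr++;
            uint8_t opsize;

            if (kind == TCPOPT_EOL)
                return;
            if (kind == TCPOPT_NOP) {
                length--;
                continue;
            }
            if (length < 2)          /* the guard this patch adds */
                return;
            opsize = ptr[0];
            if (opsize < 2 || opsize > length)
                return;              /* silly or truncated option */
            printf("option kind %u, %u bytes\n", kind, opsize);
            ptr += opsize - 1;       /* size byte + data */
            length -= opsize;
        }
    }

    int main(void)
    {
        /* MSS option (kind 2, len 4, value 1460), then a lone kind byte */
        const uint8_t opts[] = { 2, 4, 0x05, 0xb4, 3 };

        parse_options(opts, sizeof(opts));
        return 0;
    }
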
5394 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
5395 +index 225b988215171..d8187ac065397 100644
5396 +--- a/net/mptcp/protocol.c
5397 ++++ b/net/mptcp/protocol.c
5398 +@@ -287,11 +287,13 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
5399 +
5400 + /* try to fetch required memory from subflow */
5401 + if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
5402 +- if (ssk->sk_forward_alloc < skb->truesize)
5403 +- goto drop;
5404 +- __sk_mem_reclaim(ssk, skb->truesize);
5405 +- if (!sk_rmem_schedule(sk, skb, skb->truesize))
5406 ++ int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;
5407 ++
5408 ++ if (ssk->sk_forward_alloc < amount)
5409 + goto drop;
5410 ++
5411 ++ ssk->sk_forward_alloc -= amount;
5412 ++ sk->sk_forward_alloc += amount;
5413 + }
5414 +
5415 + /* the skb map_seq accounts for the skb offset:
5416 +@@ -687,18 +689,22 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
5417 + /* In most cases we will be able to lock the mptcp socket. If its already
5418 + * owned, we need to defer to the work queue to avoid ABBA deadlock.
5419 + */
5420 +-static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
5421 ++static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
5422 + {
5423 + struct sock *sk = (struct sock *)msk;
5424 + unsigned int moved = 0;
5425 +
5426 + if (inet_sk_state_load(sk) == TCP_CLOSE)
5427 +- return;
5428 +-
5429 +- mptcp_data_lock(sk);
5430 ++ return false;
5431 +
5432 + __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
5433 + __mptcp_ofo_queue(msk);
5434 ++ if (unlikely(ssk->sk_err)) {
5435 ++ if (!sock_owned_by_user(sk))
5436 ++ __mptcp_error_report(sk);
5437 ++ else
5438 ++ set_bit(MPTCP_ERROR_REPORT, &msk->flags);
5439 ++ }
5440 +
5441 + /* If the moves have caught up with the DATA_FIN sequence number
5442 + * it's time to ack the DATA_FIN and change socket state, but
5443 +@@ -707,7 +713,7 @@ static void move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
5444 + */
5445 + if (mptcp_pending_data_fin(sk, NULL))
5446 + mptcp_schedule_work(sk);
5447 +- mptcp_data_unlock(sk);
5448 ++ return moved > 0;
5449 + }
5450 +
5451 + void mptcp_data_ready(struct sock *sk, struct sock *ssk)
5452 +@@ -715,7 +721,6 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
5453 + struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
5454 + struct mptcp_sock *msk = mptcp_sk(sk);
5455 + int sk_rbuf, ssk_rbuf;
5456 +- bool wake;
5457 +
5458 + /* The peer can send data while we are shutting down this
5459 + * subflow at msk destruction time, but we must avoid enqueuing
5460 +@@ -724,28 +729,22 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
5461 + if (unlikely(subflow->disposable))
5462 + return;
5463 +
5464 +- /* move_skbs_to_msk below can legitly clear the data_avail flag,
5465 +- * but we will need later to properly woke the reader, cache its
5466 +- * value
5467 +- */
5468 +- wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
5469 +- if (wake)
5470 +- set_bit(MPTCP_DATA_READY, &msk->flags);
5471 +-
5472 + ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
5473 + sk_rbuf = READ_ONCE(sk->sk_rcvbuf);
5474 + if (unlikely(ssk_rbuf > sk_rbuf))
5475 + sk_rbuf = ssk_rbuf;
5476 +
5477 +- /* over limit? can't append more skbs to msk */
5478 ++	/* over limit? can't append more skbs to msk. Also, no need to wake up */
5479 + if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf)
5480 +- goto wake;
5481 +-
5482 +- move_skbs_to_msk(msk, ssk);
5483 ++ return;
5484 +
5485 +-wake:
5486 +- if (wake)
5487 ++ /* Wake-up the reader only for in-sequence data */
5488 ++ mptcp_data_lock(sk);
5489 ++ if (move_skbs_to_msk(msk, ssk)) {
5490 ++ set_bit(MPTCP_DATA_READY, &msk->flags);
5491 + sk->sk_data_ready(sk);
5492 ++ }
5493 ++ mptcp_data_unlock(sk);
5494 + }
5495 +
5496 + void __mptcp_flush_join_list(struct mptcp_sock *msk)
5497 +@@ -848,7 +847,7 @@ static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
5498 + sock_owned_by_me(sk);
5499 +
5500 + mptcp_for_each_subflow(msk, subflow) {
5501 +- if (subflow->data_avail)
5502 ++ if (READ_ONCE(subflow->data_avail))
5503 + return mptcp_subflow_tcp_sock(subflow);
5504 + }
5505 +
5506 +@@ -1939,6 +1938,9 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
5507 + done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
5508 + mptcp_data_unlock(sk);
5509 + tcp_cleanup_rbuf(ssk, moved);
5510 ++
5511 ++ if (unlikely(ssk->sk_err))
5512 ++ __mptcp_error_report(sk);
5513 + unlock_sock_fast(ssk, slowpath);
5514 + } while (!done);
5515 +
5516 +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
5517 +index e21a5bc36cf08..14e89e4bd4a80 100644
5518 +--- a/net/mptcp/protocol.h
5519 ++++ b/net/mptcp/protocol.h
5520 +@@ -372,7 +372,6 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
5521 + enum mptcp_data_avail {
5522 + MPTCP_SUBFLOW_NODATA,
5523 + MPTCP_SUBFLOW_DATA_AVAIL,
5524 +- MPTCP_SUBFLOW_OOO_DATA
5525 + };
5526 +
5527 + struct mptcp_delegated_action {
5528 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
5529 +index 8425cd393bf3e..d6d8ad4f918e7 100644
5530 +--- a/net/mptcp/subflow.c
5531 ++++ b/net/mptcp/subflow.c
5532 +@@ -754,10 +754,10 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
5533 + return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
5534 + }
5535 +
5536 +-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
5537 ++static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
5538 + {
5539 +- WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
5540 +- ssn, subflow->map_subflow_seq, subflow->map_data_len);
5541 ++ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
5542 ++ ssn, subflow->map_subflow_seq, subflow->map_data_len);
5543 + }
5544 +
5545 + static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
5546 +@@ -782,13 +782,13 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
5547 + /* Mapping covers data later in the subflow stream,
5548 + * currently unsupported.
5549 + */
5550 +- warn_bad_map(subflow, ssn);
5551 ++ dbg_bad_map(subflow, ssn);
5552 + return false;
5553 + }
5554 + if (unlikely(!before(ssn, subflow->map_subflow_seq +
5555 + subflow->map_data_len))) {
5556 +	/* Mapping covers past subflow data, invalid */
5557 +- warn_bad_map(subflow, ssn + skb->len);
5558 ++ dbg_bad_map(subflow, ssn);
5559 + return false;
5560 + }
5561 + return true;
5562 +@@ -974,7 +974,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
5563 + pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
5564 + subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
5565 + if (!skb_peek(&ssk->sk_receive_queue))
5566 +- subflow->data_avail = 0;
5567 ++ WRITE_ONCE(subflow->data_avail, 0);
5568 + if (subflow->data_avail)
5569 + return true;
5570 +
5571 +@@ -1012,18 +1012,13 @@ static bool subflow_check_data_avail(struct sock *ssk)
5572 + ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
5573 + pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
5574 + ack_seq);
5575 +- if (ack_seq == old_ack) {
5576 +- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
5577 +- break;
5578 +- } else if (after64(ack_seq, old_ack)) {
5579 +- subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
5580 +- break;
5581 ++ if (unlikely(before64(ack_seq, old_ack))) {
5582 ++ mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
5583 ++ continue;
5584 + }
5585 +
5586 +- /* only accept in-sequence mapping. Old values are spurious
5587 +- * retransmission
5588 +- */
5589 +- mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
5590 ++ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
5591 ++ break;
5592 + }
5593 + return true;
5594 +
5595 +@@ -1038,10 +1033,9 @@ fallback:
5596 + * subflow_error_report() will introduce the appropriate barriers
5597 + */
5598 + ssk->sk_err = EBADMSG;
5599 +- ssk->sk_error_report(ssk);
5600 + tcp_set_state(ssk, TCP_CLOSE);
5601 + tcp_send_active_reset(ssk, GFP_ATOMIC);
5602 +- subflow->data_avail = 0;
5603 ++ WRITE_ONCE(subflow->data_avail, 0);
5604 + return false;
5605 + }
5606 +
5607 +@@ -1051,7 +1045,7 @@ fallback:
5608 + subflow->map_seq = READ_ONCE(msk->ack_seq);
5609 + subflow->map_data_len = skb->len;
5610 + subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
5611 +- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
5612 ++ WRITE_ONCE(subflow->data_avail, MPTCP_SUBFLOW_DATA_AVAIL);
5613 + return true;
5614 + }
5615 +
5616 +@@ -1063,7 +1057,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
5617 + if (subflow->map_valid &&
5618 + mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len) {
5619 + subflow->map_valid = 0;
5620 +- subflow->data_avail = 0;
5621 ++ WRITE_ONCE(subflow->data_avail, 0);
5622 +
5623 + pr_debug("Done with mapping: seq=%u data_len=%u",
5624 + subflow->map_subflow_seq,
5625 +@@ -1091,41 +1085,6 @@ void mptcp_space(const struct sock *ssk, int *space, int *full_space)
5626 + *full_space = tcp_full_space(sk);
5627 + }
5628 +
5629 +-static void subflow_data_ready(struct sock *sk)
5630 +-{
5631 +- struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
5632 +- u16 state = 1 << inet_sk_state_load(sk);
5633 +- struct sock *parent = subflow->conn;
5634 +- struct mptcp_sock *msk;
5635 +-
5636 +- msk = mptcp_sk(parent);
5637 +- if (state & TCPF_LISTEN) {
5638 +- /* MPJ subflow are removed from accept queue before reaching here,
5639 +- * avoid stray wakeups
5640 +- */
5641 +- if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
5642 +- return;
5643 +-
5644 +- set_bit(MPTCP_DATA_READY, &msk->flags);
5645 +- parent->sk_data_ready(parent);
5646 +- return;
5647 +- }
5648 +-
5649 +- WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
5650 +- !subflow->mp_join && !(state & TCPF_CLOSE));
5651 +-
5652 +- if (mptcp_subflow_data_available(sk))
5653 +- mptcp_data_ready(parent, sk);
5654 +-}
5655 +-
5656 +-static void subflow_write_space(struct sock *ssk)
5657 +-{
5658 +- struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
5659 +-
5660 +- mptcp_propagate_sndbuf(sk, ssk);
5661 +- mptcp_write_space(sk);
5662 +-}
5663 +-
5664 + void __mptcp_error_report(struct sock *sk)
5665 + {
5666 + struct mptcp_subflow_context *subflow;
5667 +@@ -1166,6 +1125,43 @@ static void subflow_error_report(struct sock *ssk)
5668 + mptcp_data_unlock(sk);
5669 + }
5670 +
5671 ++static void subflow_data_ready(struct sock *sk)
5672 ++{
5673 ++ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
5674 ++ u16 state = 1 << inet_sk_state_load(sk);
5675 ++ struct sock *parent = subflow->conn;
5676 ++ struct mptcp_sock *msk;
5677 ++
5678 ++ msk = mptcp_sk(parent);
5679 ++ if (state & TCPF_LISTEN) {
5680 ++	/* MPJ subflows are removed from accept queue before reaching here,
5681 ++ * avoid stray wakeups
5682 ++ */
5683 ++ if (reqsk_queue_empty(&inet_csk(sk)->icsk_accept_queue))
5684 ++ return;
5685 ++
5686 ++ set_bit(MPTCP_DATA_READY, &msk->flags);
5687 ++ parent->sk_data_ready(parent);
5688 ++ return;
5689 ++ }
5690 ++
5691 ++ WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
5692 ++ !subflow->mp_join && !(state & TCPF_CLOSE));
5693 ++
5694 ++ if (mptcp_subflow_data_available(sk))
5695 ++ mptcp_data_ready(parent, sk);
5696 ++ else if (unlikely(sk->sk_err))
5697 ++ subflow_error_report(sk);
5698 ++}
5699 ++
5700 ++static void subflow_write_space(struct sock *ssk)
5701 ++{
5702 ++ struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
5703 ++
5704 ++ mptcp_propagate_sndbuf(sk, ssk);
5705 ++ mptcp_write_space(sk);
5706 ++}
5707 ++
5708 + static struct inet_connection_sock_af_ops *
5709 + subflow_default_af_ops(struct sock *sk)
5710 + {
5711 +@@ -1474,6 +1470,8 @@ static void subflow_state_change(struct sock *sk)
5712 + */
5713 + if (mptcp_subflow_data_available(sk))
5714 + mptcp_data_ready(parent, sk);
5715 ++ else if (unlikely(sk->sk_err))
5716 ++ subflow_error_report(sk);
5717 +
5718 + subflow_sched_work_if_closed(mptcp_sk(parent), sk);
5719 +
5720 +diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
5721 +index b100c04a0e435..3d6d49420db8b 100644
5722 +--- a/net/netfilter/nf_synproxy_core.c
5723 ++++ b/net/netfilter/nf_synproxy_core.c
5724 +@@ -31,6 +31,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
5725 + int length = (th->doff * 4) - sizeof(*th);
5726 + u8 buf[40], *ptr;
5727 +
5728 ++ if (unlikely(length < 0))
5729 ++ return false;
5730 ++
5731 + ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
5732 + if (ptr == NULL)
5733 + return false;
5734 +@@ -47,6 +50,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
5735 + length--;
5736 + continue;
5737 + default:
5738 ++ if (length < 2)
5739 ++ return true;
5740 + opsize = *ptr++;
5741 + if (opsize < 2)
5742 + return true;
5743 +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
5744 +index 31016c144c48b..9d5ea23529657 100644
5745 +--- a/net/netfilter/nf_tables_api.c
5746 ++++ b/net/netfilter/nf_tables_api.c
5747 +@@ -4317,13 +4317,44 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
5748 + err = nf_tables_set_alloc_name(&ctx, set, name);
5749 + kfree(name);
5750 + if (err < 0)
5751 +- goto err_set_alloc_name;
5752 ++ goto err_set_name;
5753 ++
5754 ++ udata = NULL;
5755 ++ if (udlen) {
5756 ++ udata = set->data + size;
5757 ++ nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
5758 ++ }
5759 ++
5760 ++ INIT_LIST_HEAD(&set->bindings);
5761 ++ set->table = table;
5762 ++ write_pnet(&set->net, net);
5763 ++ set->ops = ops;
5764 ++ set->ktype = ktype;
5765 ++ set->klen = desc.klen;
5766 ++ set->dtype = dtype;
5767 ++ set->objtype = objtype;
5768 ++ set->dlen = desc.dlen;
5769 ++ set->flags = flags;
5770 ++ set->size = desc.size;
5771 ++ set->policy = policy;
5772 ++ set->udlen = udlen;
5773 ++ set->udata = udata;
5774 ++ set->timeout = timeout;
5775 ++ set->gc_int = gc_int;
5776 ++
5777 ++ set->field_count = desc.field_count;
5778 ++ for (i = 0; i < desc.field_count; i++)
5779 ++ set->field_len[i] = desc.field_len[i];
5780 ++
5781 ++ err = ops->init(set, &desc, nla);
5782 ++ if (err < 0)
5783 ++ goto err_set_init;
5784 +
5785 + if (nla[NFTA_SET_EXPR]) {
5786 + expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
5787 + if (IS_ERR(expr)) {
5788 + err = PTR_ERR(expr);
5789 +- goto err_set_alloc_name;
5790 ++ goto err_set_expr_alloc;
5791 + }
5792 + set->exprs[0] = expr;
5793 + set->num_exprs++;
5794 +@@ -4334,74 +4365,44 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
5795 +
5796 + if (!(flags & NFT_SET_EXPR)) {
5797 + err = -EINVAL;
5798 +- goto err_set_alloc_name;
5799 ++ goto err_set_expr_alloc;
5800 + }
5801 + i = 0;
5802 + nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
5803 + if (i == NFT_SET_EXPR_MAX) {
5804 + err = -E2BIG;
5805 +- goto err_set_init;
5806 ++ goto err_set_expr_alloc;
5807 + }
5808 + if (nla_type(tmp) != NFTA_LIST_ELEM) {
5809 + err = -EINVAL;
5810 +- goto err_set_init;
5811 ++ goto err_set_expr_alloc;
5812 + }
5813 + expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
5814 + if (IS_ERR(expr)) {
5815 + err = PTR_ERR(expr);
5816 +- goto err_set_init;
5817 ++ goto err_set_expr_alloc;
5818 + }
5819 + set->exprs[i++] = expr;
5820 + set->num_exprs++;
5821 + }
5822 + }
5823 +
5824 +- udata = NULL;
5825 +- if (udlen) {
5826 +- udata = set->data + size;
5827 +- nla_memcpy(udata, nla[NFTA_SET_USERDATA], udlen);
5828 +- }
5829 +-
5830 +- INIT_LIST_HEAD(&set->bindings);
5831 +- set->table = table;
5832 +- write_pnet(&set->net, net);
5833 +- set->ops = ops;
5834 +- set->ktype = ktype;
5835 +- set->klen = desc.klen;
5836 +- set->dtype = dtype;
5837 +- set->objtype = objtype;
5838 +- set->dlen = desc.dlen;
5839 +- set->flags = flags;
5840 +- set->size = desc.size;
5841 +- set->policy = policy;
5842 +- set->udlen = udlen;
5843 +- set->udata = udata;
5844 +- set->timeout = timeout;
5845 +- set->gc_int = gc_int;
5846 + set->handle = nf_tables_alloc_handle(table);
5847 +
5848 +- set->field_count = desc.field_count;
5849 +- for (i = 0; i < desc.field_count; i++)
5850 +- set->field_len[i] = desc.field_len[i];
5851 +-
5852 +- err = ops->init(set, &desc, nla);
5853 +- if (err < 0)
5854 +- goto err_set_init;
5855 +-
5856 + err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
5857 + if (err < 0)
5858 +- goto err_set_trans;
5859 ++ goto err_set_expr_alloc;
5860 +
5861 + list_add_tail_rcu(&set->list, &table->sets);
5862 + table->use++;
5863 + return 0;
5864 +
5865 +-err_set_trans:
5866 +- ops->destroy(set);
5867 +-err_set_init:
5868 ++err_set_expr_alloc:
5869 + for (i = 0; i < set->num_exprs; i++)
5870 + nft_expr_destroy(&ctx, set->exprs[i]);
5871 +-err_set_alloc_name:
5872 ++
5873 ++ ops->destroy(set);
5874 ++err_set_init:
5875 + kfree(set->name);
5876 + err_set_name:
5877 + kvfree(set);
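
The nf_tables_api.c reordering initializes the set (ops->init()) before any expressions are allocated, and the error labels are renamed so each one unwinds exactly what succeeded before the failure: expressions first, then ops->destroy(), then the name, then the set itself. The shape of that goto ladder, as a generic runnable sketch (hypothetical resources, not the nftables API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int create_object(int fail_at)
    {
        char *name = NULL, *state = NULL, *expr = NULL;

        name = strdup("myset");
        if (!name || fail_at == 1)
            goto err_name;
        state = malloc(64);              /* like ops->init() */
        if (!state || fail_at == 2)
            goto err_init;
        expr = malloc(32);               /* like expression allocation */
        if (!expr || fail_at == 3)
            goto err_expr;
        printf("created (fail_at=%d)\n", fail_at);
        free(expr); free(state); free(name);
        return 0;

    err_expr:
        free(expr);                      /* destroy expressions first */
    err_init:
        free(state);                     /* then undo init */
    err_name:
        free(name);                      /* then the name */
        printf("unwound at stage %d\n", fail_at);
        return -1;
    }

    int main(void)
    {
        create_object(2);                /* fails: unwinds init + name */
        create_object(0);                /* succeeds */
        return 0;
    }
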
5878 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
5879 +index 1e4fb568fa841..24f10bf7d8a3f 100644
5880 +--- a/net/qrtr/qrtr.c
5881 ++++ b/net/qrtr/qrtr.c
5882 +@@ -435,7 +435,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
5883 + struct qrtr_sock *ipc;
5884 + struct sk_buff *skb;
5885 + struct qrtr_cb *cb;
5886 +- unsigned int size;
5887 ++ size_t size;
5888 + unsigned int ver;
5889 + size_t hdrlen;
5890 +
5891 +diff --git a/net/rds/recv.c b/net/rds/recv.c
5892 +index aba4afe4dfedc..967d115f97efd 100644
5893 +--- a/net/rds/recv.c
5894 ++++ b/net/rds/recv.c
5895 +@@ -714,7 +714,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
5896 +
5897 + if (rds_cmsg_recv(inc, msg, rs)) {
5898 + ret = -EFAULT;
5899 +- goto out;
5900 ++ break;
5901 + }
5902 + rds_recvmsg_zcookie(rs, msg);
5903 +
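
Switching the rds_cmsg_recv() failure from `goto out` to `break` matters because the reference on the incoming message is dropped by code sitting between the loop and the out label; jumping straight to out leaked it. A standalone sketch of the shape (malloc/free stand in for the message reference, not RDS APIs):

#include <stdio.h>
#include <stdlib.h>

static int recv_once(int fail)
{
	char *inc = NULL;   /* stand-in for the incoming-message ref */
	int ret = 0;

	while (1) {
		inc = malloc(64);      /* take a reference each pass */
		if (!inc)
			return -12;    /* -ENOMEM */
		if (fail) {
			ret = -14;     /* -EFAULT */
			break;         /* `goto out` would skip free(inc) */
		}
		break;                 /* normal completion */
	}

	free(inc);                     /* common release after the loop */
	inc = NULL;
/* out: */
	return ret;
}

int main(void)
{
	printf("%d\n", recv_once(1));  /* -14, and the ref is released */
	return 0;
}
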
5904 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
5905 +index ba7f57cb41c30..143786d8cde03 100644
5906 +--- a/net/sched/act_ct.c
5907 ++++ b/net/sched/act_ct.c
5908 +@@ -904,14 +904,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
5909 + }
5910 +
5911 + err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
5912 +- if (err == NF_ACCEPT &&
5913 +- ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
5914 +- if (maniptype == NF_NAT_MANIP_SRC)
5915 +- maniptype = NF_NAT_MANIP_DST;
5916 +- else
5917 +- maniptype = NF_NAT_MANIP_SRC;
5918 +-
5919 +- err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
5920 ++ if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
5921 ++ if (ct->status & IPS_SRC_NAT) {
5922 ++ if (maniptype == NF_NAT_MANIP_SRC)
5923 ++ maniptype = NF_NAT_MANIP_DST;
5924 ++ else
5925 ++ maniptype = NF_NAT_MANIP_SRC;
5926 ++
5927 ++ err = ct_nat_execute(skb, ct, ctinfo, range,
5928 ++ maniptype);
5929 ++ } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
5930 ++ err = ct_nat_execute(skb, ct, ctinfo, NULL,
5931 ++ NF_NAT_MANIP_SRC);
5932 ++ }
5933 + }
5934 + return err;
5935 + #else
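
The act_ct rework changes when a second NAT pass runs: as before, connections with both SNAT and DNAT get the opposite manip after the first pass succeeds, but a DNAT-only connection in the original direction now also gets a source manip with a NULL range, allocating a null source binding so replies can be translated back. A pure-logic sketch of the decision (flag and enum names mirror the kernel's, but this is standalone illustration):

#include <stdio.h>

#define IPS_SRC_NAT  (1u << 0)     /* illustrative bit values */
#define IPS_DST_NAT  (1u << 1)

enum manip { MANIP_SRC, MANIP_DST };
enum dir   { DIR_ORIGINAL, DIR_REPLY };

/* Which manip to run after the first ct_nat_execute() accepted,
 * or -1 when no follow-up pass is needed. */
static int followup_manip(unsigned int status, enum manip first, enum dir dir)
{
	if (!(status & IPS_DST_NAT))
		return -1;
	if (status & IPS_SRC_NAT)
		return first == MANIP_SRC ? MANIP_DST : MANIP_SRC;
	if (dir == DIR_ORIGINAL)
		return MANIP_SRC;      /* null src binding, range == NULL */
	return -1;
}

int main(void)
{
	/* DNAT-only, original direction: now also runs a source manip */
	printf("%d\n", followup_manip(IPS_DST_NAT, MANIP_DST, DIR_ORIGINAL));
	return 0;
}
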
5936 +diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
5937 +index 7d37638ee1c7a..5c15968b5155b 100644
5938 +--- a/net/sched/sch_cake.c
5939 ++++ b/net/sched/sch_cake.c
5940 +@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
5941 + }
5942 +
5943 + tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
5944 +- if (!tcph)
5945 ++ if (!tcph || tcph->doff < 5)
5946 + return NULL;
5947 +
5948 + return skb_header_pointer(skb, offset,
5949 +@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
5950 + length--;
5951 + continue;
5952 + }
5953 ++ if (length < 2)
5954 ++ break;
5955 + opsize = *ptr++;
5956 + if (opsize < 2 || opsize > length)
5957 + break;
5958 +@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
5959 + length--;
5960 + continue;
5961 + }
5962 ++ if (length < 2)
5963 ++ break;
5964 + opsize = *ptr++;
5965 + if (opsize < 2 || opsize > length)
5966 + break;
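
Both cake option walkers get the same two hardenings: reject TCP headers whose doff claims fewer than the 5 mandatory 32-bit words, and stop the option walk when fewer than two bytes remain, since reading opsize at that point would run past the header. A standalone sketch of the hardened walk over a raw byte buffer (the option bytes are made up):

#include <stdio.h>

#define TCPOPT_EOL 0
#define TCPOPT_NOP 1

static void walk_tcp_options(const unsigned char *ptr, int length)
{
	while (length > 0) {
		int opcode = *ptr++;
		int opsize;

		if (opcode == TCPOPT_EOL)
			break;
		if (opcode == TCPOPT_NOP) {
			length--;
			continue;
		}
		if (length < 2)        /* the added guard */
			break;
		opsize = *ptr++;
		if (opsize < 2 || opsize > length)
			break;
		printf("option %d, size %d\n", opcode, opsize);
		ptr += opsize - 2;
		length -= opsize;
	}
}

int main(void)
{
	/* MSS option, a NOP, then a lone trailing option kind: without
	 * the guard, opsize for the last kind is read out of bounds. */
	const unsigned char opts[] = { 2, 4, 0x05, 0xb4, 1, 8 };

	walk_tcp_options(opts, sizeof(opts));
	return 0;
}
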
5967 +diff --git a/net/socket.c b/net/socket.c
5968 +index 84a8049c2b099..03259cb919f7e 100644
5969 +--- a/net/socket.c
5970 ++++ b/net/socket.c
5971 +@@ -1072,19 +1072,6 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
5972 + * what to do with it - that's up to the protocol still.
5973 + */
5974 +
5975 +-/**
5976 +- * get_net_ns - increment the refcount of the network namespace
5977 +- * @ns: common namespace (net)
5978 +- *
5979 +- * Returns the net's common namespace.
5980 +- */
5981 +-
5982 +-struct ns_common *get_net_ns(struct ns_common *ns)
5983 +-{
5984 +- return &get_net(container_of(ns, struct net, ns))->ns;
5985 +-}
5986 +-EXPORT_SYMBOL_GPL(get_net_ns);
5987 +-
5988 + static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
5989 + {
5990 + struct socket *sock;
5991 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
5992 +index 5a31307ceb76d..5d1192ceb1397 100644
5993 +--- a/net/unix/af_unix.c
5994 ++++ b/net/unix/af_unix.c
5995 +@@ -535,12 +535,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
5996 + u->path.mnt = NULL;
5997 + state = sk->sk_state;
5998 + sk->sk_state = TCP_CLOSE;
5999 ++
6000 ++ skpair = unix_peer(sk);
6001 ++ unix_peer(sk) = NULL;
6002 ++
6003 + unix_state_unlock(sk);
6004 +
6005 + wake_up_interruptible_all(&u->peer_wait);
6006 +
6007 +- skpair = unix_peer(sk);
6008 +-
6009 + if (skpair != NULL) {
6010 + if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
6011 + unix_state_lock(skpair);
6012 +@@ -555,7 +557,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
6013 +
6014 + unix_dgram_peer_wake_disconnect(sk, skpair);
6015 + sock_put(skpair); /* It may now die */
6016 +- unix_peer(sk) = NULL;
6017 + }
6018 +
6019 + /* Try to flush out this socket. Throw out buffers at least */
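
The af_unix change moves the peer fetch-and-clear inside the unix_state_lock critical section: the pointer is snapshotted and NULLed while the lock is held, so a concurrent sender can no longer observe a half-torn-down peer, and the later sock_put() operates on a private snapshot. A standalone sketch of that snapshot-and-clear pattern (a pthread mutex stands in for the socket state lock):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static int *peer;

static int *release_peer(void)
{
	int *snap;

	pthread_mutex_lock(&state_lock);
	snap = peer;          /* read ... */
	peer = NULL;          /* ... and clear under the same lock */
	pthread_mutex_unlock(&state_lock);

	return snap;          /* private snapshot: safe to use unlocked */
}

int main(void)
{
	int value = 42;
	int *snap;

	peer = &value;
	snap = release_peer();
	printf("%d\n", snap ? *snap : -1);
	return 0;
}
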
6020 +diff --git a/net/wireless/Makefile b/net/wireless/Makefile
6021 +index 2eee93985ab0d..af590ae606b69 100644
6022 +--- a/net/wireless/Makefile
6023 ++++ b/net/wireless/Makefile
6024 +@@ -28,7 +28,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
6025 + @$(kecho) " GEN $@"
6026 + @(echo '#include "reg.h"'; \
6027 + echo 'const u8 shipped_regdb_certs[] = {'; \
6028 +- cat $^ ; \
6029 ++ echo | cat - $^ ; \
6030 + echo '};'; \
6031 + echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
6032 + ) > $@
6033 +diff --git a/net/wireless/core.c b/net/wireless/core.c
6034 +index 589ee5a69a2e5..0e364f32794d3 100644
6035 +--- a/net/wireless/core.c
6036 ++++ b/net/wireless/core.c
6037 +@@ -1339,6 +1339,11 @@ void cfg80211_register_wdev(struct cfg80211_registered_device *rdev,
6038 + rdev->devlist_generation++;
6039 + wdev->registered = true;
6040 +
6041 ++ if (wdev->netdev &&
6042 ++ sysfs_create_link(&wdev->netdev->dev.kobj, &rdev->wiphy.dev.kobj,
6043 ++ "phy80211"))
6044 ++ pr_err("failed to add phy80211 symlink to netdev!\n");
6045 ++
6046 + nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
6047 + }
6048 +
6049 +@@ -1364,14 +1369,6 @@ int cfg80211_register_netdevice(struct net_device *dev)
6050 + if (ret)
6051 + goto out;
6052 +
6053 +- if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj,
6054 +- "phy80211")) {
6055 +- pr_err("failed to add phy80211 symlink to netdev!\n");
6056 +- unregister_netdevice(dev);
6057 +- ret = -EINVAL;
6058 +- goto out;
6059 +- }
6060 +-
6061 + cfg80211_register_wdev(rdev, wdev);
6062 + ret = 0;
6063 + out:
6064 +diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
6065 +index a95c79d183492..a817d8e3e4b36 100644
6066 +--- a/net/wireless/pmsr.c
6067 ++++ b/net/wireless/pmsr.c
6068 +@@ -324,6 +324,7 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
6069 + gfp_t gfp)
6070 + {
6071 + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
6072 ++ struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
6073 + struct sk_buff *msg;
6074 + void *hdr;
6075 +
6076 +@@ -354,9 +355,20 @@ free_msg:
6077 + nlmsg_free(msg);
6078 + free_request:
6079 + spin_lock_bh(&wdev->pmsr_lock);
6080 +- list_del(&req->list);
6081 ++ /*
6082 ++ * cfg80211_pmsr_process_abort() may have already moved this request
6083 ++ * to the free list, and will free it later. In this case, don't free
6084 ++ * it here.
6085 ++ */
6086 ++ list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
6087 ++ if (tmp == req) {
6088 ++ list_del(&req->list);
6089 ++ to_free = req;
6090 ++ break;
6091 ++ }
6092 ++ }
6093 + spin_unlock_bh(&wdev->pmsr_lock);
6094 +- kfree(req);
6095 ++ kfree(to_free);
6096 + }
6097 + EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
6098 +
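
The pmsr fix settles an ownership race: cfg80211_pmsr_process_abort() may already have unlinked the request to free it itself, so the completion path now frees the request only if it still finds it on wdev->pmsr_list under the lock. A standalone sketch of the "free only if we unlinked it" idea (a plain singly linked list stands in for the kernel's list_head machinery):

#include <stdio.h>
#include <stdlib.h>

struct req {
	struct req *next;
	int id;
};

static struct req *pmsr_list;

/* Returns the request if we unlinked it (so we own the free),
 * or NULL if another path already took it off the list. */
static struct req *unlink_if_queued(struct req *target)
{
	struct req **pp;

	/* the lock would be held here in the real code */
	for (pp = &pmsr_list; *pp; pp = &(*pp)->next) {
		if (*pp == target) {
			*pp = target->next;
			return target;
		}
	}
	return NULL;
}

int main(void)
{
	struct req *r = calloc(1, sizeof(*r));
	struct req *owner;

	r->next = pmsr_list;
	pmsr_list = r;

	owner = unlink_if_queued(r);   /* abort path unlinks it first */
	free(unlink_if_queued(r));     /* completion path: free(NULL), no-op */
	free(owner);                   /* the path that unlinked it frees it */
	return 0;
}
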
6099 +diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
6100 +index 9b959e3b09c6d..0c3f05c9be27a 100644
6101 +--- a/net/wireless/sysfs.c
6102 ++++ b/net/wireless/sysfs.c
6103 +@@ -133,6 +133,10 @@ static int wiphy_resume(struct device *dev)
6104 + if (rdev->wiphy.registered && rdev->ops->resume)
6105 + ret = rdev_resume(rdev);
6106 + wiphy_unlock(&rdev->wiphy);
6107 ++
6108 ++ if (ret)
6109 ++ cfg80211_shutdown_all_interfaces(&rdev->wiphy);
6110 ++
6111 + rtnl_unlock();
6112 +
6113 + return ret;
6114 +diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
6115 +index 91a4ef7f620ca..a9b079d56fd69 100644
6116 +--- a/sound/soc/codecs/rt5659.c
6117 ++++ b/sound/soc/codecs/rt5659.c
6118 +@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
6119 + return 0;
6120 + }
6121 +
6122 +-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
6123 ++static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
6124 + SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
6125 + NULL, 0),
6126 +- SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
6127 +- NULL, 0),
6128 ++ SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
6129 ++ 0, NULL, 0),
6130 + SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
6131 + RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
6132 ++};
6133 ++
6134 ++static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
6135 ++ SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
6136 ++ NULL, 0),
6137 + SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
6138 + RT5659_PWR_VREF3_BIT, 0, NULL, 0),
6139 +
6140 +@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
6141 + RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
6142 +
6143 + /* Input Side */
6144 +- SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
6145 +- 0, NULL, 0),
6146 + SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
6147 + 0, NULL, 0),
6148 + SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
6149 +@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
6150 +
6151 + static int rt5659_probe(struct snd_soc_component *component)
6152 + {
6153 ++ struct snd_soc_dapm_context *dapm =
6154 ++ snd_soc_component_get_dapm(component);
6155 + struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
6156 +
6157 + rt5659->component = component;
6158 +
6159 ++ switch (rt5659->pdata.jd_src) {
6160 ++ case RT5659_JD_HDA_HEADER:
6161 ++ break;
6162 ++
6163 ++ default:
6164 ++ snd_soc_dapm_new_controls(dapm,
6165 ++ rt5659_particular_dapm_widgets,
6166 ++ ARRAY_SIZE(rt5659_particular_dapm_widgets));
6167 ++ break;
6168 ++ }
6169 ++
6170 + return 0;
6171 + }
6172 +
6173 +diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
6174 +index b49f1e16125d4..d1dd7f720ba48 100644
6175 +--- a/sound/soc/codecs/rt5682-sdw.c
6176 ++++ b/sound/soc/codecs/rt5682-sdw.c
6177 +@@ -462,7 +462,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
6178 +
6179 + regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
6180 + RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
6181 +- regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
6182 ++ regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
6183 ++ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
6184 + regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
6185 + RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
6186 + regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
6187 +diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
6188 +index 81866aeb3fbfa..55b2a1f52ca37 100644
6189 +--- a/sound/soc/codecs/tas2562.h
6190 ++++ b/sound/soc/codecs/tas2562.h
6191 +@@ -57,13 +57,13 @@
6192 + #define TAS2562_TDM_CFG0_RAMPRATE_MASK BIT(5)
6193 + #define TAS2562_TDM_CFG0_RAMPRATE_44_1 BIT(5)
6194 + #define TAS2562_TDM_CFG0_SAMPRATE_MASK GENMASK(3, 1)
6195 +-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ 0x0
6196 +-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ 0x1
6197 +-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ 0x2
6198 +-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ 0x3
6199 +-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ 0x4
6200 +-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ 0x5
6201 +-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
6202 ++#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ (0x0 << 1)
6203 ++#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ (0x1 << 1)
6204 ++#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ (0x2 << 1)
6205 ++#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ (0x3 << 1)
6206 ++#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ (0x4 << 1)
6207 ++#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ (0x5 << 1)
6208 ++#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ (0x6 << 1)
6209 +
6210 + #define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
6211 +
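
The TAS2562 sample-rate field lives in bits 3:1 (GENMASK(3, 1)), so the raw selector values must be shifted left by one before they land in the register; unshifted, 0x4 would program field value 2 (22.05/24 kHz) instead of 4 (44.1/48 kHz). A standalone sketch of the difference (GENMASK is re-implemented locally for a userspace build):

#include <stdio.h>

#define GENMASK(h, l) (((~0u) << (l)) & (~0u >> (31 - (h))))
#define SAMPRATE_MASK GENMASK(3, 1)            /* bits 3:1 -> 0x0e */

int main(void)
{
	unsigned int unshifted = 0x4;          /* value as defined before */
	unsigned int shifted   = 0x4 << 1;     /* value as defined now */

	printf("field(unshifted)=%u field(shifted)=%u\n",
	       (unshifted & SAMPRATE_MASK) >> 1,   /* 2: wrong rate */
	       (shifted & SAMPRATE_MASK) >> 1);    /* 4: 44.1/48 kHz */
	return 0;
}
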
6212 +diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
6213 +index f62f81ceab0d2..9dcbe5d5a428c 100644
6214 +--- a/sound/soc/fsl/fsl-asoc-card.c
6215 ++++ b/sound/soc/fsl/fsl-asoc-card.c
6216 +@@ -732,6 +732,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
6217 + /* Initialize sound card */
6218 + priv->pdev = pdev;
6219 + priv->card.dev = &pdev->dev;
6220 ++ priv->card.owner = THIS_MODULE;
6221 + ret = snd_soc_of_parse_card_name(&priv->card, "model");
6222 + if (ret) {
6223 + snprintf(priv->name, sizeof(priv->name), "%s-audio",
6224 +diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
6225 +index 936384a94f25e..74d3d8c586080 100644
6226 +--- a/sound/soc/qcom/lpass-cpu.c
6227 ++++ b/sound/soc/qcom/lpass-cpu.c
6228 +@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
6229 + struct snd_soc_dai *dai)
6230 + {
6231 + struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
6232 ++ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
6233 ++ unsigned int id = dai->driver->id;
6234 +
6235 + clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
6236 ++ /*
6237 ++ * Ensure LRCLK is disabled even during device node validation.
6238 ++ * This has no effect if it was already disabled by
6239 ++ * lpass_cpu_daiops_trigger() during suspend.
6240 ++ */
6241 ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
6242 ++ regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
6243 ++ else
6244 ++ regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
6245 ++
6246 ++ /*
6247 ++ * BCLK may not be enabled if lpass_cpu_daiops_prepare is not called
6248 ++ * before lpass_cpu_daiops_shutdown. This clk_disable is paired with
6249 ++ * the clk_enable in lpass_cpu_daiops_prepare.
6250 ++ */
6251 ++ if (drvdata->mi2s_was_prepared[dai->driver->id]) {
6252 ++ drvdata->mi2s_was_prepared[dai->driver->id] = false;
6253 ++ clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
6254 ++ }
6255 ++
6256 + clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
6257 + }
6258 +
6259 +@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
6260 + case SNDRV_PCM_TRIGGER_START:
6261 + case SNDRV_PCM_TRIGGER_RESUME:
6262 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
6263 ++ /*
6264 ++ * Ensure lpass BCLK/LRCLK is enabled during
6265 ++ * device resume as lpass_cpu_daiops_prepare() is not called
6266 ++ * after the device resumes. We don't check mi2s_was_prepared before
6267 ++ * enable/disable BCLK in trigger events because:
6268 ++ * 1. These trigger events are paired, so the BCLK
6269 ++ * enable_count is balanced.
6270 ++ * 2. The BCLK can be shared (e.g. headset and headset mic), so
6271 ++ * we need to increase the enable_count so that we don't
6272 ++ * turn off the shared BCLK while other devices are using
6273 ++ * it.
6274 ++ */
6275 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
6276 + ret = regmap_fields_write(i2sctl->spken, id,
6277 + LPAIF_I2SCTL_SPKEN_ENABLE);
6278 +@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
6279 + case SNDRV_PCM_TRIGGER_STOP:
6280 + case SNDRV_PCM_TRIGGER_SUSPEND:
6281 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
6282 ++ /*
6283 ++ * Ensure lpass BCLK/LRCLK is disabled during
6284 ++ * device suspend.
6285 ++ */
6286 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
6287 + ret = regmap_fields_write(i2sctl->spken, id,
6288 + LPAIF_I2SCTL_SPKEN_DISABLE);
6289 +@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
6290 + return ret;
6291 + }
6292 +
6293 ++static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
6294 ++ struct snd_soc_dai *dai)
6295 ++{
6296 ++ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
6297 ++ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
6298 ++ unsigned int id = dai->driver->id;
6299 ++ int ret;
6300 ++
6301 ++ /*
6302 ++ * Ensure the lpass BCLK/LRCLK is enabled a bit before playback/capture
6303 ++ * data flow starts. This gives the other codec some lead time before
6304 ++ * the data flow
6305 ++ * (e.g. to drop the start-up pop noise before capture starts).
6306 ++ */
6307 ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
6308 ++ ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
6309 ++ else
6310 ++ ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
6311 ++
6312 ++ if (ret) {
6313 ++ dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
6314 ++ return ret;
6315 ++ }
6316 ++
6317 ++ /*
6318 ++ * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
6319 ++ * be called multiple times. This clk_enable is paired with the
6320 ++ * clk_disable in lpass_cpu_daiops_shutdown.
6321 ++ */
6322 ++ if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
6323 ++ ret = clk_enable(drvdata->mi2s_bit_clk[id]);
6324 ++ if (ret) {
6325 ++ dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
6326 ++ return ret;
6327 ++ }
6328 ++ drvdata->mi2s_was_prepared[dai->driver->id] = true;
6329 ++ }
6330 ++ return 0;
6331 ++}
6332 ++
6333 + const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
6334 + .set_sysclk = lpass_cpu_daiops_set_sysclk,
6335 + .startup = lpass_cpu_daiops_startup,
6336 + .shutdown = lpass_cpu_daiops_shutdown,
6337 + .hw_params = lpass_cpu_daiops_hw_params,
6338 + .trigger = lpass_cpu_daiops_trigger,
6339 ++ .prepare = lpass_cpu_daiops_prepare,
6340 + };
6341 + EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
6342 +
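
The new mi2s_was_prepared flag keeps the clk_enable()/clk_disable() pair balanced even though .prepare can run more than once per stream and .shutdown can run without a prior .prepare. A standalone sketch of the guard (the clk calls are stub counters, not the kernel clk API):

#include <stdio.h>
#include <stdbool.h>

static int enable_count;

static int clk_enable_stub(void)   { enable_count++; return 0; }
static void clk_disable_stub(void) { enable_count--; }

static bool was_prepared;

static int dai_prepare(void)
{
	if (!was_prepared) {           /* guard against repeat prepare */
		int ret = clk_enable_stub();

		if (ret)
			return ret;
		was_prepared = true;
	}
	return 0;
}

static void dai_shutdown(void)
{
	if (was_prepared) {            /* paired with the guard above */
		was_prepared = false;
		clk_disable_stub();
	}
}

int main(void)
{
	dai_prepare();
	dai_prepare();                 /* second call is a no-op */
	dai_shutdown();
	printf("enable_count=%d\n", enable_count);  /* 0: balanced */
	return 0;
}
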
6343 +diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
6344 +index 83b2e08ade060..7f72214404baf 100644
6345 +--- a/sound/soc/qcom/lpass.h
6346 ++++ b/sound/soc/qcom/lpass.h
6347 +@@ -67,6 +67,10 @@ struct lpass_data {
6348 + /* MI2S SD lines to use for playback/capture */
6349 + unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
6350 + unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
6351 ++
6352 ++ /* Whether the MI2S prepare dai_op has been called for each port */
6353 ++ bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
6354 ++
6355 + int hdmi_port_enable;
6356 +
6357 + /* low-power audio interface (LPAIF) registers */
6358 +diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
6359 +index 7d6687618d808..d1b327036ae43 100644
6360 +--- a/tools/include/uapi/linux/in.h
6361 ++++ b/tools/include/uapi/linux/in.h
6362 +@@ -289,6 +289,9 @@ struct sockaddr_in {
6363 + /* Address indicating an error return. */
6364 + #define INADDR_NONE ((unsigned long int) 0xffffffff)
6365 +
6366 ++/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
6367 ++#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
6368 ++
6369 + /* Network number for local host loopback. */
6370 + #define IN_LOOPBACKNET 127
6371 +
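
INADDR_DUMMY above is 0xc0000008, i.e. 192.0.0.8, the dummy IPv4 address reserved by RFC 7600; the kernel uses it as the source of ICMP replies when no real address is configured. A quick standalone check that the constant renders as expected:

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0xc0000008UL;

	printf("%lu.%lu.%lu.%lu\n",           /* prints 192.0.0.8 */
	       (addr >> 24) & 0xff, (addr >> 16) & 0xff,
	       (addr >> 8) & 0xff, addr & 0xff);
	return 0;
}
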
6372 +diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
6373 +index 007fe5d594386..fe2bec500bf68 100644
6374 +--- a/tools/lib/bpf/xsk.c
6375 ++++ b/tools/lib/bpf/xsk.c
6376 +@@ -928,7 +928,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
6377 + goto out_put_ctx;
6378 + }
6379 + if (xsk->fd == umem->fd)
6380 +- umem->rx_ring_setup_done = true;
6381 ++ umem->tx_ring_setup_done = true;
6382 + }
6383 +
6384 + err = xsk_get_mmap_offsets(xsk->fd, &off);
6385 +diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
6386 +index 385894b4a8bba..42222a84167f3 100644
6387 +--- a/tools/perf/trace/beauty/include/linux/socket.h
6388 ++++ b/tools/perf/trace/beauty/include/linux/socket.h
6389 +@@ -438,6 +438,4 @@ extern int __sys_socketpair(int family, int type, int protocol,
6390 + int __user *usockvec);
6391 + extern int __sys_shutdown_sock(struct socket *sock, int how);
6392 + extern int __sys_shutdown(int fd, int how);
6393 +-
6394 +-extern struct ns_common *get_net_ns(struct ns_common *ns);
6395 + #endif /* _LINUX_SOCKET_H */
6396 +diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
6397 +index 26c990e323781..939aed36e0c2a 100644
6398 +--- a/tools/perf/util/metricgroup.c
6399 ++++ b/tools/perf/util/metricgroup.c
6400 +@@ -162,10 +162,10 @@ static bool contains_event(struct evsel **metric_events, int num_events,
6401 + return false;
6402 + }
6403 +
6404 +-static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
6405 ++static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
6406 + {
6407 + if (!ev1->pmu_name || !ev2->pmu_name)
6408 +- return false;
6409 ++ return true;
6410 +
6411 + return !strcmp(ev1->pmu_name, ev2->pmu_name);
6412 + }
6413 +@@ -288,7 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
6414 + */
6415 + if (!has_constraint &&
6416 + ev->leader != metric_events[i]->leader &&
6417 +- evsel_same_pmu(ev->leader, metric_events[i]->leader))
6418 ++ evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader))
6419 + break;
6420 + if (!strcmp(metric_events[i]->name, ev->name)) {
6421 + set_bit(ev->idx, evlist_used);
6422 +@@ -1072,16 +1072,18 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
6423 +
6424 + ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
6425 + if (ret)
6426 +- return ret;
6427 ++ goto out;
6428 +
6429 + ret = resolve_metric(d->metric_no_group,
6430 + d->metric_list, NULL, d->ids);
6431 + if (ret)
6432 +- return ret;
6433 ++ goto out;
6434 +
6435 + *(d->has_match) = true;
6436 +
6437 +- return *d->ret;
6438 ++out:
6439 ++ *(d->ret) = ret;
6440 ++ return ret;
6441 + }
6442 +
6443 + static int metricgroup__add_metric(const char *metric, bool metric_no_group,
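
The rename to evsel_same_pmu_or_none() captures the behavior change: when either evsel lacks a PMU name, the helper now reports a match instead of a mismatch, so events without a pmu_name no longer break metric grouping. A standalone sketch of the new contract (plain strings stand in for evsels):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

static bool same_pmu_or_none(const char *a, const char *b)
{
	if (!a || !b)
		return true;    /* was `false` before the fix */
	return strcmp(a, b) == 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       same_pmu_or_none("cpu", "cpu"),      /* 1 */
	       same_pmu_or_none("cpu", NULL),       /* 1: none matches */
	       same_pmu_or_none("cpu", "uncore"));  /* 0 */
	return 0;
}
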
6444 +diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
6445 +index 2b5707738609e..6fad54c7ecb4a 100755
6446 +--- a/tools/testing/selftests/net/fib_tests.sh
6447 ++++ b/tools/testing/selftests/net/fib_tests.sh
6448 +@@ -1384,12 +1384,37 @@ ipv4_rt_replace()
6449 + ipv4_rt_replace_mpath
6450 + }
6451 +
6452 ++# checks that the cached input route on a VRF port is deleted
6453 ++# when the VRF is deleted
6454 ++ipv4_local_rt_cache()
6455 ++{
6456 ++ run_cmd "ip addr add 10.0.0.1/32 dev lo"
6457 ++ run_cmd "ip netns add test-ns"
6458 ++ run_cmd "ip link add veth-outside type veth peer name veth-inside"
6459 ++ run_cmd "ip link add vrf-100 type vrf table 1100"
6460 ++ run_cmd "ip link set veth-outside master vrf-100"
6461 ++ run_cmd "ip link set veth-inside netns test-ns"
6462 ++ run_cmd "ip link set veth-outside up"
6463 ++ run_cmd "ip link set vrf-100 up"
6464 ++ run_cmd "ip route add 10.1.1.1/32 dev veth-outside table 1100"
6465 ++ run_cmd "ip netns exec test-ns ip link set veth-inside up"
6466 ++ run_cmd "ip netns exec test-ns ip addr add 10.1.1.1/32 dev veth-inside"
6467 ++ run_cmd "ip netns exec test-ns ip route add 10.0.0.1/32 dev veth-inside"
6468 ++ run_cmd "ip netns exec test-ns ip route add default via 10.0.0.1"
6469 ++ run_cmd "ip netns exec test-ns ping 10.0.0.1 -c 1 -i 1"
6470 ++ run_cmd "ip link delete vrf-100"
6471 ++
6472 ++ # if we do not hang, the test is a success
6473 ++ log_test $? 0 "Cached route removed from VRF port device"
6474 ++}
6475 ++
6476 + ipv4_route_test()
6477 + {
6478 + route_setup
6479 +
6480 + ipv4_rt_add
6481 + ipv4_rt_replace
6482 ++ ipv4_local_rt_cache
6483 +
6484 + route_cleanup
6485 + }
6486 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
6487 +index 65b3b983efc26..8763706b0d047 100755
6488 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
6489 ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
6490 +@@ -197,9 +197,6 @@ ip -net "$ns4" link set ns4eth3 up
6491 + ip -net "$ns4" route add default via 10.0.3.2
6492 + ip -net "$ns4" route add default via dead:beef:3::2
6493 +
6494 +-# use TCP syn cookies, even if no flooding was detected.
6495 +-ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
6496 +-
6497 + set_ethtool_flags() {
6498 + local ns="$1"
6499 + local dev="$2"
6500 +@@ -711,6 +708,14 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
6501 + exit $ret
6502 + fi
6503 +
6504 ++ # ns1<->ns2 is not subject to reordering/tc delays. Use it to test
6505 ++ # mptcp syncookie support.
6506 ++ if [ $sender = $ns1 ]; then
6507 ++ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
6508 ++ else
6509 ++ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
6510 ++ fi
6511 ++
6512 + run_tests "$ns2" $sender 10.0.1.2
6513 + run_tests "$ns2" $sender dead:beef:1::2
6514 + run_tests "$ns2" $sender 10.0.2.1