From: Mike Pagano <mpagano@g.o>
To: gentoo-commits@l.g.o
Subject: [gentoo-commits] proj/linux-patches:5.10 commit in: /
Date: Wed, 23 Jun 2021 15:12:39
Message-Id: 1624461140.735c9f35ae8368863f1218a6b171c1fdad8d1ede.mpagano@gentoo
1 commit: 735c9f35ae8368863f1218a6b171c1fdad8d1ede
2 Author: Mike Pagano <mpagano <AT> gentoo <DOT> org>
3 AuthorDate: Wed Jun 23 15:12:20 2021 +0000
4 Commit: Mike Pagano <mpagano <AT> gentoo <DOT> org>
5 CommitDate: Wed Jun 23 15:12:20 2021 +0000
6 URL: https://gitweb.gentoo.org/proj/linux-patches.git/commit/?id=735c9f35ae8368863f1218a6b171c1fdad8d1ede
7
8 Linux patch 5.10.46
9
10 Signed-off-by: Mike Pagano <mpagano <AT> gentoo.org>
11
12 0000_README | 4 +
13 1045_linux-5.10.46.patch | 5158 ++++++++++++++++++++++++++++++++++++++++++++++
14 2 files changed, 5162 insertions(+)
15
16 diff --git a/0000_README b/0000_README
17 index 5ac74f5..6abe7e2 100644
18 --- a/0000_README
19 +++ b/0000_README
20 @@ -223,6 +223,10 @@ Patch: 1044_linux-5.10.45.patch
21 From: http://www.kernel.org
22 Desc: Linux 5.10.45
23
24 +Patch: 1045_linux-5.10.46.patch
25 +From: http://www.kernel.org
26 +Desc: Linux 5.10.46
27 +
28 Patch: 1500_XATTR_USER_PREFIX.patch
29 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
30 Desc: Support for namespace user.pax.* on tmpfs.
31
32 diff --git a/1045_linux-5.10.46.patch b/1045_linux-5.10.46.patch
33 new file mode 100644
34 index 0000000..c2e48b5
35 --- /dev/null
36 +++ b/1045_linux-5.10.46.patch
37 @@ -0,0 +1,5158 @@
38 +diff --git a/Documentation/vm/slub.rst b/Documentation/vm/slub.rst
39 +index 03f294a638bd8..d3028554b1e9c 100644
40 +--- a/Documentation/vm/slub.rst
41 ++++ b/Documentation/vm/slub.rst
42 +@@ -181,7 +181,7 @@ SLUB Debug output
43 + Here is a sample of slub debug output::
44 +
45 + ====================================================================
46 +- BUG kmalloc-8: Redzone overwritten
47 ++ BUG kmalloc-8: Right Redzone overwritten
48 + --------------------------------------------------------------------
49 +
50 + INFO: 0xc90f6d28-0xc90f6d2b. First byte 0x00 instead of 0xcc
51 +@@ -189,10 +189,10 @@ Here is a sample of slub debug output::
52 + INFO: Object 0xc90f6d20 @offset=3360 fp=0xc90f6d58
53 + INFO: Allocated in get_modalias+0x61/0xf5 age=53 cpu=1 pid=554
54 +
55 +- Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
56 +- Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005
57 +- Redzone 0xc90f6d28: 00 cc cc cc .
58 +- Padding 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
59 ++ Bytes b4 (0xc90f6d10): 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ
60 ++ Object (0xc90f6d20): 31 30 31 39 2e 30 30 35 1019.005
61 ++ Redzone (0xc90f6d28): 00 cc cc cc .
62 ++ Padding (0xc90f6d50): 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ
63 +
64 + [<c010523d>] dump_trace+0x63/0x1eb
65 + [<c01053df>] show_trace_log_lvl+0x1a/0x2f
66 +diff --git a/Makefile b/Makefile
67 +index 808b68483002f..7ab22f105a032 100644
68 +--- a/Makefile
69 ++++ b/Makefile
70 +@@ -1,7 +1,7 @@
71 + # SPDX-License-Identifier: GPL-2.0
72 + VERSION = 5
73 + PATCHLEVEL = 10
74 +-SUBLEVEL = 45
75 ++SUBLEVEL = 46
76 + EXTRAVERSION =
77 + NAME = Dare mighty things
78 +
79 +diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
80 +index 95f8a4380e110..7a5449dfcb290 100644
81 +--- a/arch/arc/include/uapi/asm/sigcontext.h
82 ++++ b/arch/arc/include/uapi/asm/sigcontext.h
83 +@@ -18,6 +18,7 @@
84 + */
85 + struct sigcontext {
86 + struct user_regs_struct regs;
87 ++ struct user_regs_arcv2 v2abi;
88 + };
89 +
90 + #endif /* _ASM_ARC_SIGCONTEXT_H */
91 +diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
92 +index 98e575dbcce51..9d5996e014c01 100644
93 +--- a/arch/arc/kernel/signal.c
94 ++++ b/arch/arc/kernel/signal.c
95 +@@ -61,6 +61,41 @@ struct rt_sigframe {
96 + unsigned int sigret_magic;
97 + };
98 +
99 ++static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
100 ++{
101 ++ int err = 0;
102 ++#ifndef CONFIG_ISA_ARCOMPACT
103 ++ struct user_regs_arcv2 v2abi;
104 ++
105 ++ v2abi.r30 = regs->r30;
106 ++#ifdef CONFIG_ARC_HAS_ACCL_REGS
107 ++ v2abi.r58 = regs->r58;
108 ++ v2abi.r59 = regs->r59;
109 ++#else
110 ++ v2abi.r58 = v2abi.r59 = 0;
111 ++#endif
112 ++ err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
113 ++#endif
114 ++ return err;
115 ++}
116 ++
117 ++static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
118 ++{
119 ++ int err = 0;
120 ++#ifndef CONFIG_ISA_ARCOMPACT
121 ++ struct user_regs_arcv2 v2abi;
122 ++
123 ++ err = __copy_from_user(&v2abi, &mctx->v2abi, sizeof(v2abi));
124 ++
125 ++ regs->r30 = v2abi.r30;
126 ++#ifdef CONFIG_ARC_HAS_ACCL_REGS
127 ++ regs->r58 = v2abi.r58;
128 ++ regs->r59 = v2abi.r59;
129 ++#endif
130 ++#endif
131 ++ return err;
132 ++}
133 ++
134 + static int
135 + stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
136 + sigset_t *set)
137 +@@ -94,6 +129,10 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
138 +
139 + err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), &uregs.scratch,
140 + sizeof(sf->uc.uc_mcontext.regs.scratch));
141 ++
142 ++ if (is_isa_arcv2())
143 ++ err |= save_arcv2_regs(&(sf->uc.uc_mcontext), regs);
144 ++
145 + err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
146 +
147 + return err ? -EFAULT : 0;
148 +@@ -109,6 +148,10 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
149 + err |= __copy_from_user(&uregs.scratch,
150 + &(sf->uc.uc_mcontext.regs.scratch),
151 + sizeof(sf->uc.uc_mcontext.regs.scratch));
152 ++
153 ++ if (is_isa_arcv2())
154 ++ err |= restore_arcv2_regs(&(sf->uc.uc_mcontext), regs);
155 ++
156 + if (err)
157 + return -EFAULT;
158 +
159 +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
160 +index 81c458e996d9b..963e8cb936e28 100644
161 +--- a/arch/s390/kernel/entry.S
162 ++++ b/arch/s390/kernel/entry.S
163 +@@ -1284,7 +1284,7 @@ ENDPROC(stack_overflow)
164 + je 1f
165 + larl %r13,.Lsie_entry
166 + slgr %r9,%r13
167 +- larl %r13,.Lsie_skip
168 ++ lghi %r13,.Lsie_skip - .Lsie_entry
169 + clgr %r9,%r13
170 + jh 1f
171 + oi __LC_CPU_FLAGS+7, _CIF_MCCK_GUEST
172 +diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
173 +index ceeba9f631722..fdee23ea4e173 100644
174 +--- a/arch/x86/include/asm/fpu/internal.h
175 ++++ b/arch/x86/include/asm/fpu/internal.h
176 +@@ -578,10 +578,17 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
177 + * PKRU state is switched eagerly because it needs to be valid before we
178 + * return to userland e.g. for a copy_to_user() operation.
179 + */
180 +- if (current->mm) {
181 ++ if (!(current->flags & PF_KTHREAD)) {
182 ++ /*
183 ++ * If the PKRU bit in xsave.header.xfeatures is not set,
184 ++ * then the PKRU component was in init state, which means
185 ++ * XRSTOR will set PKRU to 0. If the bit is not set then
186 ++ * get_xsave_addr() will return NULL because the PKRU value
187 ++ * in memory is not valid. This means pkru_val has to be
188 ++ * set to 0 and not to init_pkru_value.
189 ++ */
190 + pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
191 +- if (pk)
192 +- pkru_val = pk->pkru;
193 ++ pkru_val = pk ? pk->pkru : 0;
194 + }
195 + __write_pkru(pkru_val);
196 + }
197 +diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
198 +index a4ec65317a7fa..ec3ae30547920 100644
199 +--- a/arch/x86/kernel/fpu/signal.c
200 ++++ b/arch/x86/kernel/fpu/signal.c
201 +@@ -307,13 +307,17 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
202 + return 0;
203 + }
204 +
205 +- if (!access_ok(buf, size))
206 +- return -EACCES;
207 ++ if (!access_ok(buf, size)) {
208 ++ ret = -EACCES;
209 ++ goto out;
210 ++ }
211 +
212 +- if (!static_cpu_has(X86_FEATURE_FPU))
213 +- return fpregs_soft_set(current, NULL,
214 +- 0, sizeof(struct user_i387_ia32_struct),
215 +- NULL, buf) != 0;
216 ++ if (!static_cpu_has(X86_FEATURE_FPU)) {
217 ++ ret = fpregs_soft_set(current, NULL, 0,
218 ++ sizeof(struct user_i387_ia32_struct),
219 ++ NULL, buf);
220 ++ goto out;
221 ++ }
222 +
223 + if (use_xsave()) {
224 + struct _fpx_sw_bytes fx_sw_user;
225 +@@ -369,6 +373,25 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
226 + fpregs_unlock();
227 + return 0;
228 + }
229 ++
230 ++ /*
231 ++ * The above did an FPU restore operation, restricted to
232 ++ * the user portion of the registers, and failed, but the
233 ++ * microcode might have modified the FPU registers
234 ++ * nevertheless.
235 ++ *
236 ++ * If the FPU registers do not belong to current, then
237 ++ * invalidate the FPU register state otherwise the task might
238 ++ * preempt current and return to user space with corrupted
239 ++ * FPU registers.
240 ++ *
241 ++ * In case current owns the FPU registers then no further
242 ++ * action is required. The fixup below will handle it
243 ++ * correctly.
244 ++ */
245 ++ if (test_thread_flag(TIF_NEED_FPU_LOAD))
246 ++ __cpu_invalidate_fpregs_state();
247 ++
248 + fpregs_unlock();
249 + } else {
250 + /*
251 +@@ -377,7 +400,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
252 + */
253 + ret = __copy_from_user(&env, buf, sizeof(env));
254 + if (ret)
255 +- goto err_out;
256 ++ goto out;
257 + envp = &env;
258 + }
259 +
260 +@@ -405,16 +428,9 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
261 + if (use_xsave() && !fx_only) {
262 + u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
263 +
264 +- if (using_compacted_format()) {
265 +- ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
266 +- } else {
267 +- ret = __copy_from_user(&fpu->state.xsave, buf_fx, state_size);
268 +-
269 +- if (!ret && state_size > offsetof(struct xregs_state, header))
270 +- ret = validate_user_xstate_header(&fpu->state.xsave.header);
271 +- }
272 ++ ret = copy_user_to_xstate(&fpu->state.xsave, buf_fx);
273 + if (ret)
274 +- goto err_out;
275 ++ goto out;
276 +
277 + sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
278 + fx_only);
279 +@@ -434,7 +450,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
280 + ret = __copy_from_user(&fpu->state.fxsave, buf_fx, state_size);
281 + if (ret) {
282 + ret = -EFAULT;
283 +- goto err_out;
284 ++ goto out;
285 + }
286 +
287 + sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
288 +@@ -452,7 +468,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
289 + } else {
290 + ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
291 + if (ret)
292 +- goto err_out;
293 ++ goto out;
294 +
295 + fpregs_lock();
296 + ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
297 +@@ -463,7 +479,7 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
298 + fpregs_deactivate(fpu);
299 + fpregs_unlock();
300 +
301 +-err_out:
302 ++out:
303 + if (ret)
304 + fpu__clear_user_states(fpu);
305 + return ret;
306 +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
307 +index 5759eb075d2fc..677d21082454f 100644
308 +--- a/arch/x86/kvm/lapic.c
309 ++++ b/arch/x86/kvm/lapic.c
310 +@@ -1405,6 +1405,9 @@ int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
311 + if (!apic_x2apic_mode(apic))
312 + valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI);
313 +
314 ++ if (alignment + len > 4)
315 ++ return 1;
316 ++
317 + if (offset > 0x3f0 || !(valid_reg_mask & APIC_REG_MASK(offset)))
318 + return 1;
319 +
320 +diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
321 +index ac5054763e38e..6b794344c02db 100644
322 +--- a/arch/x86/kvm/mmu/mmu.c
323 ++++ b/arch/x86/kvm/mmu/mmu.c
324 +@@ -4705,9 +4705,33 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
325 + context->inject_page_fault = kvm_inject_page_fault;
326 + }
327 +
328 ++static union kvm_mmu_role kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu)
329 ++{
330 ++ union kvm_mmu_role role = kvm_calc_shadow_root_page_role_common(vcpu, false);
331 ++
332 ++ /*
333 ++ * Nested MMUs are used only for walking L2's gva->gpa, they never have
334 ++ * shadow pages of their own and so "direct" has no meaning. Set it
335 ++ * to "true" to try to detect bogus usage of the nested MMU.
336 ++ */
337 ++ role.base.direct = true;
338 ++
339 ++ if (!is_paging(vcpu))
340 ++ role.base.level = 0;
341 ++ else if (is_long_mode(vcpu))
342 ++ role.base.level = is_la57_mode(vcpu) ? PT64_ROOT_5LEVEL :
343 ++ PT64_ROOT_4LEVEL;
344 ++ else if (is_pae(vcpu))
345 ++ role.base.level = PT32E_ROOT_LEVEL;
346 ++ else
347 ++ role.base.level = PT32_ROOT_LEVEL;
348 ++
349 ++ return role;
350 ++}
351 ++
352 + static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
353 + {
354 +- union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
355 ++ union kvm_mmu_role new_role = kvm_calc_nested_mmu_role(vcpu);
356 + struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
357 +
358 + if (new_role.as_u64 == g_context->mmu_role.as_u64)
359 +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
360 +index 109041630d30b..d3372cb973079 100644
361 +--- a/arch/x86/kvm/x86.c
362 ++++ b/arch/x86/kvm/x86.c
363 +@@ -6876,7 +6876,10 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
364 +
365 + static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
366 + {
367 +- emul_to_vcpu(ctxt)->arch.hflags = emul_flags;
368 ++ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
369 ++
370 ++ vcpu->arch.hflags = emul_flags;
371 ++ kvm_mmu_reset_context(vcpu);
372 + }
373 +
374 + static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
375 +@@ -8018,6 +8021,7 @@ void kvm_arch_exit(void)
376 + kvm_x86_ops.hardware_enable = NULL;
377 + kvm_mmu_module_exit();
378 + free_percpu(user_return_msrs);
379 ++ kmem_cache_destroy(x86_emulator_cache);
380 + kmem_cache_destroy(x86_fpu_cache);
381 + }
382 +
383 +diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
384 +index 9e5ccc56f8e07..356b746dfbe7a 100644
385 +--- a/arch/x86/mm/ioremap.c
386 ++++ b/arch/x86/mm/ioremap.c
387 +@@ -118,7 +118,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
388 + if (!IS_ENABLED(CONFIG_EFI))
389 + return;
390 +
391 +- if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
392 ++ if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
393 ++ (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
394 ++ efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
395 + desc->flags |= IORES_MAP_ENCRYPTED;
396 + }
397 +
398 +diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
399 +index 5eb4dc2b97dac..e94da744386f3 100644
400 +--- a/arch/x86/mm/numa.c
401 ++++ b/arch/x86/mm/numa.c
402 +@@ -254,7 +254,13 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
403 +
404 + /* make sure all non-reserved blocks are inside the limits */
405 + bi->start = max(bi->start, low);
406 +- bi->end = min(bi->end, high);
407 ++
408 ++ /* preserve info for non-RAM areas above 'max_pfn': */
409 ++ if (bi->end > high) {
410 ++ numa_add_memblk_to(bi->nid, high, bi->end,
411 ++ &numa_reserved_meminfo);
412 ++ bi->end = high;
413 ++ }
414 +
415 + /* and there's no empty block */
416 + if (bi->start >= bi->end)
417 +diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
418 +index 90284ffda58a7..f2db761ee5488 100644
419 +--- a/drivers/dma/Kconfig
420 ++++ b/drivers/dma/Kconfig
421 +@@ -59,6 +59,7 @@ config DMA_OF
422 + #devices
423 + config ALTERA_MSGDMA
424 + tristate "Altera / Intel mSGDMA Engine"
425 ++ depends on HAS_IOMEM
426 + select DMA_ENGINE
427 + help
428 + Enable support for Altera / Intel mSGDMA controller.
429 +diff --git a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
430 +index 4ec909e0b8106..4ae057922ef1f 100644
431 +--- a/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
432 ++++ b/drivers/dma/fsl-dpaa2-qdma/dpaa2-qdma.c
433 +@@ -332,6 +332,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
434 + }
435 +
436 + if (priv->dpdmai_attr.version.major > DPDMAI_VER_MAJOR) {
437 ++ err = -EINVAL;
438 + dev_err(dev, "DPDMAI major version mismatch\n"
439 + "Found %u.%u, supported version is %u.%u\n",
440 + priv->dpdmai_attr.version.major,
441 +@@ -341,6 +342,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
442 + }
443 +
444 + if (priv->dpdmai_attr.version.minor > DPDMAI_VER_MINOR) {
445 ++ err = -EINVAL;
446 + dev_err(dev, "DPDMAI minor version mismatch\n"
447 + "Found %u.%u, supported version is %u.%u\n",
448 + priv->dpdmai_attr.version.major,
449 +@@ -475,6 +477,7 @@ static int __cold dpaa2_qdma_dpio_setup(struct dpaa2_qdma_priv *priv)
450 + ppriv->store =
451 + dpaa2_io_store_create(DPAA2_QDMA_STORE_SIZE, dev);
452 + if (!ppriv->store) {
453 ++ err = -ENOMEM;
454 + dev_err(dev, "dpaa2_io_store_create() failed\n");
455 + goto err_store;
456 + }
457 +diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
458 +index f4c7ce8cb399c..048a23018a3df 100644
459 +--- a/drivers/dma/idxd/init.c
460 ++++ b/drivers/dma/idxd/init.c
461 +@@ -518,6 +518,7 @@ module_init(idxd_init_module);
462 +
463 + static void __exit idxd_exit_module(void)
464 + {
465 ++ idxd_unregister_driver();
466 + pci_unregister_driver(&idxd_pci_driver);
467 + idxd_cdev_remove();
468 + idxd_unregister_bus_type();
469 +diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
470 +index 0f5c19370f6d7..dfbf514188f37 100644
471 +--- a/drivers/dma/pl330.c
472 ++++ b/drivers/dma/pl330.c
473 +@@ -2696,13 +2696,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
474 + for (i = 0; i < len / period_len; i++) {
475 + desc = pl330_get_desc(pch);
476 + if (!desc) {
477 ++ unsigned long iflags;
478 ++
479 + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
480 + __func__, __LINE__);
481 +
482 + if (!first)
483 + return NULL;
484 +
485 +- spin_lock_irqsave(&pl330->pool_lock, flags);
486 ++ spin_lock_irqsave(&pl330->pool_lock, iflags);
487 +
488 + while (!list_empty(&first->node)) {
489 + desc = list_entry(first->node.next,
490 +@@ -2712,7 +2714,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
491 +
492 + list_move_tail(&first->node, &pl330->desc_pool);
493 +
494 +- spin_unlock_irqrestore(&pl330->pool_lock, flags);
495 ++ spin_unlock_irqrestore(&pl330->pool_lock, iflags);
496 +
497 + return NULL;
498 + }
499 +diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
500 +index 3bcb689162c67..ef038f3c5e328 100644
501 +--- a/drivers/dma/qcom/Kconfig
502 ++++ b/drivers/dma/qcom/Kconfig
503 +@@ -10,6 +10,7 @@ config QCOM_BAM_DMA
504 +
505 + config QCOM_HIDMA_MGMT
506 + tristate "Qualcomm Technologies HIDMA Management support"
507 ++ depends on HAS_IOMEM
508 + select DMA_ENGINE
509 + help
510 + Enable support for the Qualcomm Technologies HIDMA Management.
511 +diff --git a/drivers/dma/sf-pdma/Kconfig b/drivers/dma/sf-pdma/Kconfig
512 +index f8ffa02e279ff..ba46a0a15a936 100644
513 +--- a/drivers/dma/sf-pdma/Kconfig
514 ++++ b/drivers/dma/sf-pdma/Kconfig
515 +@@ -1,5 +1,6 @@
516 + config SF_PDMA
517 + tristate "Sifive PDMA controller driver"
518 ++ depends on HAS_IOMEM
519 + select DMA_ENGINE
520 + select DMA_VIRTUAL_CHANNELS
521 + help
522 +diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
523 +index 77ab1f4730be9..b35b97cb8fd25 100644
524 +--- a/drivers/dma/ste_dma40.c
525 ++++ b/drivers/dma/ste_dma40.c
526 +@@ -3676,6 +3676,9 @@ static int __init d40_probe(struct platform_device *pdev)
527 +
528 + kfree(base->lcla_pool.base_unaligned);
529 +
530 ++ if (base->lcpa_base)
531 ++ iounmap(base->lcpa_base);
532 ++
533 + if (base->phy_lcpa)
534 + release_mem_region(base->phy_lcpa,
535 + base->lcpa_size);
536 +diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
537 +index 70b29bd079c9f..ff7dfb3fdeb47 100644
538 +--- a/drivers/dma/xilinx/xilinx_dpdma.c
539 ++++ b/drivers/dma/xilinx/xilinx_dpdma.c
540 +@@ -1459,7 +1459,7 @@ static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
541 + */
542 + static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
543 + {
544 +- dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
545 ++ dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
546 + dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
547 + }
548 +
549 +@@ -1596,6 +1596,26 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
550 + return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
551 + }
552 +
553 ++static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
554 ++{
555 ++ unsigned int i;
556 ++ void __iomem *reg;
557 ++
558 ++ /* Disable all interrupts */
559 ++ xilinx_dpdma_disable_irq(xdev);
560 ++
561 ++ /* Stop all channels */
562 ++ for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
563 ++ reg = xdev->reg + XILINX_DPDMA_CH_BASE
564 ++ + XILINX_DPDMA_CH_OFFSET * i;
565 ++ dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
566 ++ }
567 ++
568 ++ /* Clear the interrupt status registers */
569 ++ dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
570 ++ dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
571 ++}
572 ++
573 + static int xilinx_dpdma_probe(struct platform_device *pdev)
574 + {
575 + struct xilinx_dpdma_device *xdev;
576 +@@ -1622,6 +1642,8 @@ static int xilinx_dpdma_probe(struct platform_device *pdev)
577 + if (IS_ERR(xdev->reg))
578 + return PTR_ERR(xdev->reg);
579 +
580 ++ dpdma_hw_init(xdev);
581 ++
582 + xdev->irq = platform_get_irq(pdev, 0);
583 + if (xdev->irq < 0) {
584 + dev_err(xdev->dev, "failed to get platform irq\n");
585 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
586 +index fc8da5fed779b..3c92dacbc24ad 100644
587 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
588 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
589 +@@ -6590,8 +6590,12 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
590 + if (ring->use_doorbell) {
591 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
592 + (adev->doorbell_index.kiq * 2) << 2);
593 ++ /* If GC has entered CGPG, ringing doorbell > first page doesn't
594 ++ * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
595 ++ * this issue.
596 ++ */
597 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
598 +- (adev->doorbell_index.userqueue_end * 2) << 2);
599 ++ (adev->doorbell.size - 4));
600 + }
601 +
602 + WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
603 +diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
604 +index fb15e8b5af32f..1859d293ef712 100644
605 +--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
606 ++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
607 +@@ -3619,8 +3619,12 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
608 + if (ring->use_doorbell) {
609 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
610 + (adev->doorbell_index.kiq * 2) << 2);
611 ++ /* If GC has entered CGPG, ringing doorbell > first page doesn't
612 ++ * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
613 ++ * this issue.
614 ++ */
615 + WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
616 +- (adev->doorbell_index.userqueue_end * 2) << 2);
617 ++ (adev->doorbell.size - 4));
618 + }
619 +
620 + WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
621 +diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
622 +index 57fb3eb3a4b45..1f4e3396d097c 100644
623 +--- a/drivers/gpu/drm/radeon/radeon_uvd.c
624 ++++ b/drivers/gpu/drm/radeon/radeon_uvd.c
625 +@@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
626 + if (rdev->uvd.vcpu_bo == NULL)
627 + return -EINVAL;
628 +
629 +- memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
630 ++ memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
631 +
632 + size = radeon_bo_size(rdev->uvd.vcpu_bo);
633 + size -= rdev->uvd_fw->size;
634 +@@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
635 + ptr = rdev->uvd.cpu_addr;
636 + ptr += rdev->uvd_fw->size;
637 +
638 +- memset(ptr, 0, size);
639 ++ memset_io((void __iomem *)ptr, 0, size);
640 +
641 + return 0;
642 + }
643 +diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
644 +index bbdfd5e26ec88..f75fb157f2ff7 100644
645 +--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
646 ++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
647 +@@ -209,7 +209,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
648 + goto err_disable_clk_tmds;
649 + }
650 +
651 +- ret = sun8i_hdmi_phy_probe(hdmi, phy_node);
652 ++ ret = sun8i_hdmi_phy_get(hdmi, phy_node);
653 + of_node_put(phy_node);
654 + if (ret) {
655 + dev_err(dev, "Couldn't get the HDMI PHY\n");
656 +@@ -242,7 +242,6 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
657 +
658 + cleanup_encoder:
659 + drm_encoder_cleanup(encoder);
660 +- sun8i_hdmi_phy_remove(hdmi);
661 + err_disable_clk_tmds:
662 + clk_disable_unprepare(hdmi->clk_tmds);
663 + err_assert_ctrl_reset:
664 +@@ -263,7 +262,6 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
665 + struct sun8i_dw_hdmi *hdmi = dev_get_drvdata(dev);
666 +
667 + dw_hdmi_unbind(hdmi->hdmi);
668 +- sun8i_hdmi_phy_remove(hdmi);
669 + clk_disable_unprepare(hdmi->clk_tmds);
670 + reset_control_assert(hdmi->rst_ctrl);
671 + gpiod_set_value(hdmi->ddc_en, 0);
672 +@@ -320,7 +318,32 @@ static struct platform_driver sun8i_dw_hdmi_pltfm_driver = {
673 + .of_match_table = sun8i_dw_hdmi_dt_ids,
674 + },
675 + };
676 +-module_platform_driver(sun8i_dw_hdmi_pltfm_driver);
677 ++
678 ++static int __init sun8i_dw_hdmi_init(void)
679 ++{
680 ++ int ret;
681 ++
682 ++ ret = platform_driver_register(&sun8i_dw_hdmi_pltfm_driver);
683 ++ if (ret)
684 ++ return ret;
685 ++
686 ++ ret = platform_driver_register(&sun8i_hdmi_phy_driver);
687 ++ if (ret) {
688 ++ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
689 ++ return ret;
690 ++ }
691 ++
692 ++ return ret;
693 ++}
694 ++
695 ++static void __exit sun8i_dw_hdmi_exit(void)
696 ++{
697 ++ platform_driver_unregister(&sun8i_dw_hdmi_pltfm_driver);
698 ++ platform_driver_unregister(&sun8i_hdmi_phy_driver);
699 ++}
700 ++
701 ++module_init(sun8i_dw_hdmi_init);
702 ++module_exit(sun8i_dw_hdmi_exit);
703 +
704 + MODULE_AUTHOR("Jernej Skrabec <jernej.skrabec@××××.net>");
705 + MODULE_DESCRIPTION("Allwinner DW HDMI bridge");
706 +diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
707 +index d4b55af0592f8..74f6ed0e25709 100644
708 +--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
709 ++++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
710 +@@ -195,14 +195,15 @@ struct sun8i_dw_hdmi {
711 + struct gpio_desc *ddc_en;
712 + };
713 +
714 ++extern struct platform_driver sun8i_hdmi_phy_driver;
715 ++
716 + static inline struct sun8i_dw_hdmi *
717 + encoder_to_sun8i_dw_hdmi(struct drm_encoder *encoder)
718 + {
719 + return container_of(encoder, struct sun8i_dw_hdmi, encoder);
720 + }
721 +
722 +-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
723 +-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi);
724 ++int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node);
725 +
726 + void sun8i_hdmi_phy_init(struct sun8i_hdmi_phy *phy);
727 + void sun8i_hdmi_phy_set_ops(struct sun8i_hdmi_phy *phy,
728 +diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
729 +index 9994edf675096..c9239708d398c 100644
730 +--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
731 ++++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
732 +@@ -5,6 +5,7 @@
733 +
734 + #include <linux/delay.h>
735 + #include <linux/of_address.h>
736 ++#include <linux/of_platform.h>
737 +
738 + #include "sun8i_dw_hdmi.h"
739 +
740 +@@ -597,10 +598,30 @@ static const struct of_device_id sun8i_hdmi_phy_of_table[] = {
741 + { /* sentinel */ }
742 + };
743 +
744 +-int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
745 ++int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
746 ++{
747 ++ struct platform_device *pdev = of_find_device_by_node(node);
748 ++ struct sun8i_hdmi_phy *phy;
749 ++
750 ++ if (!pdev)
751 ++ return -EPROBE_DEFER;
752 ++
753 ++ phy = platform_get_drvdata(pdev);
754 ++ if (!phy)
755 ++ return -EPROBE_DEFER;
756 ++
757 ++ hdmi->phy = phy;
758 ++
759 ++ put_device(&pdev->dev);
760 ++
761 ++ return 0;
762 ++}
763 ++
764 ++static int sun8i_hdmi_phy_probe(struct platform_device *pdev)
765 + {
766 + const struct of_device_id *match;
767 +- struct device *dev = hdmi->dev;
768 ++ struct device *dev = &pdev->dev;
769 ++ struct device_node *node = dev->of_node;
770 + struct sun8i_hdmi_phy *phy;
771 + struct resource res;
772 + void __iomem *regs;
773 +@@ -704,7 +725,7 @@ int sun8i_hdmi_phy_probe(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
774 + clk_prepare_enable(phy->clk_phy);
775 + }
776 +
777 +- hdmi->phy = phy;
778 ++ platform_set_drvdata(pdev, phy);
779 +
780 + return 0;
781 +
782 +@@ -728,9 +749,9 @@ err_put_clk_bus:
783 + return ret;
784 + }
785 +
786 +-void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
787 ++static int sun8i_hdmi_phy_remove(struct platform_device *pdev)
788 + {
789 +- struct sun8i_hdmi_phy *phy = hdmi->phy;
790 ++ struct sun8i_hdmi_phy *phy = platform_get_drvdata(pdev);
791 +
792 + clk_disable_unprepare(phy->clk_mod);
793 + clk_disable_unprepare(phy->clk_bus);
794 +@@ -744,4 +765,14 @@ void sun8i_hdmi_phy_remove(struct sun8i_dw_hdmi *hdmi)
795 + clk_put(phy->clk_pll1);
796 + clk_put(phy->clk_mod);
797 + clk_put(phy->clk_bus);
798 ++ return 0;
799 + }
800 ++
801 ++struct platform_driver sun8i_hdmi_phy_driver = {
802 ++ .probe = sun8i_hdmi_phy_probe,
803 ++ .remove = sun8i_hdmi_phy_remove,
804 ++ .driver = {
805 ++ .name = "sun8i-hdmi-phy",
806 ++ .of_match_table = sun8i_hdmi_phy_of_table,
807 ++ },
808 ++};
809 +diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
810 +index 25aac40f2764a..919877970ae3b 100644
811 +--- a/drivers/hwmon/scpi-hwmon.c
812 ++++ b/drivers/hwmon/scpi-hwmon.c
813 +@@ -99,6 +99,15 @@ scpi_show_sensor(struct device *dev, struct device_attribute *attr, char *buf)
814 +
815 + scpi_scale_reading(&value, sensor);
816 +
817 ++ /*
818 ++ * Temperature sensor values are treated as signed values based on
819 ++ * observation even though that is not explicitly specified, and
820 ++ * because an unsigned u64 temperature does not really make practical
821 ++ * sense especially when the temperature is below zero degrees Celsius.
822 ++ */
823 ++ if (sensor->info.class == TEMPERATURE)
824 ++ return sprintf(buf, "%lld\n", (s64)value);
825 ++
826 + return sprintf(buf, "%llu\n", value);
827 + }
828 +
829 +diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
830 +index 7929bf12651ca..1005b182bab47 100644
831 +--- a/drivers/irqchip/irq-gic-v3.c
832 ++++ b/drivers/irqchip/irq-gic-v3.c
833 +@@ -642,11 +642,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
834 + nmi_exit();
835 + }
836 +
837 ++static u32 do_read_iar(struct pt_regs *regs)
838 ++{
839 ++ u32 iar;
840 ++
841 ++ if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
842 ++ u64 pmr;
843 ++
844 ++ /*
845 ++ * We were in a context with IRQs disabled. However, the
846 ++ * entry code has set PMR to a value that allows any
847 ++ * interrupt to be acknowledged, and not just NMIs. This can
848 ++ * lead to surprising effects if the NMI has been retired in
849 ++ * the meantime, and that there is an IRQ pending. The IRQ
850 ++ * would then be taken in NMI context, something that nobody
851 ++ * wants to debug twice.
852 ++ *
853 ++ * Until we sort this, drop PMR again to a level that will
854 ++ * actually only allow NMIs before reading IAR, and then
855 ++ * restore it to what it was.
856 ++ */
857 ++ pmr = gic_read_pmr();
858 ++ gic_pmr_mask_irqs();
859 ++ isb();
860 ++
861 ++ iar = gic_read_iar();
862 ++
863 ++ gic_write_pmr(pmr);
864 ++ } else {
865 ++ iar = gic_read_iar();
866 ++ }
867 ++
868 ++ return iar;
869 ++}
870 ++
871 + static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
872 + {
873 + u32 irqnr;
874 +
875 +- irqnr = gic_read_iar();
876 ++ irqnr = do_read_iar(regs);
877 +
878 + /* Check for special IDs first */
879 + if ((irqnr >= 1020 && irqnr <= 1023))
880 +diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
881 +index e97f2e0da6b07..6d03f1d6c4d38 100644
882 +--- a/drivers/net/can/usb/mcba_usb.c
883 ++++ b/drivers/net/can/usb/mcba_usb.c
884 +@@ -82,6 +82,8 @@ struct mcba_priv {
885 + bool can_ka_first_pass;
886 + bool can_speed_check;
887 + atomic_t free_ctx_cnt;
888 ++ void *rxbuf[MCBA_MAX_RX_URBS];
889 ++ dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
890 + };
891 +
892 + /* CAN frame */
893 +@@ -633,6 +635,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
894 + for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
895 + struct urb *urb = NULL;
896 + u8 *buf;
897 ++ dma_addr_t buf_dma;
898 +
899 + /* create a URB, and a buffer for it */
900 + urb = usb_alloc_urb(0, GFP_KERNEL);
901 +@@ -642,7 +645,7 @@ static int mcba_usb_start(struct mcba_priv *priv)
902 + }
903 +
904 + buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
905 +- GFP_KERNEL, &urb->transfer_dma);
906 ++ GFP_KERNEL, &buf_dma);
907 + if (!buf) {
908 + netdev_err(netdev, "No memory left for USB buffer\n");
909 + usb_free_urb(urb);
910 +@@ -661,11 +664,14 @@ static int mcba_usb_start(struct mcba_priv *priv)
911 + if (err) {
912 + usb_unanchor_urb(urb);
913 + usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
914 +- buf, urb->transfer_dma);
915 ++ buf, buf_dma);
916 + usb_free_urb(urb);
917 + break;
918 + }
919 +
920 ++ priv->rxbuf[i] = buf;
921 ++ priv->rxbuf_dma[i] = buf_dma;
922 ++
923 + /* Drop reference, USB core will take care of freeing it */
924 + usb_free_urb(urb);
925 + }
926 +@@ -708,7 +714,14 @@ static int mcba_usb_open(struct net_device *netdev)
927 +
928 + static void mcba_urb_unlink(struct mcba_priv *priv)
929 + {
930 ++ int i;
931 ++
932 + usb_kill_anchored_urbs(&priv->rx_submitted);
933 ++
934 ++ for (i = 0; i < MCBA_MAX_RX_URBS; ++i)
935 ++ usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
936 ++ priv->rxbuf[i], priv->rxbuf_dma[i]);
937 ++
938 + usb_kill_anchored_urbs(&priv->tx_submitted);
939 + }
940 +
941 +diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
942 +index 9e02f88645931..5e90df42b2013 100644
943 +--- a/drivers/net/ethernet/atheros/alx/main.c
944 ++++ b/drivers/net/ethernet/atheros/alx/main.c
945 +@@ -1849,6 +1849,7 @@ out_free_netdev:
946 + free_netdev(netdev);
947 + out_pci_release:
948 + pci_release_mem_regions(pdev);
949 ++ pci_disable_pcie_error_reporting(pdev);
950 + out_pci_disable:
951 + pci_disable_device(pdev);
952 + return err;
953 +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
954 +index adfaa9a850dd3..db1b89f570794 100644
955 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
956 ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
957 +@@ -7184,7 +7184,7 @@ skip_rdma:
958 + entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
959 + 2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
960 + entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
961 +- entries = ctx->qp_max_l2_entries + extra_qps + ctx->qp_min_qp1_entries;
962 ++ entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
963 + entries = roundup(entries, ctx->tqm_entries_multiple);
964 + entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
965 + for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
966 +@@ -11353,6 +11353,8 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
967 + bnxt_hwrm_coal_params_qcaps(bp);
968 + }
969 +
970 ++static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
971 ++
972 + static int bnxt_fw_init_one(struct bnxt *bp)
973 + {
974 + int rc;
975 +@@ -11367,6 +11369,9 @@ static int bnxt_fw_init_one(struct bnxt *bp)
976 + netdev_err(bp->dev, "Firmware init phase 2 failed\n");
977 + return rc;
978 + }
979 ++ rc = bnxt_probe_phy(bp, false);
980 ++ if (rc)
981 ++ return rc;
982 + rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
983 + if (rc)
984 + return rc;
985 +@@ -12741,6 +12746,7 @@ init_err_pci_clean:
986 + bnxt_hwrm_func_drv_unrgtr(bp);
987 + bnxt_free_hwrm_short_cmd_req(bp);
988 + bnxt_free_hwrm_resources(bp);
989 ++ bnxt_ethtool_free(bp);
990 + kfree(bp->fw_health);
991 + bp->fw_health = NULL;
992 + bnxt_cleanup_pci(bp);
993 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
994 +index 61ea3ec5c3fcc..83ed10ac86606 100644
995 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
996 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
997 +@@ -1337,13 +1337,27 @@ static int cxgb4_ethtool_flash_phy(struct net_device *netdev,
998 + return ret;
999 + }
1000 +
1001 +- spin_lock_bh(&adap->win0_lock);
1002 ++ /* We have to RESET the chip/firmware because we need the
1003 ++ * chip in uninitialized state for loading new PHY image.
1004 ++ * Otherwise, the running firmware will only store the PHY
1005 ++ * image in local RAM which will be lost after next reset.
1006 ++ */
1007 ++ ret = t4_fw_reset(adap, adap->mbox, PIORSTMODE_F | PIORST_F);
1008 ++ if (ret < 0) {
1009 ++ dev_err(adap->pdev_dev,
1010 ++ "Set FW to RESET for flashing PHY FW failed. ret: %d\n",
1011 ++ ret);
1012 ++ return ret;
1013 ++ }
1014 ++
1015 + ret = t4_load_phy_fw(adap, MEMWIN_NIC, NULL, data, size);
1016 +- spin_unlock_bh(&adap->win0_lock);
1017 +- if (ret)
1018 +- dev_err(adap->pdev_dev, "Failed to load PHY FW\n");
1019 ++ if (ret < 0) {
1020 ++ dev_err(adap->pdev_dev, "Failed to load PHY FW. ret: %d\n",
1021 ++ ret);
1022 ++ return ret;
1023 ++ }
1024 +
1025 +- return ret;
1026 ++ return 0;
1027 + }
1028 +
1029 + static int cxgb4_ethtool_flash_fw(struct net_device *netdev,
1030 +@@ -1610,16 +1624,14 @@ static struct filter_entry *cxgb4_get_filter_entry(struct adapter *adap,
1031 + u32 ftid)
1032 + {
1033 + struct tid_info *t = &adap->tids;
1034 +- struct filter_entry *f;
1035 +
1036 +- if (ftid < t->nhpftids)
1037 +- f = &adap->tids.hpftid_tab[ftid];
1038 +- else if (ftid < t->nftids)
1039 +- f = &adap->tids.ftid_tab[ftid - t->nhpftids];
1040 +- else
1041 +- f = lookup_tid(&adap->tids, ftid);
1042 ++ if (ftid >= t->hpftid_base && ftid < t->hpftid_base + t->nhpftids)
1043 ++ return &t->hpftid_tab[ftid - t->hpftid_base];
1044 +
1045 +- return f;
1046 ++ if (ftid >= t->ftid_base && ftid < t->ftid_base + t->nftids)
1047 ++ return &t->ftid_tab[ftid - t->ftid_base];
1048 ++
1049 ++ return lookup_tid(t, ftid);
1050 + }
1051 +
1052 + static void cxgb4_fill_filter_rule(struct ethtool_rx_flow_spec *fs,
1053 +@@ -1826,6 +1838,11 @@ static int cxgb4_ntuple_del_filter(struct net_device *dev,
1054 + filter_id = filter_info->loc_array[cmd->fs.location];
1055 + f = cxgb4_get_filter_entry(adapter, filter_id);
1056 +
1057 ++ if (f->fs.prio)
1058 ++ filter_id -= adapter->tids.hpftid_base;
1059 ++ else if (!f->fs.hash)
1060 ++ filter_id -= (adapter->tids.ftid_base - adapter->tids.nhpftids);
1061 ++
1062 + ret = cxgb4_flow_rule_destroy(dev, f->fs.tc_prio, &f->fs, filter_id);
1063 + if (ret)
1064 + goto err;
1065 +@@ -1885,6 +1902,11 @@ static int cxgb4_ntuple_set_filter(struct net_device *netdev,
1066 +
1067 + filter_info = &adapter->ethtool_filters->port[pi->port_id];
1068 +
1069 ++ if (fs.prio)
1070 ++ tid += adapter->tids.hpftid_base;
1071 ++ else if (!fs.hash)
1072 ++ tid += (adapter->tids.ftid_base - adapter->tids.nhpftids);
1073 ++
1074 + filter_info->loc_array[cmd->fs.location] = tid;
1075 + set_bit(cmd->fs.location, filter_info->bmap);
1076 + filter_info->in_use++;
1077 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
1078 +index e664e05b9f026..5fbc087268dbe 100644
1079 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
1080 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
1081 +@@ -198,7 +198,7 @@ static void set_nat_params(struct adapter *adap, struct filter_entry *f,
1082 + WORD_MASK, f->fs.nat_lip[3] |
1083 + f->fs.nat_lip[2] << 8 |
1084 + f->fs.nat_lip[1] << 16 |
1085 +- (u64)f->fs.nat_lip[0] << 25, 1);
1086 ++ (u64)f->fs.nat_lip[0] << 24, 1);
1087 + }
1088 + }
1089 +
1090 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1091 +index 04dcb5e4b3161..8be525c5e2e4a 100644
1092 +--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1093 ++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
1094 +@@ -4428,10 +4428,8 @@ static int adap_init0_phy(struct adapter *adap)
1095 +
1096 + /* Load PHY Firmware onto adapter.
1097 + */
1098 +- spin_lock_bh(&adap->win0_lock);
1099 + ret = t4_load_phy_fw(adap, MEMWIN_NIC, phy_info->phy_fw_version,
1100 + (u8 *)phyf->data, phyf->size);
1101 +- spin_unlock_bh(&adap->win0_lock);
1102 + if (ret < 0)
1103 + dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
1104 + -ret);
1105 +diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
1106 +index 581670dced6ec..964ea3491b80b 100644
1107 +--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
1108 ++++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
1109 +@@ -3067,16 +3067,19 @@ int t4_read_flash(struct adapter *adapter, unsigned int addr,
1110 + * @addr: the start address to write
1111 + * @n: length of data to write in bytes
1112 + * @data: the data to write
1113 ++ * @byte_oriented: whether to store data as bytes or as words
1114 + *
1115 + * Writes up to a page of data (256 bytes) to the serial flash starting
1116 + * at the given address. All the data must be written to the same page.
1117 ++ * If @byte_oriented is set the write data is stored as byte stream
1118 ++ * (i.e. matches what on disk), otherwise in big-endian.
1119 + */
1120 + static int t4_write_flash(struct adapter *adapter, unsigned int addr,
1121 +- unsigned int n, const u8 *data)
1122 ++ unsigned int n, const u8 *data, bool byte_oriented)
1123 + {
1124 +- int ret;
1125 +- u32 buf[64];
1126 + unsigned int i, c, left, val, offset = addr & 0xff;
1127 ++ u32 buf[64];
1128 ++ int ret;
1129 +
1130 + if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
1131 + return -EINVAL;
1132 +@@ -3087,10 +3090,14 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
1133 + (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
1134 + goto unlock;
1135 +
1136 +- for (left = n; left; left -= c) {
1137 ++ for (left = n; left; left -= c, data += c) {
1138 + c = min(left, 4U);
1139 +- for (val = 0, i = 0; i < c; ++i)
1140 +- val = (val << 8) + *data++;
1141 ++ for (val = 0, i = 0; i < c; ++i) {
1142 ++ if (byte_oriented)
1143 ++ val = (val << 8) + data[i];
1144 ++ else
1145 ++ val = (val << 8) + data[c - i - 1];
1146 ++ }
1147 +
1148 + ret = sf1_write(adapter, c, c != left, 1, val);
1149 + if (ret)
1150 +@@ -3103,7 +3110,8 @@ static int t4_write_flash(struct adapter *adapter, unsigned int addr,
1151 + t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
1152 +
1153 + /* Read the page to verify the write succeeded */
1154 +- ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
1155 ++ ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf,
1156 ++ byte_oriented);
1157 + if (ret)
1158 + return ret;
1159 +
1160 +@@ -3699,7 +3707,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1161 + */
1162 + memcpy(first_page, fw_data, SF_PAGE_SIZE);
1163 + ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
1164 +- ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
1165 ++ ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page, true);
1166 + if (ret)
1167 + goto out;
1168 +
1169 +@@ -3707,14 +3715,14 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
1170 + for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1171 + addr += SF_PAGE_SIZE;
1172 + fw_data += SF_PAGE_SIZE;
1173 +- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
1174 ++ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data, true);
1175 + if (ret)
1176 + goto out;
1177 + }
1178 +
1179 +- ret = t4_write_flash(adap,
1180 +- fw_start + offsetof(struct fw_hdr, fw_ver),
1181 +- sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
1182 ++ ret = t4_write_flash(adap, fw_start + offsetof(struct fw_hdr, fw_ver),
1183 ++ sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver,
1184 ++ true);
1185 + out:
1186 + if (ret)
1187 + dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
1188 +@@ -3819,9 +3827,11 @@ int t4_load_phy_fw(struct adapter *adap, int win,
1189 + /* Copy the supplied PHY Firmware image to the adapter memory location
1190 + * allocated by the adapter firmware.
1191 + */
1192 ++ spin_lock_bh(&adap->win0_lock);
1193 + ret = t4_memory_rw(adap, win, mtype, maddr,
1194 + phy_fw_size, (__be32 *)phy_fw_data,
1195 + T4_MEMORY_WRITE);
1196 ++ spin_unlock_bh(&adap->win0_lock);
1197 + if (ret)
1198 + return ret;
1199 +
1200 +@@ -10215,7 +10225,7 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
1201 + n = size - i;
1202 + else
1203 + n = SF_PAGE_SIZE;
1204 +- ret = t4_write_flash(adap, addr, n, cfg_data);
1205 ++ ret = t4_write_flash(adap, addr, n, cfg_data, true);
1206 + if (ret)
1207 + goto out;
1208 +
1209 +@@ -10684,13 +10694,14 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
1210 + for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
1211 + addr += SF_PAGE_SIZE;
1212 + boot_data += SF_PAGE_SIZE;
1213 +- ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data);
1214 ++ ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, boot_data,
1215 ++ false);
1216 + if (ret)
1217 + goto out;
1218 + }
1219 +
1220 + ret = t4_write_flash(adap, boot_sector, SF_PAGE_SIZE,
1221 +- (const u8 *)header);
1222 ++ (const u8 *)header, false);
1223 +
1224 + out:
1225 + if (ret)
1226 +@@ -10765,7 +10776,7 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
1227 + for (i = 0; i < size; i += SF_PAGE_SIZE) {
1228 + n = min_t(u32, size - i, SF_PAGE_SIZE);
1229 +
1230 +- ret = t4_write_flash(adap, addr, n, cfg_data);
1231 ++ ret = t4_write_flash(adap, addr, n, cfg_data, false);
1232 + if (ret)
1233 + goto out;
1234 +
1235 +@@ -10777,7 +10788,8 @@ int t4_load_bootcfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
1236 + for (i = 0; i < npad; i++) {
1237 + u8 data = 0;
1238 +
1239 +- ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data);
1240 ++ ret = t4_write_flash(adap, cfg_addr + size + i, 1, &data,
1241 ++ false);
1242 + if (ret)
1243 + goto out;
1244 + }
1245 +diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
1246 +index 46b0dbab8aadc..7c992172933bc 100644
1247 +--- a/drivers/net/ethernet/ec_bhf.c
1248 ++++ b/drivers/net/ethernet/ec_bhf.c
1249 +@@ -576,10 +576,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
1250 + struct ec_bhf_priv *priv = netdev_priv(net_dev);
1251 +
1252 + unregister_netdev(net_dev);
1253 +- free_netdev(net_dev);
1254 +
1255 + pci_iounmap(dev, priv->dma_io);
1256 + pci_iounmap(dev, priv->io);
1257 ++
1258 ++ free_netdev(net_dev);
1259 ++
1260 + pci_release_regions(dev);
1261 + pci_clear_master(dev);
1262 + pci_disable_device(dev);
1263 +diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
1264 +index 676e437d78f6a..cb1e1ad652d09 100644
1265 +--- a/drivers/net/ethernet/emulex/benet/be_main.c
1266 ++++ b/drivers/net/ethernet/emulex/benet/be_main.c
1267 +@@ -5905,6 +5905,7 @@ drv_cleanup:
1268 + unmap_bars:
1269 + be_unmap_pci_bars(adapter);
1270 + free_netdev:
1271 ++ pci_disable_pcie_error_reporting(pdev);
1272 + free_netdev(netdev);
1273 + rel_reg:
1274 + pci_release_regions(pdev);
1275 +diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
1276 +index 1753807cbf97e..d71eac7e19249 100644
1277 +--- a/drivers/net/ethernet/freescale/fec_ptp.c
1278 ++++ b/drivers/net/ethernet/freescale/fec_ptp.c
1279 +@@ -215,15 +215,13 @@ static u64 fec_ptp_read(const struct cyclecounter *cc)
1280 + {
1281 + struct fec_enet_private *fep =
1282 + container_of(cc, struct fec_enet_private, cc);
1283 +- const struct platform_device_id *id_entry =
1284 +- platform_get_device_id(fep->pdev);
1285 + u32 tempval;
1286 +
1287 + tempval = readl(fep->hwp + FEC_ATIME_CTRL);
1288 + tempval |= FEC_T_CTRL_CAPTURE;
1289 + writel(tempval, fep->hwp + FEC_ATIME_CTRL);
1290 +
1291 +- if (id_entry->driver_data & FEC_QUIRK_BUG_CAPTURE)
1292 ++ if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
1293 + udelay(1);
1294 +
1295 + return readl(fep->hwp + FEC_ATIME);
1296 +@@ -604,6 +602,10 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
1297 + fep->ptp_caps.enable = fec_ptp_enable;
1298 +
1299 + fep->cycle_speed = clk_get_rate(fep->clk_ptp);
1300 ++ if (!fep->cycle_speed) {
1301 ++ fep->cycle_speed = NSEC_PER_SEC;
1302 ++ dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
1303 ++ }
1304 + fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
1305 +
1306 + spin_lock_init(&fep->tmreg_lock);
1307 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
1308 +index fb20c6971f4c7..dc944d605a741 100644
1309 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c
1310 ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
1311 +@@ -1705,12 +1705,13 @@ setup_rings:
1312 + * ice_vsi_cfg_txqs - Configure the VSI for Tx
1313 + * @vsi: the VSI being configured
1314 + * @rings: Tx ring array to be configured
1315 ++ * @count: number of Tx ring array elements
1316 + *
1317 + * Return 0 on success and a negative value on error
1318 + * Configure the Tx VSI for operation.
1319 + */
1320 + static int
1321 +-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
1322 ++ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
1323 + {
1324 + struct ice_aqc_add_tx_qgrp *qg_buf;
1325 + u16 q_idx = 0;
1326 +@@ -1722,7 +1723,7 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
1327 +
1328 + qg_buf->num_txqs = 1;
1329 +
1330 +- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
1331 ++ for (q_idx = 0; q_idx < count; q_idx++) {
1332 + err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
1333 + if (err)
1334 + goto err_cfg_txqs;
1335 +@@ -1742,7 +1743,7 @@ err_cfg_txqs:
1336 + */
1337 + int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
1338 + {
1339 +- return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
1340 ++ return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
1341 + }
1342 +
1343 + /**
1344 +@@ -1757,7 +1758,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
1345 + int ret;
1346 + int i;
1347 +
1348 +- ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
1349 ++ ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
1350 + if (ret)
1351 + return ret;
1352 +
1353 +@@ -1955,17 +1956,18 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
1354 + * @rst_src: reset source
1355 + * @rel_vmvf_num: Relative ID of VF/VM
1356 + * @rings: Tx ring array to be stopped
1357 ++ * @count: number of Tx ring array elements
1358 + */
1359 + static int
1360 + ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1361 +- u16 rel_vmvf_num, struct ice_ring **rings)
1362 ++ u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
1363 + {
1364 + u16 q_idx;
1365 +
1366 + if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
1367 + return -EINVAL;
1368 +
1369 +- for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
1370 ++ for (q_idx = 0; q_idx < count; q_idx++) {
1371 + struct ice_txq_meta txq_meta = { };
1372 + int status;
1373 +
1374 +@@ -1993,7 +1995,7 @@ int
1375 + ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1376 + u16 rel_vmvf_num)
1377 + {
1378 +- return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
1379 ++ return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
1380 + }
1381 +
1382 + /**
1383 +@@ -2002,7 +2004,7 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
1384 + */
1385 + int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
1386 + {
1387 +- return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
1388 ++ return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
1389 + }
1390 +
1391 + /**
1392 +diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
1393 +index 6f30aad7695fb..1567ddd4c5b87 100644
1394 +--- a/drivers/net/ethernet/intel/ice/ice_main.c
1395 ++++ b/drivers/net/ethernet/intel/ice/ice_main.c
1396 +@@ -2539,6 +2539,20 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
1397 + return (ret || xdp_ring_err) ? -ENOMEM : 0;
1398 + }
1399 +
1400 ++/**
1401 ++ * ice_xdp_safe_mode - XDP handler for safe mode
1402 ++ * @dev: netdevice
1403 ++ * @xdp: XDP command
1404 ++ */
1405 ++static int ice_xdp_safe_mode(struct net_device __always_unused *dev,
1406 ++ struct netdev_bpf *xdp)
1407 ++{
1408 ++ NL_SET_ERR_MSG_MOD(xdp->extack,
1409 ++ "Please provide working DDP firmware package in order to use XDP\n"
1410 ++ "Refer to Documentation/networking/device_drivers/ethernet/intel/ice.rst");
1411 ++ return -EOPNOTSUPP;
1412 ++}
1413 ++
1414 + /**
1415 + * ice_xdp - implements XDP handler
1416 + * @dev: netdevice
1417 +@@ -6786,6 +6800,7 @@ static const struct net_device_ops ice_netdev_safe_mode_ops = {
1418 + .ndo_change_mtu = ice_change_mtu,
1419 + .ndo_get_stats64 = ice_get_stats64,
1420 + .ndo_tx_timeout = ice_tx_timeout,
1421 ++ .ndo_bpf = ice_xdp_safe_mode,
1422 + };
1423 +
1424 + static const struct net_device_ops ice_netdev_ops = {
1425 +diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
1426 +index 135ba5b6ae980..072075bc60ee9 100644
1427 +--- a/drivers/net/ethernet/lantiq_xrx200.c
1428 ++++ b/drivers/net/ethernet/lantiq_xrx200.c
1429 +@@ -154,6 +154,7 @@ static int xrx200_close(struct net_device *net_dev)
1430 +
1431 + static int xrx200_alloc_skb(struct xrx200_chan *ch)
1432 + {
1433 ++ struct sk_buff *skb = ch->skb[ch->dma.desc];
1434 + dma_addr_t mapping;
1435 + int ret = 0;
1436 +
1437 +@@ -168,6 +169,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
1438 + XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
1439 + if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
1440 + dev_kfree_skb_any(ch->skb[ch->dma.desc]);
1441 ++ ch->skb[ch->dma.desc] = skb;
1442 + ret = -ENOMEM;
1443 + goto skip;
1444 + }
1445 +@@ -198,7 +200,6 @@ static int xrx200_hw_receive(struct xrx200_chan *ch)
1446 + ch->dma.desc %= LTQ_DESC_NUM;
1447 +
1448 + if (ret) {
1449 +- ch->skb[ch->dma.desc] = skb;
1450 + net_dev->stats.rx_dropped++;
1451 + netdev_err(net_dev, "failed to allocate new rx buffer\n");
1452 + return ret;
1453 +@@ -352,8 +353,8 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr)
1454 + struct xrx200_chan *ch = ptr;
1455 +
1456 + if (napi_schedule_prep(&ch->napi)) {
1457 +- __napi_schedule(&ch->napi);
1458 + ltq_dma_disable_irq(&ch->dma);
1459 ++ __napi_schedule(&ch->napi);
1460 + }
1461 +
1462 + ltq_dma_ack_irq(&ch->dma);
1463 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
1464 +index 3d45341e2216f..26f7fab109d97 100644
1465 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
1466 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
1467 +@@ -532,9 +532,6 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
1468 + struct mlx5_core_dev *mdev = priv->mdev;
1469 + struct net_device *netdev = priv->netdev;
1470 +
1471 +- if (!priv->ipsec)
1472 +- return;
1473 +-
1474 + if (!(mlx5_accel_ipsec_device_caps(mdev) & MLX5_ACCEL_IPSEC_CAP_ESP) ||
1475 + !MLX5_CAP_ETH(mdev, swp)) {
1476 + mlx5_core_dbg(mdev, "mlx5e: ESP and SWP offload not supported\n");
1477 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1478 +index f18b52be32e98..d81fa8e561991 100644
1479 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1480 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1481 +@@ -4958,13 +4958,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
1482 + }
1483 +
1484 + if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)) {
1485 +- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
1486 +- NETIF_F_GSO_UDP_TUNNEL_CSUM;
1487 +- netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
1488 +- NETIF_F_GSO_UDP_TUNNEL_CSUM;
1489 +- netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
1490 +- netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL |
1491 +- NETIF_F_GSO_UDP_TUNNEL_CSUM;
1492 ++ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
1493 ++ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1494 ++ netdev->vlan_features |= NETIF_F_GSO_UDP_TUNNEL;
1495 + }
1496 +
1497 + if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
1498 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1499 +index 80abdb0b47d7e..59837af959d06 100644
1500 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1501 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1502 +@@ -5206,7 +5206,7 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
1503 + list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
1504 + wait_for_completion(&hpe->res_ready);
1505 + if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
1506 +- hpe->hp->pair->peer_gone = true;
1507 ++ mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);
1508 +
1509 + mlx5e_hairpin_put(priv, hpe);
1510 + }
1511 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1512 +index d61539b5567c0..401b2f5128dd4 100644
1513 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1514 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1515 +@@ -1302,6 +1302,12 @@ static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
1516 + (!vport_num && mlx5_core_is_ecpf(esw->dev)))
1517 + vport->info.trusted = true;
1518 +
1519 ++ /* External controller host PF has factory programmed MAC.
1520 ++ * Read it from the device.
1521 ++ */
1522 ++ if (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF)
1523 ++ mlx5_query_nic_vport_mac_address(esw->dev, vport_num, true, vport->info.mac);
1524 ++
1525 + esw_vport_change_handle_locked(vport);
1526 +
1527 + esw->enabled_vports++;
1528 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
1529 +index 9eb51f06d3ae2..d1972508338cf 100644
1530 +--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
1531 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
1532 +@@ -54,7 +54,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
1533 + mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
1534 + mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
1535 + mkey->size = MLX5_GET64(mkc, mkc, len);
1536 +- mkey->key |= mlx5_idx_to_mkey(mkey_index);
1537 ++ mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
1538 + mkey->pd = MLX5_GET(mkc, mkc, pd);
1539 +
1540 + mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, mkey->key);
1541 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
1542 +index 8e0dddc6383f0..2389239acadc9 100644
1543 +--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
1544 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
1545 +@@ -156,6 +156,9 @@ void mlx5_rdma_enable_roce(struct mlx5_core_dev *dev)
1546 + {
1547 + int err;
1548 +
1549 ++ if (!MLX5_CAP_GEN(dev, roce))
1550 ++ return;
1551 ++
1552 + err = mlx5_nic_vport_enable_roce(dev);
1553 + if (err) {
1554 + mlx5_core_err(dev, "Failed to enable RoCE: %d\n", err);
1555 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
1556 +index 51bbd88ff021c..fd56cae0d54fc 100644
1557 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
1558 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
1559 +@@ -78,9 +78,9 @@ int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
1560 + caps->uplink_icm_address_tx =
1561 + MLX5_CAP64_ESW_FLOWTABLE(mdev,
1562 + sw_steering_uplink_icm_address_tx);
1563 +- caps->sw_owner =
1564 +- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev,
1565 +- sw_owner);
1566 ++ caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
1567 ++ if (!caps->sw_owner_v2)
1568 ++ caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
1569 +
1570 + return 0;
1571 + }
1572 +@@ -113,10 +113,15 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
1573 + caps->nic_tx_allow_address =
1574 + MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
1575 +
1576 +- caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
1577 +- caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
1578 ++ caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
1579 ++ caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
1580 ++
1581 ++ if (!caps->rx_sw_owner_v2)
1582 ++ caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
1583 ++ if (!caps->tx_sw_owner_v2)
1584 ++ caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
1585 +
1586 +- caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
1587 ++ caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
1588 +
1589 + caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
1590 + caps->hdr_modify_icm_addr =
1591 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
1592 +index aa2c2d6c44e6b..00d861361428f 100644
1593 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
1594 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
1595 +@@ -4,6 +4,11 @@
1596 + #include <linux/mlx5/eswitch.h>
1597 + #include "dr_types.h"
1598 +
1599 ++#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
1600 ++ ((dmn)->info.caps.dmn_type##_sw_owner || \
1601 ++ ((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
1602 ++ (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
1603 ++
1604 + static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
1605 + {
1606 + /* Per vport cached FW FT for checksum recalculation, this
1607 +@@ -181,6 +186,7 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
1608 + return ret;
1609 +
1610 + dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
1611 ++ dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
1612 + dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
1613 + dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
1614 +
1615 +@@ -223,18 +229,13 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
1616 + if (ret)
1617 + return ret;
1618 +
1619 +- if (dmn->info.caps.sw_format_ver != MLX5_STEERING_FORMAT_CONNECTX_5) {
1620 +- mlx5dr_err(dmn, "SW steering is not supported on this device\n");
1621 +- return -EOPNOTSUPP;
1622 +- }
1623 +-
1624 + ret = dr_domain_query_fdb_caps(mdev, dmn);
1625 + if (ret)
1626 + return ret;
1627 +
1628 + switch (dmn->type) {
1629 + case MLX5DR_DOMAIN_TYPE_NIC_RX:
1630 +- if (!dmn->info.caps.rx_sw_owner)
1631 ++ if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
1632 + return -ENOTSUPP;
1633 +
1634 + dmn->info.supp_sw_steering = true;
1635 +@@ -243,7 +244,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
1636 + dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
1637 + break;
1638 + case MLX5DR_DOMAIN_TYPE_NIC_TX:
1639 +- if (!dmn->info.caps.tx_sw_owner)
1640 ++ if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
1641 + return -ENOTSUPP;
1642 +
1643 + dmn->info.supp_sw_steering = true;
1644 +@@ -255,7 +256,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
1645 + if (!dmn->info.caps.eswitch_manager)
1646 + return -ENOTSUPP;
1647 +
1648 +- if (!dmn->info.caps.fdb_sw_owner)
1649 ++ if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
1650 + return -ENOTSUPP;
1651 +
1652 + dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX;
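
The dr_domain.c change above replaces a hard "ConnectX-5 only" gate with a per-domain predicate: software steering is usable if the domain owns steering in the v1 sense, or in the v2 sense on a steering format no newer than ConnectX-6 Dx. The predicate restated as standalone C; the enum values mirror what the mlx5 headers appear to use but are assumptions here:

    #include <assert.h>
    #include <stdbool.h>

    enum { FMT_CX5 = 0, FMT_CX6DX = 1, FMT_NEWER = 2 };

    static bool sw_steering_supported(bool v1, bool v2, int fmt)
    {
            return v1 || (v2 && fmt <= FMT_CX6DX);
    }

    int main(void)
    {
            assert(sw_steering_supported(true,  false, FMT_NEWER));
            assert(sw_steering_supported(false, true,  FMT_CX6DX));
            assert(!sw_steering_supported(false, true, FMT_NEWER));
            return 0;
    }
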
1653 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
1654 +index cf62ea4f882e6..42c49f09e9d3f 100644
1655 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
1656 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
1657 +@@ -597,7 +597,8 @@ struct mlx5dr_esw_caps {
1658 + u64 drop_icm_address_tx;
1659 + u64 uplink_icm_address_rx;
1660 + u64 uplink_icm_address_tx;
1661 +- bool sw_owner;
1662 ++ u8 sw_owner:1;
1663 ++ u8 sw_owner_v2:1;
1664 + };
1665 +
1666 + struct mlx5dr_cmd_vport_cap {
1667 +@@ -630,6 +631,9 @@ struct mlx5dr_cmd_caps {
1668 + bool rx_sw_owner;
1669 + bool tx_sw_owner;
1670 + bool fdb_sw_owner;
1671 ++ u8 rx_sw_owner_v2:1;
1672 ++ u8 tx_sw_owner_v2:1;
1673 ++ u8 fdb_sw_owner_v2:1;
1674 + u32 num_vports;
1675 + struct mlx5dr_esw_caps esw_caps;
1676 + struct mlx5dr_cmd_vport_cap *vports_caps;
1677 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
1678 +index 7914fe3fc68d8..454968ba68313 100644
1679 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
1680 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
1681 +@@ -124,7 +124,11 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
1682 + static inline bool
1683 + mlx5dr_is_supported(struct mlx5_core_dev *dev)
1684 + {
1685 +- return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner);
1686 ++ return MLX5_CAP_GEN(dev, roce) &&
1687 ++ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
1688 ++ (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
1689 ++ (MLX5_CAP_GEN(dev, steering_format_version) <=
1690 ++ MLX5_STEERING_FORMAT_CONNECTX_6DX)));
1691 + }
1692 +
1693 + #endif /* _MLX5DR_H_ */
1694 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
1695 +index 01cc00ad8acf2..b6931bbe52d29 100644
1696 +--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
1697 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
1698 +@@ -424,6 +424,15 @@ err_modify_sq:
1699 + return err;
1700 + }
1701 +
1702 ++static void mlx5_hairpin_unpair_peer_sq(struct mlx5_hairpin *hp)
1703 ++{
1704 ++ int i;
1705 ++
1706 ++ for (i = 0; i < hp->num_channels; i++)
1707 ++ mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
1708 ++ MLX5_SQC_STATE_RST, 0, 0);
1709 ++}
1710 ++
1711 + static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
1712 + {
1713 + int i;
1714 +@@ -432,13 +441,9 @@ static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
1715 + for (i = 0; i < hp->num_channels; i++)
1716 + mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
1717 + MLX5_RQC_STATE_RST, 0, 0);
1718 +-
1719 + /* unset peer SQs */
1720 +- if (hp->peer_gone)
1721 +- return;
1722 +- for (i = 0; i < hp->num_channels; i++)
1723 +- mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
1724 +- MLX5_SQC_STATE_RST, 0, 0);
1725 ++ if (!hp->peer_gone)
1726 ++ mlx5_hairpin_unpair_peer_sq(hp);
1727 + }
1728 +
1729 + struct mlx5_hairpin *
1730 +@@ -485,3 +490,16 @@ void mlx5_core_hairpin_destroy(struct mlx5_hairpin *hp)
1731 + mlx5_hairpin_destroy_queues(hp);
1732 + kfree(hp);
1733 + }
1734 ++
1735 ++void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp)
1736 ++{
1737 ++ int i;
1738 ++
1739 ++ mlx5_hairpin_unpair_peer_sq(hp);
1740 ++
1741 ++ /* destroy peer SQ */
1742 ++ for (i = 0; i < hp->num_channels; i++)
1743 ++ mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
1744 ++
1745 ++ hp->peer_gone = true;
1746 ++}
1747 +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
1748 +index bdafc85fd874d..fc91bbf7d0c37 100644
1749 +--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
1750 ++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
1751 +@@ -464,8 +464,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
1752 + void *in;
1753 + int err;
1754 +
1755 +- if (!vport)
1756 +- return -EINVAL;
1757 + if (!MLX5_CAP_GEN(mdev, vport_group_manager))
1758 + return -EACCES;
1759 +
1760 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
1761 +index bf85ce9835d7f..42e4437ac3c16 100644
1762 +--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
1763 ++++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
1764 +@@ -708,7 +708,8 @@ mlxsw_thermal_module_tz_init(struct mlxsw_thermal_module *module_tz)
1765 + MLXSW_THERMAL_TRIP_MASK,
1766 + module_tz,
1767 + &mlxsw_thermal_module_ops,
1768 +- NULL, 0, 0);
1769 ++ NULL, 0,
1770 ++ module_tz->parent->polling_delay);
1771 + if (IS_ERR(module_tz->tzdev)) {
1772 + err = PTR_ERR(module_tz->tzdev);
1773 + return err;
1774 +@@ -830,7 +831,8 @@ mlxsw_thermal_gearbox_tz_init(struct mlxsw_thermal_module *gearbox_tz)
1775 + MLXSW_THERMAL_TRIP_MASK,
1776 + gearbox_tz,
1777 + &mlxsw_thermal_gearbox_ops,
1778 +- NULL, 0, 0);
1779 ++ NULL, 0,
1780 ++ gearbox_tz->parent->polling_delay);
1781 + if (IS_ERR(gearbox_tz->tzdev))
1782 + return PTR_ERR(gearbox_tz->tzdev);
1783 +
1784 +diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
1785 +index 3c3069afc0a31..c670bf3464c2a 100644
1786 +--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
1787 ++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
1788 +@@ -3641,7 +3641,7 @@ MLXSW_ITEM32(reg, qeec, max_shaper_bs, 0x1C, 0, 6);
1789 + #define MLXSW_REG_QEEC_HIGHEST_SHAPER_BS 25
1790 + #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1 5
1791 + #define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2 11
1792 +-#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 5
1793 ++#define MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3 11
1794 +
1795 + static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
1796 + enum mlxsw_reg_qeec_hr hr, u8 index,
1797 +diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
1798 +index aa400b925b08e..5bfc7acfd13a9 100644
1799 +--- a/drivers/net/ethernet/mscc/ocelot.c
1800 ++++ b/drivers/net/ethernet/mscc/ocelot.c
1801 +@@ -355,6 +355,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
1802 +
1803 + int ocelot_port_flush(struct ocelot *ocelot, int port)
1804 + {
1805 ++ unsigned int pause_ena;
1806 + int err, val;
1807 +
1808 + /* Disable dequeuing from the egress queues */
1809 +@@ -363,6 +364,7 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
1810 + QSYS_PORT_MODE, port);
1811 +
1812 + /* Disable flow control */
1813 ++ ocelot_fields_read(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, &pause_ena);
1814 + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
1815 +
1816 + /* Disable priority flow control */
1817 +@@ -398,6 +400,9 @@ int ocelot_port_flush(struct ocelot *ocelot, int port)
1818 + /* Clear flushing again. */
1819 + ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
1820 +
1821 ++ /* Re-enable flow control */
1822 ++ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, pause_ena);
1823 ++
1824 + return err;
1825 + }
1826 + EXPORT_SYMBOL(ocelot_port_flush);
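
The ocelot hunk above turns a one-way "disable flow control" into a save/restore bracket around the port flush, so PAUSE stays configured the way it was once the flush completes. The same read-save-restore shape, reduced to a toy with a fake register:

    #include <stdio.h>

    static unsigned int pause_ena = 1;         /* fake PAUSE_ENA field */
    static unsigned int reg_read(void)    { return pause_ena; }
    static void reg_write(unsigned int v) { pause_ena = v; }

    static void flush_port(void)
    {
            unsigned int saved = reg_read();   /* remember old value */

            reg_write(0);                      /* quiesce for the flush */
            /* ... drain and flush the port here ... */
            reg_write(saved);                  /* restore, not force-on */
    }

    int main(void)
    {
            flush_port();
            printf("pause_ena=%u\n", pause_ena);   /* still 1 */
            return 0;
    }
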
1827 +diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1828 +index d258e0ccf9465..e2046b6d65a30 100644
1829 +--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1830 ++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
1831 +@@ -1602,6 +1602,8 @@ err_out_free_netdev:
1832 + free_netdev(netdev);
1833 +
1834 + err_out_free_res:
1835 ++ if (NX_IS_REVISION_P3(pdev->revision))
1836 ++ pci_disable_pcie_error_reporting(pdev);
1837 + pci_release_regions(pdev);
1838 +
1839 + err_out_disable_pdev:
1840 +diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
1841 +index c2faf96fcade8..27c07b2412f46 100644
1842 +--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
1843 ++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
1844 +@@ -2692,6 +2692,7 @@ err_out_free_hw_res:
1845 + kfree(ahw);
1846 +
1847 + err_out_free_res:
1848 ++ pci_disable_pcie_error_reporting(pdev);
1849 + pci_release_regions(pdev);
1850 +
1851 + err_out_disable_pdev:
1852 +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
1853 +index fcdecddb28122..8d51b0cb545ca 100644
1854 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
1855 ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
1856 +@@ -26,7 +26,7 @@ static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
1857 + }
1858 +
1859 + /* Needs rtnl lock */
1860 +-static struct rmnet_port*
1861 ++struct rmnet_port*
1862 + rmnet_get_port_rtnl(const struct net_device *real_dev)
1863 + {
1864 + return rtnl_dereference(real_dev->rx_handler_data);
1865 +@@ -253,7 +253,10 @@ static int rmnet_config_notify_cb(struct notifier_block *nb,
1866 + netdev_dbg(real_dev, "Kernel unregister\n");
1867 + rmnet_force_unassociate_device(real_dev);
1868 + break;
1869 +-
1870 ++ case NETDEV_CHANGEMTU:
1871 ++ if (rmnet_vnd_validate_real_dev_mtu(real_dev))
1872 ++ return NOTIFY_BAD;
1873 ++ break;
1874 + default:
1875 + break;
1876 + }
1877 +@@ -329,9 +332,17 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
1878 +
1879 + if (data[IFLA_RMNET_FLAGS]) {
1880 + struct ifla_rmnet_flags *flags;
1881 ++ u32 old_data_format;
1882 +
1883 ++ old_data_format = port->data_format;
1884 + flags = nla_data(data[IFLA_RMNET_FLAGS]);
1885 + port->data_format = flags->flags & flags->mask;
1886 ++
1887 ++ if (rmnet_vnd_update_dev_mtu(port, real_dev)) {
1888 ++ port->data_format = old_data_format;
1889 ++ NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
1890 ++ return -EINVAL;
1891 ++ }
1892 + }
1893 +
1894 + return 0;
1895 +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
1896 +index be515982d6286..8d8d4690a0745 100644
1897 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
1898 ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
1899 +@@ -73,4 +73,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
1900 + struct netlink_ext_ack *extack);
1901 + int rmnet_del_bridge(struct net_device *rmnet_dev,
1902 + struct net_device *slave_dev);
1903 ++struct rmnet_port*
1904 ++rmnet_get_port_rtnl(const struct net_device *real_dev);
1905 + #endif /* _RMNET_CONFIG_H_ */
1906 +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
1907 +index d58b51d277f18..2adcf24848a45 100644
1908 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
1909 ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
1910 +@@ -58,9 +58,30 @@ static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
1911 + return NETDEV_TX_OK;
1912 + }
1913 +
1914 ++static int rmnet_vnd_headroom(struct rmnet_port *port)
1915 ++{
1916 ++ u32 headroom;
1917 ++
1918 ++ headroom = sizeof(struct rmnet_map_header);
1919 ++
1920 ++ if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
1921 ++ headroom += sizeof(struct rmnet_map_ul_csum_header);
1922 ++
1923 ++ return headroom;
1924 ++}
1925 ++
1926 + static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
1927 + {
1928 +- if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
1929 ++ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
1930 ++ struct rmnet_port *port;
1931 ++ u32 headroom;
1932 ++
1933 ++ port = rmnet_get_port_rtnl(priv->real_dev);
1934 ++
1935 ++ headroom = rmnet_vnd_headroom(port);
1936 ++
1937 ++ if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE ||
1938 ++ new_mtu > (priv->real_dev->mtu - headroom))
1939 + return -EINVAL;
1940 +
1941 + rmnet_dev->mtu = new_mtu;
1942 +@@ -104,24 +125,24 @@ static void rmnet_get_stats64(struct net_device *dev,
1943 + struct rtnl_link_stats64 *s)
1944 + {
1945 + struct rmnet_priv *priv = netdev_priv(dev);
1946 +- struct rmnet_vnd_stats total_stats;
1947 ++ struct rmnet_vnd_stats total_stats = { };
1948 + struct rmnet_pcpu_stats *pcpu_ptr;
1949 ++ struct rmnet_vnd_stats snapshot;
1950 + unsigned int cpu, start;
1951 +
1952 +- memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
1953 +-
1954 + for_each_possible_cpu(cpu) {
1955 + pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
1956 +
1957 + do {
1958 + start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
1959 +- total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
1960 +- total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
1961 +- total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
1962 +- total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
1963 ++ snapshot = pcpu_ptr->stats; /* struct assignment */
1964 + } while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));
1965 +
1966 +- total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
1967 ++ total_stats.rx_pkts += snapshot.rx_pkts;
1968 ++ total_stats.rx_bytes += snapshot.rx_bytes;
1969 ++ total_stats.tx_pkts += snapshot.tx_pkts;
1970 ++ total_stats.tx_bytes += snapshot.tx_bytes;
1971 ++ total_stats.tx_drops += snapshot.tx_drops;
1972 + }
1973 +
1974 + s->rx_packets = total_stats.rx_pkts;
1975 +@@ -229,6 +250,7 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
1976 +
1977 + {
1978 + struct rmnet_priv *priv = netdev_priv(rmnet_dev);
1979 ++ u32 headroom;
1980 + int rc;
1981 +
1982 + if (rmnet_get_endpoint(port, id)) {
1983 +@@ -242,6 +264,13 @@ int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
1984 +
1985 + priv->real_dev = real_dev;
1986 +
1987 ++ headroom = rmnet_vnd_headroom(port);
1988 ++
1989 ++ if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) {
1990 ++ NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
1991 ++ return -EINVAL;
1992 ++ }
1993 ++
1994 + rc = register_netdevice(rmnet_dev);
1995 + if (!rc) {
1996 + ep->egress_dev = rmnet_dev;
1997 +@@ -283,3 +312,45 @@ int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
1998 +
1999 + return 0;
2000 + }
2001 ++
2002 ++int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev)
2003 ++{
2004 ++ struct hlist_node *tmp_ep;
2005 ++ struct rmnet_endpoint *ep;
2006 ++ struct rmnet_port *port;
2007 ++ unsigned long bkt_ep;
2008 ++ u32 headroom;
2009 ++
2010 ++ port = rmnet_get_port_rtnl(real_dev);
2011 ++
2012 ++ headroom = rmnet_vnd_headroom(port);
2013 ++
2014 ++ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
2015 ++ if (ep->egress_dev->mtu > (real_dev->mtu - headroom))
2016 ++ return -1;
2017 ++ }
2018 ++
2019 ++ return 0;
2020 ++}
2021 ++
2022 ++int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
2023 ++ struct net_device *real_dev)
2024 ++{
2025 ++ struct hlist_node *tmp_ep;
2026 ++ struct rmnet_endpoint *ep;
2027 ++ unsigned long bkt_ep;
2028 ++ u32 headroom;
2029 ++
2030 ++ headroom = rmnet_vnd_headroom(port);
2031 ++
2032 ++ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
2033 ++ if (ep->egress_dev->mtu <= (real_dev->mtu - headroom))
2034 ++ continue;
2035 ++
2036 ++ if (rmnet_vnd_change_mtu(ep->egress_dev,
2037 ++ real_dev->mtu - headroom))
2038 ++ return -1;
2039 ++ }
2040 ++
2041 ++ return 0;
2042 ++}
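
The rmnet changes above enforce one invariant in three places (link create, flag changes, and MTU changes on the real device): each muxed device's MTU must fit within the real device's MTU minus the MAP encapsulation headroom, which grows when egress checksum offload is enabled. The arithmetic, worked standalone; the 4-byte header sizes are assumptions for illustration, not values taken from the patch:

    #include <stdio.h>

    #define MAP_HDR_LEN     4  /* assumed sizeof(struct rmnet_map_header) */
    #define UL_CSUM_HDR_LEN 4  /* assumed ul_csum_header size */

    static int headroom(int csum_offload)
    {
            return MAP_HDR_LEN + (csum_offload ? UL_CSUM_HDR_LEN : 0);
    }

    int main(void)
    {
            int real_mtu = 1500;

            /* largest rmnet MTU the new checks would accept */
            printf("no csum:   %d\n", real_mtu - headroom(0)); /* 1496 */
            printf("with csum: %d\n", real_mtu - headroom(1)); /* 1492 */
            return 0;
    }
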
2043 +diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
2044 +index 4967f3461ed1e..dc3a4443ef0af 100644
2045 +--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
2046 ++++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
2047 +@@ -18,4 +18,7 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
2048 + void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
2049 + void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
2050 + void rmnet_vnd_setup(struct net_device *dev);
2051 ++int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev);
2052 ++int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
2053 ++ struct net_device *real_dev);
2054 + #endif /* _RMNET_VND_H_ */
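
The get_stats64 hunk in rmnet_vnd.c above is also a correctness fix for the per-CPU counter read: the seqcount retry loop now only copies a snapshot, and the sums are accumulated after the loop, because a retried iteration that accumulates in place adds the same counters twice. The copy-then-accumulate pattern as a standalone toy, with the retry condition simulated:

    #include <stdio.h>

    struct stats { long pkts, bytes; };

    /* Simulates u64_stats_fetch_retry(): force one retry. */
    static int need_retry(int attempt) { return attempt == 0; }

    int main(void)
    {
            struct stats percpu = { 10, 1000 }, snap, total = { 0, 0 };
            int attempt = 0;

            do {
                    snap = percpu;           /* copy only, no summing */
            } while (need_retry(attempt++)); /* re-copy is harmless */

            total.pkts  += snap.pkts;        /* accumulate exactly once */
            total.bytes += snap.bytes;
            printf("%ld %ld\n", total.pkts, total.bytes); /* 10 1000 */
            return 0;
    }
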
2055 +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
2056 +index b70d44ac09906..3c73453725f94 100644
2057 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
2058 ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
2059 +@@ -76,10 +76,10 @@ enum power_event {
2060 + #define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
2061 +
2062 + /* GMAC HW ADDR regs */
2063 +-#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
2064 +- (reg * 8))
2065 +-#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
2066 +- (reg * 8))
2067 ++#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
2068 ++ 0x00000040 + (reg * 8))
2069 ++#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
2070 ++ 0x00000044 + (reg * 8))
2071 + #define GMAC_MAX_PERFECT_ADDRESSES 1
2072 +
2073 + #define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
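
The dwmac1000 hunk above corrects the offset math for MAC address registers 16 and above: the second register bank starts at 0x800 and must be indexed from zero, so register 16 lives at 0x800 rather than 0x800 + 16*8. Old versus new arithmetic, checked standalone:

    #include <stdio.h>

    #define OLD_HIGH(reg) (((reg) > 15 ? 0x800 : 0x40) + (reg) * 8)
    #define NEW_HIGH(reg) ((reg) > 15 ? 0x800 + ((reg) - 16) * 8 \
                                      : 0x40 + (reg) * 8)

    int main(void)
    {
            /* reg 0 is unchanged; reg 16 moves from 0x880 to 0x800 */
            printf("reg0:  old %#x new %#x\n", OLD_HIGH(0),  NEW_HIGH(0));
            printf("reg16: old %#x new %#x\n", OLD_HIGH(16), NEW_HIGH(16));
            return 0;
    }
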
2074 +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2075 +index af34a4cadbb0a..ff95400594fc1 100644
2076 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2077 ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2078 +@@ -626,6 +626,8 @@ error_pclk_get:
2079 + void stmmac_remove_config_dt(struct platform_device *pdev,
2080 + struct plat_stmmacenet_data *plat)
2081 + {
2082 ++ clk_disable_unprepare(plat->stmmac_clk);
2083 ++ clk_disable_unprepare(plat->pclk);
2084 + of_node_put(plat->phy_node);
2085 + of_node_put(plat->mdio_node);
2086 + }
2087 +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
2088 +index 030185301014c..01bb36e7cff0a 100644
2089 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
2090 ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
2091 +@@ -849,7 +849,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2092 + smp_mb();
2093 +
2094 + /* Space might have just been freed - check again */
2095 +- if (temac_check_tx_bd_space(lp, num_frag))
2096 ++ if (temac_check_tx_bd_space(lp, num_frag + 1))
2097 + return NETDEV_TX_BUSY;
2098 +
2099 + netif_wake_queue(ndev);
2100 +@@ -876,7 +876,6 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2101 + return NETDEV_TX_OK;
2102 + }
2103 + cur_p->phys = cpu_to_be32(skb_dma_addr);
2104 +- ptr_to_txbd((void *)skb, cur_p);
2105 +
2106 + for (ii = 0; ii < num_frag; ii++) {
2107 + if (++lp->tx_bd_tail >= lp->tx_bd_num)
2108 +@@ -915,6 +914,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2109 + }
2110 + cur_p->app0 |= cpu_to_be32(STS_CTRL_APP0_EOP);
2111 +
2112 ++ /* Mark last fragment with skb address, so it can be consumed
2113 ++ * in temac_start_xmit_done()
2114 ++ */
2115 ++ ptr_to_txbd((void *)skb, cur_p);
2116 ++
2117 + tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
2118 + lp->tx_bd_tail++;
2119 + if (lp->tx_bd_tail >= lp->tx_bd_num)
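
The ll_temac hunk above makes two related corrections to the TX path: the free-descriptor recheck now reserves num_frag + 1 slots, counting the descriptor the head fragment itself occupies, and the skb pointer moves from the first descriptor of a chain to the last, so the completion handler frees the packet only after every fragment has been sent. A minimal sketch of the last-descriptor marking; struct desc and mark_chain() are illustrative:

    struct desc { void *cookie; };

    /* Attach the completion cookie (the skb in the driver) to the
     * LAST descriptor, so in-order reclaim frees the packet once,
     * after all fragments have completed. */
    static void mark_chain(struct desc *chain, int n, void *cookie)
    {
            chain[n - 1].cookie = cookie;   /* not chain[0] */
    }

    int main(void)
    {
            struct desc d[3] = { { 0 } };
            int skb;                        /* stand-in payload */

            mark_chain(d, 3, &skb);
            return d[2].cookie != (void *)&skb;  /* 0 on success */
    }
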
2120 +diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
2121 +index 17be2bb2985cd..920e9f888cc35 100644
2122 +--- a/drivers/net/hamradio/mkiss.c
2123 ++++ b/drivers/net/hamradio/mkiss.c
2124 +@@ -799,6 +799,7 @@ static void mkiss_close(struct tty_struct *tty)
2125 + ax->tty = NULL;
2126 +
2127 + unregister_netdev(ax->dev);
2128 ++ free_netdev(ax->dev);
2129 + }
2130 +
2131 + /* Perform I/O control on an active ax25 channel. */
2132 +diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
2133 +index 0eeec80bec311..e4a5703666461 100644
2134 +--- a/drivers/net/usb/cdc_eem.c
2135 ++++ b/drivers/net/usb/cdc_eem.c
2136 +@@ -123,10 +123,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
2137 + }
2138 +
2139 + skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
2140 ++ dev_kfree_skb_any(skb);
2141 + if (!skb2)
2142 + return NULL;
2143 +
2144 +- dev_kfree_skb_any(skb);
2145 + skb = skb2;
2146 +
2147 + done:
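
The cdc_eem hunk above moves dev_kfree_skb_any() in front of the NULL check: skb_copy_expand() never consumes its input, so the original skb must be freed whether or not the copy succeeded, and freeing it only on success leaked it on the error path. The ownership shape, sketched with plain stdlib allocation:

    #include <stdlib.h>
    #include <string.h>

    /* Models skb_copy_expand(): returns a new buffer or NULL and
     * never takes ownership of src. */
    static char *copy_expand(const char *src, size_t n, size_t extra)
    {
            char *dst = malloc(n + extra);

            if (dst)
                    memcpy(dst, src, n);
            return dst;
    }

    static char *grow(char *buf, size_t n, size_t extra)
    {
            char *bigger = copy_expand(buf, n, extra);

            free(buf);      /* always free the original first */
            return bigger;  /* may be NULL; nothing is leaked */
    }

    int main(void)
    {
            char *b = grow(strdup("eem"), 4, 8);

            free(b);
            return 0;
    }
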
2148 +diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2149 +index 1d3bf810f2ca1..04c4f1570bc8c 100644
2150 +--- a/drivers/net/usb/cdc_ncm.c
2151 ++++ b/drivers/net/usb/cdc_ncm.c
2152 +@@ -1900,7 +1900,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
2153 + static const struct driver_info cdc_ncm_info = {
2154 + .description = "CDC NCM",
2155 + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
2156 +- | FLAG_LINK_INTR,
2157 ++ | FLAG_LINK_INTR | FLAG_ETHER,
2158 + .bind = cdc_ncm_bind,
2159 + .unbind = cdc_ncm_unbind,
2160 + .manage_power = usbnet_manage_power,
2161 +diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
2162 +index d44657b54d2b6..378a12ae2d957 100644
2163 +--- a/drivers/net/usb/smsc75xx.c
2164 ++++ b/drivers/net/usb/smsc75xx.c
2165 +@@ -1483,7 +1483,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
2166 + ret = smsc75xx_wait_ready(dev, 0);
2167 + if (ret < 0) {
2168 + netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
2169 +- goto err;
2170 ++ goto free_pdata;
2171 + }
2172 +
2173 + smsc75xx_init_mac_address(dev);
2174 +@@ -1492,7 +1492,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
2175 + ret = smsc75xx_reset(dev);
2176 + if (ret < 0) {
2177 + netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
2178 +- goto err;
2179 ++ goto cancel_work;
2180 + }
2181 +
2182 + dev->net->netdev_ops = &smsc75xx_netdev_ops;
2183 +@@ -1503,8 +1503,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
2184 + dev->net->max_mtu = MAX_SINGLE_PACKET_SIZE;
2185 + return 0;
2186 +
2187 +-err:
2188 ++cancel_work:
2189 ++ cancel_work_sync(&pdata->set_multicast);
2190 ++free_pdata:
2191 + kfree(pdata);
2192 ++ dev->data[0] = 0;
2193 + return ret;
2194 + }
2195 +
2196 +@@ -1515,7 +1518,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
2197 + cancel_work_sync(&pdata->set_multicast);
2198 + netif_dbg(dev, ifdown, dev->net, "free pdata\n");
2199 + kfree(pdata);
2200 +- pdata = NULL;
2201 + dev->data[0] = 0;
2202 + }
2203 + }
2204 +diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
2205 +index b9b7e00b72a84..bc96ac0c5769c 100644
2206 +--- a/drivers/net/vrf.c
2207 ++++ b/drivers/net/vrf.c
2208 +@@ -1184,9 +1184,6 @@ static int vrf_dev_init(struct net_device *dev)
2209 +
2210 + dev->flags = IFF_MASTER | IFF_NOARP;
2211 +
2212 +- /* MTU is irrelevant for VRF device; set to 64k similar to lo */
2213 +- dev->mtu = 64 * 1024;
2214 +-
2215 + /* similarly, oper state is irrelevant; set to up to avoid confusion */
2216 + dev->operstate = IF_OPER_UP;
2217 + netdev_lockdep_set_classes(dev);
2218 +@@ -1620,7 +1617,8 @@ static void vrf_setup(struct net_device *dev)
2219 + * which breaks networking.
2220 + */
2221 + dev->min_mtu = IPV6_MIN_MTU;
2222 +- dev->max_mtu = ETH_MAX_MTU;
2223 ++ dev->max_mtu = IP6_MAX_MTU;
2224 ++ dev->mtu = dev->max_mtu;
2225 + }
2226 +
2227 + static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
2228 +diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
2229 +index 0be485a253273..41be72c74e3a4 100644
2230 +--- a/drivers/pci/controller/pci-aardvark.c
2231 ++++ b/drivers/pci/controller/pci-aardvark.c
2232 +@@ -514,7 +514,7 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
2233 + udelay(PIO_RETRY_DELAY);
2234 + }
2235 +
2236 +- dev_err(dev, "config read/write timed out\n");
2237 ++ dev_err(dev, "PIO read/write transfer time out\n");
2238 + return -ETIMEDOUT;
2239 + }
2240 +
2241 +@@ -657,6 +657,35 @@ static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
2242 + return true;
2243 + }
2244 +
2245 ++static bool advk_pcie_pio_is_running(struct advk_pcie *pcie)
2246 ++{
2247 ++ struct device *dev = &pcie->pdev->dev;
2248 ++
2249 ++ /*
2250 ++ * Trying to start a new PIO transfer when previous has not completed
2251 ++ * cause External Abort on CPU which results in kernel panic:
2252 ++ *
2253 ++ * SError Interrupt on CPU0, code 0xbf000002 -- SError
2254 ++ * Kernel panic - not syncing: Asynchronous SError Interrupt
2255 ++ *
2256 ++ * Functions advk_pcie_rd_conf() and advk_pcie_wr_conf() are protected
2257 ++ * by raw_spin_lock_irqsave() at pci_lock_config() level to prevent
2258 ++ * concurrent calls at the same time. But because PIO transfer may take
2259 ++ * about 1.5s when link is down or card is disconnected, it means that
2260 ++ * advk_pcie_wait_pio() does not always have to wait for completion.
2261 ++ *
2262 ++ * Some versions of ARM Trusted Firmware handles this External Abort at
2263 ++ * EL3 level and mask it to prevent kernel panic. Relevant TF-A commit:
2264 ++ * https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git/commit/?id=3c7dcdac5c50
2265 ++ */
2266 ++ if (advk_readl(pcie, PIO_START)) {
2267 ++ dev_err(dev, "Previous PIO read/write transfer is still running\n");
2268 ++ return true;
2269 ++ }
2270 ++
2271 ++ return false;
2272 ++}
2273 ++
2274 + static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2275 + int where, int size, u32 *val)
2276 + {
2277 +@@ -673,9 +702,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2278 + return pci_bridge_emul_conf_read(&pcie->bridge, where,
2279 + size, val);
2280 +
2281 +- /* Start PIO */
2282 +- advk_writel(pcie, 0, PIO_START);
2283 +- advk_writel(pcie, 1, PIO_ISR);
2284 ++ if (advk_pcie_pio_is_running(pcie)) {
2285 ++ *val = 0xffffffff;
2286 ++ return PCIBIOS_SET_FAILED;
2287 ++ }
2288 +
2289 + /* Program the control register */
2290 + reg = advk_readl(pcie, PIO_CTRL);
2291 +@@ -694,7 +724,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
2292 + /* Program the data strobe */
2293 + advk_writel(pcie, 0xf, PIO_WR_DATA_STRB);
2294 +
2295 +- /* Start the transfer */
2296 ++ /* Clear PIO DONE ISR and start the transfer */
2297 ++ advk_writel(pcie, 1, PIO_ISR);
2298 + advk_writel(pcie, 1, PIO_START);
2299 +
2300 + ret = advk_pcie_wait_pio(pcie);
2301 +@@ -734,9 +765,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2302 + if (where % size)
2303 + return PCIBIOS_SET_FAILED;
2304 +
2305 +- /* Start PIO */
2306 +- advk_writel(pcie, 0, PIO_START);
2307 +- advk_writel(pcie, 1, PIO_ISR);
2308 ++ if (advk_pcie_pio_is_running(pcie))
2309 ++ return PCIBIOS_SET_FAILED;
2310 +
2311 + /* Program the control register */
2312 + reg = advk_readl(pcie, PIO_CTRL);
2313 +@@ -763,7 +793,8 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
2314 + /* Program the data strobe */
2315 + advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB);
2316 +
2317 +- /* Start the transfer */
2318 ++ /* Clear PIO DONE ISR and start the transfer */
2319 ++ advk_writel(pcie, 1, PIO_ISR);
2320 + advk_writel(pcie, 1, PIO_START);
2321 +
2322 + ret = advk_pcie_wait_pio(pcie);
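
The aardvark changes above stop blindly clearing PIO_START before each config access and instead refuse to start a new PIO transfer while the previous one is still running, since restarting the engine mid-transfer raises the SError described in the comment. They also delay clearing the done-ISR until just before the transfer actually starts. The guard, reduced to a toy with a fake register; pio_read() here is illustrative, not the driver's function:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int pio_start;          /* fake PIO_START register */

    static bool pio_is_running(void) { return pio_start != 0; }

    static int pio_read(unsigned int *val)
    {
            if (pio_is_running()) {
                    *val = 0xffffffff;      /* all-ones = failed read */
                    return -1;
            }
            /* clear done-ISR, program address and strobe ... */
            pio_start = 1;                  /* kick the transfer */
            /* ... wait for completion; hardware drops the bit ... */
            pio_start = 0;
            *val = 0;
            return 0;
    }

    int main(void)
    {
            unsigned int v;

            printf("%d %#x\n", pio_read(&v), v);
            return 0;
    }
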
2323 +diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2324 +index b570f297e3ec1..16fb3d7714d51 100644
2325 +--- a/drivers/pci/quirks.c
2326 ++++ b/drivers/pci/quirks.c
2327 +@@ -3557,6 +3557,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
2328 + dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
2329 + }
2330 +
2331 ++/*
2332 ++ * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be
2333 ++ * prevented for those affected devices.
2334 ++ */
2335 ++static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
2336 ++{
2337 ++ if ((dev->device & 0xffc0) == 0x2340)
2338 ++ quirk_no_bus_reset(dev);
2339 ++}
2340 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
2341 ++ quirk_nvidia_no_bus_reset);
2342 ++
2343 + /*
2344 + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
2345 + * The device will throw a Link Down error on AER-capable systems and
2346 +@@ -3577,6 +3589,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
2347 + */
2348 + DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
2349 +
2350 ++/*
2351 ++ * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
2352 ++ * automatically disables LTSSM when Secondary Bus Reset is received and
2353 ++ * the device stops working. Prevent bus reset for these devices. With
2354 ++ * this change, the device can be assigned to VMs with VFIO, but it will
2355 ++ * leak state between VMs. Reference
2356 ++ * https://e2e.ti.com/support/processors/f/791/t/954382
2357 ++ */
2358 ++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
2359 ++
2360 + static void quirk_no_pm_reset(struct pci_dev *dev)
2361 + {
2362 + /*
2363 +@@ -3912,6 +3934,69 @@ static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
2364 + return 0;
2365 + }
2366 +
2367 ++#define PCI_DEVICE_ID_HINIC_VF 0x375E
2368 ++#define HINIC_VF_FLR_TYPE 0x1000
2369 ++#define HINIC_VF_FLR_CAP_BIT (1UL << 30)
2370 ++#define HINIC_VF_OP 0xE80
2371 ++#define HINIC_VF_FLR_PROC_BIT (1UL << 18)
2372 ++#define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */
2373 ++
2374 ++/* Device-specific reset method for Huawei Intelligent NIC virtual functions */
2375 ++static int reset_hinic_vf_dev(struct pci_dev *pdev, int probe)
2376 ++{
2377 ++ unsigned long timeout;
2378 ++ void __iomem *bar;
2379 ++ u32 val;
2380 ++
2381 ++ if (probe)
2382 ++ return 0;
2383 ++
2384 ++ bar = pci_iomap(pdev, 0, 0);
2385 ++ if (!bar)
2386 ++ return -ENOTTY;
2387 ++
2388 ++ /* Get and check firmware capabilities */
2389 ++ val = ioread32be(bar + HINIC_VF_FLR_TYPE);
2390 ++ if (!(val & HINIC_VF_FLR_CAP_BIT)) {
2391 ++ pci_iounmap(pdev, bar);
2392 ++ return -ENOTTY;
2393 ++ }
2394 ++
2395 ++ /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */
2396 ++ val = ioread32be(bar + HINIC_VF_OP);
2397 ++ val = val | HINIC_VF_FLR_PROC_BIT;
2398 ++ iowrite32be(val, bar + HINIC_VF_OP);
2399 ++
2400 ++ pcie_flr(pdev);
2401 ++
2402 ++ /*
2403 ++ * The device must recapture its Bus and Device Numbers after FLR
2404 ++ * in order generate Completions. Issue a config write to let the
2405 ++ * device capture this information.
2406 ++ */
2407 ++ pci_write_config_word(pdev, PCI_VENDOR_ID, 0);
2408 ++
2409 ++ /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */
2410 ++ timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT);
2411 ++ do {
2412 ++ val = ioread32be(bar + HINIC_VF_OP);
2413 ++ if (!(val & HINIC_VF_FLR_PROC_BIT))
2414 ++ goto reset_complete;
2415 ++ msleep(20);
2416 ++ } while (time_before(jiffies, timeout));
2417 ++
2418 ++ val = ioread32be(bar + HINIC_VF_OP);
2419 ++ if (!(val & HINIC_VF_FLR_PROC_BIT))
2420 ++ goto reset_complete;
2421 ++
2422 ++ pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val);
2423 ++
2424 ++reset_complete:
2425 ++ pci_iounmap(pdev, bar);
2426 ++
2427 ++ return 0;
2428 ++}
2429 ++
2430 + static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
2431 + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
2432 + reset_intel_82599_sfp_virtfn },
2433 +@@ -3923,6 +4008,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
2434 + { PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
2435 + { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
2436 + reset_chelsio_generic_dev },
2437 ++ { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF,
2438 ++ reset_hinic_vf_dev },
2439 + { 0 }
2440 + };
2441 +
2442 +@@ -4763,6 +4850,8 @@ static const struct pci_dev_acs_enabled {
2443 + { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
2444 + { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
2445 + { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
2446 ++ /* Broadcom multi-function device */
2447 ++ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs },
2448 + { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
2449 + /* Amazon Annapurna Labs */
2450 + { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
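
The new reset_hinic_vf_dev() quirk above drives a firmware handshake: set the FLR-in-progress bit, issue the FLR, write config space once so the device re-captures its bus and device numbers, then poll until firmware clears the bit. Note the extra read after the loop, which keeps a flag that cleared just as the deadline expired from being misreported as a timeout. That polling idiom, standalone; flag_set() simulates the register read:

    #include <stdbool.h>
    #include <stdio.h>

    static int reads_left = 3;              /* flag clears on read 3 */
    static bool flag_set(void) { return --reads_left > 0; }

    static bool wait_for_clear(int max_polls)
    {
            while (max_polls--) {
                    if (!flag_set())
                            return true;
                    /* msleep(20) in the real quirk */
            }
            /* one final read: may have cleared at the deadline */
            return !flag_set();
    }

    int main(void)
    {
            printf("%s\n", wait_for_clear(2) ? "cleared" : "timeout");
            return 0;
    }
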
2451 +diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
2452 +index cdbcc49f71152..731c483a04dea 100644
2453 +--- a/drivers/phy/mediatek/phy-mtk-tphy.c
2454 ++++ b/drivers/phy/mediatek/phy-mtk-tphy.c
2455 +@@ -949,6 +949,8 @@ static int mtk_phy_init(struct phy *phy)
2456 + break;
2457 + default:
2458 + dev_err(tphy->dev, "incompatible PHY type\n");
2459 ++ clk_disable_unprepare(instance->ref_clk);
2460 ++ clk_disable_unprepare(instance->da_ref_clk);
2461 + return -EINVAL;
2462 + }
2463 +
2464 +diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
2465 +index 1c25af28a7233..5c2f2e337b57b 100644
2466 +--- a/drivers/platform/x86/thinkpad_acpi.c
2467 ++++ b/drivers/platform/x86/thinkpad_acpi.c
2468 +@@ -8806,6 +8806,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
2469 + TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */
2470 + TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */
2471 + TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
2472 ++ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
2473 + };
2474 +
2475 + static int __init fan_init(struct ibm_init_struct *iibm)
2476 +diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
2477 +index 03a246e60fd98..21c4c34c52d8d 100644
2478 +--- a/drivers/ptp/ptp_clock.c
2479 ++++ b/drivers/ptp/ptp_clock.c
2480 +@@ -63,7 +63,7 @@ static void enqueue_external_timestamp(struct timestamp_event_queue *queue,
2481 + spin_unlock_irqrestore(&queue->lock, flags);
2482 + }
2483 +
2484 +-s32 scaled_ppm_to_ppb(long ppm)
2485 ++long scaled_ppm_to_ppb(long ppm)
2486 + {
2487 + /*
2488 + * The 'freq' field in the 'struct timex' is in parts per
2489 +@@ -80,7 +80,7 @@ s32 scaled_ppm_to_ppb(long ppm)
2490 + s64 ppb = 1 + ppm;
2491 + ppb *= 125;
2492 + ppb >>= 13;
2493 +- return (s32) ppb;
2494 ++ return (long) ppb;
2495 + }
2496 + EXPORT_SYMBOL(scaled_ppm_to_ppb);
2497 +
2498 +@@ -138,7 +138,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx)
2499 + delta = ktime_to_ns(kt);
2500 + err = ops->adjtime(ops, delta);
2501 + } else if (tx->modes & ADJ_FREQUENCY) {
2502 +- s32 ppb = scaled_ppm_to_ppb(tx->freq);
2503 ++ long ppb = scaled_ppm_to_ppb(tx->freq);
2504 + if (ppb > ops->max_adj || ppb < -ops->max_adj)
2505 + return -ERANGE;
2506 + if (ops->adjfine)
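
The ptp_clock hunks above widen scaled_ppm_to_ppb() from s32 to long. Scaled ppm carries a 16-bit binary fraction, so ppb = ppm * 1000 / 2^16, which the kernel folds into * 125 >> 13 to stay in integer math; with a 64-bit input a large adjustment can produce a ppb value that no longer fits in 32 bits. The arithmetic checked standalone (assumes 64-bit long):

    #include <stdio.h>

    /* 1000 / 65536 == 125 / 8192, hence * 125 >> 13. */
    static long toy_scaled_ppm_to_ppb(long ppm)
    {
            long long ppb = 1 + ppm;  /* +1 rounds as the kernel does */

            ppb *= 125;
            ppb >>= 13;
            return (long)ppb;
    }

    int main(void)
    {
            /* 1 ppm == 65536 scaled ppm -> 1000 ppb */
            printf("%ld\n", toy_scaled_ppm_to_ppb(65536L));
            /* large inputs overflowed the old s32 return type */
            printf("%ld\n", toy_scaled_ppm_to_ppb(1L << 48));
            return 0;
    }
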
2507 +diff --git a/drivers/regulator/cros-ec-regulator.c b/drivers/regulator/cros-ec-regulator.c
2508 +index eb3fc1db4edc8..c4754f3cf2337 100644
2509 +--- a/drivers/regulator/cros-ec-regulator.c
2510 ++++ b/drivers/regulator/cros-ec-regulator.c
2511 +@@ -225,8 +225,9 @@ static int cros_ec_regulator_probe(struct platform_device *pdev)
2512 +
2513 + drvdata->dev = devm_regulator_register(dev, &drvdata->desc, &cfg);
2514 + if (IS_ERR(drvdata->dev)) {
2515 ++ ret = PTR_ERR(drvdata->dev);
2516 + dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
2517 +- return PTR_ERR(drvdata->dev);
2518 ++ return ret;
2519 + }
2520 +
2521 + platform_set_drvdata(pdev, drvdata);
2522 +diff --git a/drivers/regulator/rt4801-regulator.c b/drivers/regulator/rt4801-regulator.c
2523 +index 2055a9cb13ba5..7a87788d3f092 100644
2524 +--- a/drivers/regulator/rt4801-regulator.c
2525 ++++ b/drivers/regulator/rt4801-regulator.c
2526 +@@ -66,7 +66,7 @@ static int rt4801_enable(struct regulator_dev *rdev)
2527 + struct gpio_descs *gpios = priv->enable_gpios;
2528 + int id = rdev_get_id(rdev), ret;
2529 +
2530 +- if (gpios->ndescs <= id) {
2531 ++ if (!gpios || gpios->ndescs <= id) {
2532 + dev_warn(&rdev->dev, "no dedicated gpio can control\n");
2533 + goto bypass_gpio;
2534 + }
2535 +@@ -88,7 +88,7 @@ static int rt4801_disable(struct regulator_dev *rdev)
2536 + struct gpio_descs *gpios = priv->enable_gpios;
2537 + int id = rdev_get_id(rdev);
2538 +
2539 +- if (gpios->ndescs <= id) {
2540 ++ if (!gpios || gpios->ndescs <= id) {
2541 + dev_warn(&rdev->dev, "no dedicated gpio can control\n");
2542 + goto bypass_gpio;
2543 + }
2544 +diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c
2545 +index 5adc552dffd58..4bca64de0f672 100644
2546 +--- a/drivers/regulator/rtmv20-regulator.c
2547 ++++ b/drivers/regulator/rtmv20-regulator.c
2548 +@@ -27,6 +27,7 @@
2549 + #define RTMV20_REG_LDIRQ 0x30
2550 + #define RTMV20_REG_LDSTAT 0x40
2551 + #define RTMV20_REG_LDMASK 0x50
2552 ++#define RTMV20_MAX_REGS (RTMV20_REG_LDMASK + 1)
2553 +
2554 + #define RTMV20_VID_MASK GENMASK(7, 4)
2555 + #define RICHTEK_VID 0x80
2556 +@@ -313,6 +314,7 @@ static const struct regmap_config rtmv20_regmap_config = {
2557 + .val_bits = 8,
2558 + .cache_type = REGCACHE_RBTREE,
2559 + .max_register = RTMV20_REG_LDMASK,
2560 ++ .num_reg_defaults_raw = RTMV20_MAX_REGS,
2561 +
2562 + .writeable_reg = rtmv20_is_accessible_reg,
2563 + .readable_reg = rtmv20_is_accessible_reg,
2564 +diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
2565 +index ecefc25eff0c0..337353c9655ed 100644
2566 +--- a/drivers/s390/crypto/ap_queue.c
2567 ++++ b/drivers/s390/crypto/ap_queue.c
2568 +@@ -135,12 +135,13 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
2569 + {
2570 + struct ap_queue_status status;
2571 + struct ap_message *ap_msg;
2572 ++ bool found = false;
2573 +
2574 + status = ap_dqap(aq->qid, &aq->reply->psmid,
2575 + aq->reply->msg, aq->reply->len);
2576 + switch (status.response_code) {
2577 + case AP_RESPONSE_NORMAL:
2578 +- aq->queue_count--;
2579 ++ aq->queue_count = max_t(int, 0, aq->queue_count - 1);
2580 + if (aq->queue_count > 0)
2581 + mod_timer(&aq->timeout,
2582 + jiffies + aq->request_timeout);
2583 +@@ -150,8 +151,14 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
2584 + list_del_init(&ap_msg->list);
2585 + aq->pendingq_count--;
2586 + ap_msg->receive(aq, ap_msg, aq->reply);
2587 ++ found = true;
2588 + break;
2589 + }
2590 ++ if (!found) {
2591 ++ AP_DBF_WARN("%s unassociated reply psmid=0x%016llx on 0x%02x.%04x\n",
2592 ++ __func__, aq->reply->psmid,
2593 ++ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
2594 ++ }
2595 + fallthrough;
2596 + case AP_RESPONSE_NO_PENDING_REPLY:
2597 + if (!status.queue_empty || aq->queue_count <= 0)
2598 +@@ -232,7 +239,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
2599 + ap_msg->flags & AP_MSG_FLAG_SPECIAL);
2600 + switch (status.response_code) {
2601 + case AP_RESPONSE_NORMAL:
2602 +- aq->queue_count++;
2603 ++ aq->queue_count = max_t(int, 1, aq->queue_count + 1);
2604 + if (aq->queue_count == 1)
2605 + mod_timer(&aq->timeout, jiffies + aq->request_timeout);
2606 + list_move_tail(&ap_msg->list, &aq->pendingq);
2607 +diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
2608 +index 2786470a52011..4f24f63922126 100644
2609 +--- a/drivers/spi/spi-stm32-qspi.c
2610 ++++ b/drivers/spi/spi-stm32-qspi.c
2611 +@@ -293,7 +293,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
2612 + int err = 0;
2613 +
2614 + if (!op->data.nbytes)
2615 +- return stm32_qspi_wait_nobusy(qspi);
2616 ++ goto wait_nobusy;
2617 +
2618 + if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
2619 + goto out;
2620 +@@ -314,6 +314,9 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
2621 + out:
2622 + /* clear flags */
2623 + writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
2624 ++wait_nobusy:
2625 ++ if (!err)
2626 ++ err = stm32_qspi_wait_nobusy(qspi);
2627 +
2628 + return err;
2629 + }
2630 +diff --git a/drivers/spi/spi-zynq-qspi.c b/drivers/spi/spi-zynq-qspi.c
2631 +index 2765289028fae..68193db8b2e3c 100644
2632 +--- a/drivers/spi/spi-zynq-qspi.c
2633 ++++ b/drivers/spi/spi-zynq-qspi.c
2634 +@@ -678,14 +678,14 @@ static int zynq_qspi_probe(struct platform_device *pdev)
2635 + xqspi->irq = platform_get_irq(pdev, 0);
2636 + if (xqspi->irq <= 0) {
2637 + ret = -ENXIO;
2638 +- goto remove_master;
2639 ++ goto clk_dis_all;
2640 + }
2641 + ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,
2642 + 0, pdev->name, xqspi);
2643 + if (ret != 0) {
2644 + ret = -ENXIO;
2645 + dev_err(&pdev->dev, "request_irq failed\n");
2646 +- goto remove_master;
2647 ++ goto clk_dis_all;
2648 + }
2649 +
2650 + ret = of_property_read_u32(np, "num-cs",
2651 +@@ -693,8 +693,9 @@ static int zynq_qspi_probe(struct platform_device *pdev)
2652 + if (ret < 0) {
2653 + ctlr->num_chipselect = 1;
2654 + } else if (num_cs > ZYNQ_QSPI_MAX_NUM_CS) {
2655 ++ ret = -EINVAL;
2656 + dev_err(&pdev->dev, "only 2 chip selects are available\n");
2657 +- goto remove_master;
2658 ++ goto clk_dis_all;
2659 + } else {
2660 + ctlr->num_chipselect = num_cs;
2661 + }
2662 +diff --git a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
2663 +index caaf9e34f1ee2..09b0b8a16e994 100644
2664 +--- a/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
2665 ++++ b/drivers/staging/mt7621-pinctrl/pinctrl-rt2880.c
2666 +@@ -127,7 +127,7 @@ static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev,
2667 + if (p->groups[group].enabled) {
2668 + dev_err(p->dev, "%s is already enabled\n",
2669 + p->groups[group].name);
2670 +- return -EBUSY;
2671 ++ return 0;
2672 + }
2673 +
2674 + p->groups[group].enabled = 1;
2675 +diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
2676 +index 6d8331e7da99e..425b29168b4d0 100644
2677 +--- a/drivers/usb/chipidea/usbmisc_imx.c
2678 ++++ b/drivers/usb/chipidea/usbmisc_imx.c
2679 +@@ -686,6 +686,16 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
2680 + int val;
2681 + unsigned long flags;
2682 +
2683 ++ /* Clear VDATSRCENB0 to disable VDP_SRC and IDM_SNK required by BC 1.2 spec */
2684 ++ spin_lock_irqsave(&usbmisc->lock, flags);
2685 ++ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
2686 ++ val &= ~MX7D_USB_OTG_PHY_CFG2_CHRG_VDATSRCENB0;
2687 ++ writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
2688 ++ spin_unlock_irqrestore(&usbmisc->lock, flags);
2689 ++
2690 ++ /* TVDMSRC_DIS */
2691 ++ msleep(20);
2692 ++
2693 + /* VDM_SRC is connected to D- and IDP_SINK is connected to D+ */
2694 + spin_lock_irqsave(&usbmisc->lock, flags);
2695 + val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
2696 +@@ -695,7 +705,8 @@ static int imx7d_charger_secondary_detection(struct imx_usbmisc_data *data)
2697 + usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
2698 + spin_unlock_irqrestore(&usbmisc->lock, flags);
2699 +
2700 +- usleep_range(1000, 2000);
2701 ++ /* TVDMSRC_ON */
2702 ++ msleep(40);
2703 +
2704 + /*
2705 + * Per BC 1.2, check voltage of D+:
2706 +@@ -798,7 +809,8 @@ static int imx7d_charger_primary_detection(struct imx_usbmisc_data *data)
2707 + usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
2708 + spin_unlock_irqrestore(&usbmisc->lock, flags);
2709 +
2710 +- usleep_range(1000, 2000);
2711 ++ /* TVDPSRC_ON */
2712 ++ msleep(40);
2713 +
2714 + /* Check if D- is less than VDAT_REF to determine an SDP per BC 1.2 */
2715 + val = readl(usbmisc->base + MX7D_USB_OTG_PHY_STATUS);
2716 +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2717 +index 228e3d4e1a9fd..357730e8f52f2 100644
2718 +--- a/drivers/usb/core/hub.c
2719 ++++ b/drivers/usb/core/hub.c
2720 +@@ -40,6 +40,8 @@
2721 + #define USB_VENDOR_GENESYS_LOGIC 0x05e3
2722 + #define USB_VENDOR_SMSC 0x0424
2723 + #define USB_PRODUCT_USB5534B 0x5534
2724 ++#define USB_VENDOR_CYPRESS 0x04b4
2725 ++#define USB_PRODUCT_CY7C65632 0x6570
2726 + #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
2727 + #define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
2728 +
2729 +@@ -5643,6 +5645,11 @@ static const struct usb_device_id hub_id_table[] = {
2730 + .idProduct = USB_PRODUCT_USB5534B,
2731 + .bInterfaceClass = USB_CLASS_HUB,
2732 + .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
2733 ++ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
2734 ++ | USB_DEVICE_ID_MATCH_PRODUCT,
2735 ++ .idVendor = USB_VENDOR_CYPRESS,
2736 ++ .idProduct = USB_PRODUCT_CY7C65632,
2737 ++ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
2738 + { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
2739 + | USB_DEVICE_ID_MATCH_INT_CLASS,
2740 + .idVendor = USB_VENDOR_GENESYS_LOGIC,
2741 +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
2742 +index e07fd5ee8ed95..7537dd50ad533 100644
2743 +--- a/drivers/usb/dwc3/core.c
2744 ++++ b/drivers/usb/dwc3/core.c
2745 +@@ -1642,8 +1642,8 @@ static int dwc3_remove(struct platform_device *pdev)
2746 +
2747 + pm_runtime_get_sync(&pdev->dev);
2748 +
2749 +- dwc3_debugfs_exit(dwc);
2750 + dwc3_core_exit_mode(dwc);
2751 ++ dwc3_debugfs_exit(dwc);
2752 +
2753 + dwc3_core_exit(dwc);
2754 + dwc3_ulpi_exit(dwc);
2755 +diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
2756 +index 8ab3949423604..74d9c2c38193d 100644
2757 +--- a/drivers/usb/dwc3/debug.h
2758 ++++ b/drivers/usb/dwc3/debug.h
2759 +@@ -413,9 +413,12 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status)
2760 +
2761 +
2762 + #ifdef CONFIG_DEBUG_FS
2763 ++extern void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep);
2764 + extern void dwc3_debugfs_init(struct dwc3 *d);
2765 + extern void dwc3_debugfs_exit(struct dwc3 *d);
2766 + #else
2767 ++static inline void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
2768 ++{ }
2769 + static inline void dwc3_debugfs_init(struct dwc3 *d)
2770 + { }
2771 + static inline void dwc3_debugfs_exit(struct dwc3 *d)
2772 +diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
2773 +index 5da4f6082d930..3ebe3e6c284d2 100644
2774 +--- a/drivers/usb/dwc3/debugfs.c
2775 ++++ b/drivers/usb/dwc3/debugfs.c
2776 +@@ -890,30 +890,14 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep,
2777 + }
2778 + }
2779 +
2780 +-static void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep,
2781 +- struct dentry *parent)
2782 ++void dwc3_debugfs_create_endpoint_dir(struct dwc3_ep *dep)
2783 + {
2784 + struct dentry *dir;
2785 +
2786 +- dir = debugfs_create_dir(dep->name, parent);
2787 ++ dir = debugfs_create_dir(dep->name, dep->dwc->root);
2788 + dwc3_debugfs_create_endpoint_files(dep, dir);
2789 + }
2790 +
2791 +-static void dwc3_debugfs_create_endpoint_dirs(struct dwc3 *dwc,
2792 +- struct dentry *parent)
2793 +-{
2794 +- int i;
2795 +-
2796 +- for (i = 0; i < dwc->num_eps; i++) {
2797 +- struct dwc3_ep *dep = dwc->eps[i];
2798 +-
2799 +- if (!dep)
2800 +- continue;
2801 +-
2802 +- dwc3_debugfs_create_endpoint_dir(dep, parent);
2803 +- }
2804 +-}
2805 +-
2806 + void dwc3_debugfs_init(struct dwc3 *dwc)
2807 + {
2808 + struct dentry *root;
2809 +@@ -944,7 +928,6 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
2810 + &dwc3_testmode_fops);
2811 + debugfs_create_file("link_state", 0644, root, dwc,
2812 + &dwc3_link_state_fops);
2813 +- dwc3_debugfs_create_endpoint_dirs(dwc, root);
2814 + }
2815 + }
2816 +
2817 +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2818 +index 8bccdd7b0ca2e..14a7c05abfe8f 100644
2819 +--- a/drivers/usb/dwc3/gadget.c
2820 ++++ b/drivers/usb/dwc3/gadget.c
2821 +@@ -2664,6 +2664,8 @@ static int dwc3_gadget_init_endpoint(struct dwc3 *dwc, u8 epnum)
2822 + INIT_LIST_HEAD(&dep->started_list);
2823 + INIT_LIST_HEAD(&dep->cancelled_list);
2824 +
2825 ++ dwc3_debugfs_create_endpoint_dir(dep);
2826 ++
2827 + return 0;
2828 + }
2829 +
2830 +@@ -2707,6 +2709,7 @@ static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
2831 + list_del(&dep->endpoint.ep_list);
2832 + }
2833 +
2834 ++ debugfs_remove_recursive(debugfs_lookup(dep->name, dwc->root));
2835 + kfree(dep);
2836 + }
2837 + }
2838 +diff --git a/fs/afs/main.c b/fs/afs/main.c
2839 +index b2975256dadbd..179004b15566d 100644
2840 +--- a/fs/afs/main.c
2841 ++++ b/fs/afs/main.c
2842 +@@ -203,8 +203,8 @@ static int __init afs_init(void)
2843 + goto error_fs;
2844 +
2845 + afs_proc_symlink = proc_symlink("fs/afs", NULL, "../self/net/afs");
2846 +- if (IS_ERR(afs_proc_symlink)) {
2847 +- ret = PTR_ERR(afs_proc_symlink);
2848 ++ if (!afs_proc_symlink) {
2849 ++ ret = -ENOMEM;
2850 + goto error_proc;
2851 + }
2852 +
2853 +diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
2854 +index dcab112e1f001..086b6bacbad17 100644
2855 +--- a/fs/notify/fanotify/fanotify_user.c
2856 ++++ b/fs/notify/fanotify/fanotify_user.c
2857 +@@ -378,7 +378,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
2858 + info_type, fanotify_info_name(info),
2859 + info->name_len, buf, count);
2860 + if (ret < 0)
2861 +- return ret;
2862 ++ goto out_close_fd;
2863 +
2864 + buf += ret;
2865 + count -= ret;
2866 +@@ -426,7 +426,7 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
2867 + fanotify_event_object_fh(event),
2868 + info_type, dot, dot_len, buf, count);
2869 + if (ret < 0)
2870 +- return ret;
2871 ++ goto out_close_fd;
2872 +
2873 + buf += ret;
2874 + count -= ret;
2875 +diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
2876 +index a57af878fd0cd..4a5966475a35a 100644
2877 +--- a/include/linux/mfd/rohm-bd70528.h
2878 ++++ b/include/linux/mfd/rohm-bd70528.h
2879 +@@ -26,9 +26,7 @@ struct bd70528_data {
2880 + struct mutex rtc_timer_lock;
2881 + };
2882 +
2883 +-#define BD70528_BUCK_VOLTS 17
2884 +-#define BD70528_BUCK_VOLTS 17
2885 +-#define BD70528_BUCK_VOLTS 17
2886 ++#define BD70528_BUCK_VOLTS 0x10
2887 + #define BD70528_LDO_VOLTS 0x20
2888 +
2889 + #define BD70528_REG_BUCK1_EN 0x0F
2890 +diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
2891 +index 028f442530cf5..60ffeb6b67ae7 100644
2892 +--- a/include/linux/mlx5/transobj.h
2893 ++++ b/include/linux/mlx5/transobj.h
2894 +@@ -85,4 +85,5 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
2895 + struct mlx5_hairpin_params *params);
2896 +
2897 + void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair);
2898 ++void mlx5_core_hairpin_clear_dead_peer(struct mlx5_hairpin *hp);
2899 + #endif /* __TRANSOBJ_H__ */
2900 +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
2901 +index a4fff7d7abe58..4eb38918da8f8 100644
2902 +--- a/include/linux/mm_types.h
2903 ++++ b/include/linux/mm_types.h
2904 +@@ -448,13 +448,6 @@ struct mm_struct {
2905 + */
2906 + atomic_t has_pinned;
2907 +
2908 +- /**
2909 +- * @write_protect_seq: Locked when any thread is write
2910 +- * protecting pages mapped by this mm to enforce a later COW,
2911 +- * for instance during page table copying for fork().
2912 +- */
2913 +- seqcount_t write_protect_seq;
2914 +-
2915 + #ifdef CONFIG_MMU
2916 + atomic_long_t pgtables_bytes; /* PTE page table pages */
2917 + #endif
2918 +@@ -463,6 +456,18 @@ struct mm_struct {
2919 + spinlock_t page_table_lock; /* Protects page tables and some
2920 + * counters
2921 + */
2922 ++ /*
2923 ++ * With some kernel config, the current mmap_lock's offset
2924 ++ * inside 'mm_struct' is at 0x120, which is very optimal, as
2925 ++ * its two hot fields 'count' and 'owner' sit in 2 different
2926 ++ * cachelines, and when mmap_lock is highly contended, both
2927 ++ * of the 2 fields will be accessed frequently, current layout
2928 ++ * will help to reduce cache bouncing.
2929 ++ *
2930 ++ * So please be careful with adding new fields before
2931 ++ * mmap_lock, which can easily push the 2 fields into one
2932 ++ * cacheline.
2933 ++ */
2934 + struct rw_semaphore mmap_lock;
2935 +
2936 + struct list_head mmlist; /* List of maybe swapped mm's. These
2937 +@@ -483,7 +488,15 @@ struct mm_struct {
2938 + unsigned long stack_vm; /* VM_STACK */
2939 + unsigned long def_flags;
2940 +
2941 ++ /**
2942 ++ * @write_protect_seq: Locked when any thread is write
2943 ++ * protecting pages mapped by this mm to enforce a later COW,
2944 ++ * for instance during page table copying for fork().
2945 ++ */
2946 ++ seqcount_t write_protect_seq;
2947 ++
2948 + spinlock_t arg_lock; /* protect the below fields */
2949 ++
2950 + unsigned long start_code, end_code, start_data, end_data;
2951 + unsigned long start_brk, brk, start_stack;
2952 + unsigned long arg_start, arg_end, env_start, env_end;
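The mm_struct hunk above is purely a layout move: write_protect_seq is relocated so it no longer sits just before mmap_lock, where it could push the rwsem's hot fields across a cacheline boundary. A minimal userspace sketch of how such placement can be audited with offsetof (toy struct and field names, assuming 64-byte cachelines; this is not the kernel's mm_struct):

    #include <stdio.h>
    #include <stddef.h>

    #define CACHELINE 64UL  /* assumption: 64-byte cachelines */

    /* Two illustrative layouts: layout_b adds one field before the
     * lock, shifting its two hot words across a line boundary. */
    struct layout_a { char earlier[56]; long lock_count; long lock_owner; };
    struct layout_b { char earlier[56]; long newcomer;
                      long lock_count; long lock_owner; };

    static void report(const char *name, size_t count_off, size_t owner_off)
    {
        printf("%s: count @%zu (line %zu), owner @%zu (line %zu)\n",
               name, count_off, count_off / CACHELINE,
               owner_off, owner_off / CACHELINE);
    }

    int main(void)
    {
        report("layout_a", offsetof(struct layout_a, lock_count),
               offsetof(struct layout_a, lock_owner));
        report("layout_b", offsetof(struct layout_b, lock_count),
               offsetof(struct layout_b, lock_owner));
        return 0;
    }

Running this shows layout_a splitting count and owner across two lines while layout_b packs them into one, which is the kind of shift the new comment warns about when fields are added before mmap_lock.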
2953 +diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
2954 +index d3e8ba5c71258..6d6b42143effc 100644
2955 +--- a/include/linux/ptp_clock_kernel.h
2956 ++++ b/include/linux/ptp_clock_kernel.h
2957 +@@ -222,7 +222,7 @@ extern int ptp_clock_index(struct ptp_clock *ptp);
2958 + * @ppm: Parts per million, but with a 16 bit binary fractional field
2959 + */
2960 +
2961 +-extern s32 scaled_ppm_to_ppb(long ppm);
2962 ++extern long scaled_ppm_to_ppb(long ppm);
2963 +
2964 + /**
2965 + * ptp_find_pin() - obtain the pin index of a given auxiliary function
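Widening the scaled_ppm_to_ppb() return type from s32 to long avoids truncating large frequency adjustments. A standalone sketch of the conversion, using the identity documented for scaled ppm (a 16-bit binary fractional field, so ppb = scaled_ppm * 1000 / 2^16 = scaled_ppm * 125 / 2^13); the function name and input value here are illustrative, not the kernel's exact code:

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of scaled-ppm -> ppb; int64_t keeps the intermediate
     * product from overflowing, as the kernel's s64 does. */
    static int64_t demo_scaled_ppm_to_ppb(int64_t ppm)
    {
        int64_t ppb = ppm;

        ppb *= 125;
        ppb >>= 13;
        return ppb;
    }

    int main(void)
    {
        int64_t big = 200000000000LL;   /* a large adjustment */
        int64_t ppb = demo_scaled_ppm_to_ppb(big);

        /* The result exceeds INT32_MAX, so an s32 return type would
         * silently truncate it; a 64-bit long carries it intact. */
        printf("ppb = %lld, as s32 = %d\n", (long long)ppb, (int32_t)ppb);
        return 0;
    }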
2966 +diff --git a/include/linux/socket.h b/include/linux/socket.h
2967 +index e9cb30d8cbfb1..9aa530d497da8 100644
2968 +--- a/include/linux/socket.h
2969 ++++ b/include/linux/socket.h
2970 +@@ -437,6 +437,4 @@ extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
2971 + extern int __sys_socketpair(int family, int type, int protocol,
2972 + int __user *usockvec);
2973 + extern int __sys_shutdown(int fd, int how);
2974 +-
2975 +-extern struct ns_common *get_net_ns(struct ns_common *ns);
2976 + #endif /* _LINUX_SOCKET_H */
2977 +diff --git a/include/linux/swapops.h b/include/linux/swapops.h
2978 +index d9b7c9132c2f6..6430a94c69818 100644
2979 +--- a/include/linux/swapops.h
2980 ++++ b/include/linux/swapops.h
2981 +@@ -23,6 +23,16 @@
2982 + #define SWP_TYPE_SHIFT (BITS_PER_XA_VALUE - MAX_SWAPFILES_SHIFT)
2983 + #define SWP_OFFSET_MASK ((1UL << SWP_TYPE_SHIFT) - 1)
2984 +
2985 ++/* Clear all flags but only keep swp_entry_t related information */
2986 ++static inline pte_t pte_swp_clear_flags(pte_t pte)
2987 ++{
2988 ++ if (pte_swp_soft_dirty(pte))
2989 ++ pte = pte_swp_clear_soft_dirty(pte);
2990 ++ if (pte_swp_uffd_wp(pte))
2991 ++ pte = pte_swp_clear_uffd_wp(pte);
2992 ++ return pte;
2993 ++}
2994 ++
2995 + /*
2996 + * Store a type+offset into a swp_entry_t in an arch-independent format
2997 + */
2998 +@@ -66,10 +76,7 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
2999 + {
3000 + swp_entry_t arch_entry;
3001 +
3002 +- if (pte_swp_soft_dirty(pte))
3003 +- pte = pte_swp_clear_soft_dirty(pte);
3004 +- if (pte_swp_uffd_wp(pte))
3005 +- pte = pte_swp_clear_uffd_wp(pte);
3006 ++ pte = pte_swp_clear_flags(pte);
3007 + arch_entry = __pte_to_swp_entry(pte);
3008 + return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
3009 + }
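The swapops.h hunk factors the two flag-clearing steps into pte_swp_clear_flags() so that pte_to_swp_entry() here and pte_same_as_swp() in mm/swapfile.c (fixed later in this patch) stay in sync. A toy bitflag sketch of the same refactor (flag names and types are illustrative, not the kernel's pte encoding):

    #include <stdio.h>
    #include <stdint.h>

    #define F_SOFT_DIRTY (1u << 0)
    #define F_UFFD_WP    (1u << 1)

    typedef uint32_t toy_pte;

    /* One helper clears every status flag, so every caller comparing
     * only the swap-entry payload uses the same definition. */
    static toy_pte toy_swp_clear_flags(toy_pte pte)
    {
        return pte & ~(F_SOFT_DIRTY | F_UFFD_WP);
    }

    static int toy_same_as_swp(toy_pte a, toy_pte b)
    {
        return toy_swp_clear_flags(a) == toy_swp_clear_flags(b);
    }

    int main(void)
    {
        toy_pte stored = 0xabcd0000;
        toy_pte live = stored | F_UFFD_WP;  /* flag set after storing */

        /* Clearing only soft-dirty (the old swapfile.c comparison)
         * would report a mismatch here; the shared helper agrees. */
        printf("same payload: %d\n", toy_same_as_swp(stored, live));
        return 0;
    }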
3010 +diff --git a/include/net/mac80211.h b/include/net/mac80211.h
3011 +index dcdba96814a2b..6ff49c13717bb 100644
3012 +--- a/include/net/mac80211.h
3013 ++++ b/include/net/mac80211.h
3014 +@@ -6335,7 +6335,12 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw,
3015 +
3016 + /**
3017 + * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header
3018 +- * of injected frames
3019 ++ * of injected frames.
3020 ++ *
3021 ++ * To accurately parse and take into account rate and retransmission fields,
3022 ++ * you must initialize the chandef field in the ieee80211_tx_info structure
3023 ++ * of the skb before calling this function.
3024 ++ *
3025 + * @skb: packet injected by userspace
3026 + * @dev: the &struct device of this 802.11 device
3027 + */
3028 +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
3029 +index 22bc07f4b043d..eb0e7731f3b1c 100644
3030 +--- a/include/net/net_namespace.h
3031 ++++ b/include/net/net_namespace.h
3032 +@@ -203,6 +203,8 @@ struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
3033 + void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
3034 +
3035 + void net_ns_barrier(void);
3036 ++
3037 ++struct ns_common *get_net_ns(struct ns_common *ns);
3038 + #else /* CONFIG_NET_NS */
3039 + #include <linux/sched.h>
3040 + #include <linux/nsproxy.h>
3041 +@@ -222,6 +224,11 @@ static inline void net_ns_get_ownership(const struct net *net,
3042 + }
3043 +
3044 + static inline void net_ns_barrier(void) {}
3045 ++
3046 ++static inline struct ns_common *get_net_ns(struct ns_common *ns)
3047 ++{
3048 ++ return ERR_PTR(-EINVAL);
3049 ++}
3050 + #endif /* CONFIG_NET_NS */
3051 +
3052 +
3053 +diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
3054 +index 7d6687618d808..d1b327036ae43 100644
3055 +--- a/include/uapi/linux/in.h
3056 ++++ b/include/uapi/linux/in.h
3057 +@@ -289,6 +289,9 @@ struct sockaddr_in {
3058 + /* Address indicating an error return. */
3059 + #define INADDR_NONE ((unsigned long int) 0xffffffff)
3060 +
3061 ++/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
3062 ++#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
3063 ++
3064 + /* Network number for local host loopback. */
3065 + #define IN_LOOPBACKNET 127
3066 +
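INADDR_DUMMY is the RFC 7600 dummy IPv4 address, and the net/ipv4/icmp.c hunk later in this patch falls back to it when no real source address is configured. A quick check that the constant decodes to the expected dotted quad (POSIX userspace, not kernel code):

    #include <stdio.h>
    #include <arpa/inet.h>

    int main(void)
    {
        struct in_addr a = { .s_addr = htonl(0xc0000008) }; /* INADDR_DUMMY */

        /* Prints 192.0.0.8, the address RFC 7600 reserves. */
        printf("%s\n", inet_ntoa(a));
        return 0;
    }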
3067 +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3068 +index 4f50d6f128be3..e97724e36dfb5 100644
3069 +--- a/kernel/bpf/verifier.c
3070 ++++ b/kernel/bpf/verifier.c
3071 +@@ -5740,6 +5740,27 @@ struct bpf_sanitize_info {
3072 + bool mask_to_left;
3073 + };
3074 +
3075 ++static struct bpf_verifier_state *
3076 ++sanitize_speculative_path(struct bpf_verifier_env *env,
3077 ++ const struct bpf_insn *insn,
3078 ++ u32 next_idx, u32 curr_idx)
3079 ++{
3080 ++ struct bpf_verifier_state *branch;
3081 ++ struct bpf_reg_state *regs;
3082 ++
3083 ++ branch = push_stack(env, next_idx, curr_idx, true);
3084 ++ if (branch && insn) {
3085 ++ regs = branch->frame[branch->curframe]->regs;
3086 ++ if (BPF_SRC(insn->code) == BPF_K) {
3087 ++ mark_reg_unknown(env, regs, insn->dst_reg);
3088 ++ } else if (BPF_SRC(insn->code) == BPF_X) {
3089 ++ mark_reg_unknown(env, regs, insn->dst_reg);
3090 ++ mark_reg_unknown(env, regs, insn->src_reg);
3091 ++ }
3092 ++ }
3093 ++ return branch;
3094 ++}
3095 ++
3096 + static int sanitize_ptr_alu(struct bpf_verifier_env *env,
3097 + struct bpf_insn *insn,
3098 + const struct bpf_reg_state *ptr_reg,
3099 +@@ -5823,12 +5844,26 @@ do_sim:
3100 + tmp = *dst_reg;
3101 + *dst_reg = *ptr_reg;
3102 + }
3103 +- ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3104 ++ ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1,
3105 ++ env->insn_idx);
3106 + if (!ptr_is_dst_reg && ret)
3107 + *dst_reg = tmp;
3108 + return !ret ? REASON_STACK : 0;
3109 + }
3110 +
3111 ++static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
3112 ++{
3113 ++ struct bpf_verifier_state *vstate = env->cur_state;
3114 ++
3115 ++ /* If we simulate paths under speculation, we don't update the
3116 ++ * insn as 'seen' such that when we verify unreachable paths in
3117 ++ * the non-speculative domain, sanitize_dead_code() can still
3118 ++ * rewrite/sanitize them.
3119 ++ */
3120 ++ if (!vstate->speculative)
3121 ++ env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
3122 ++}
3123 ++
3124 + static int sanitize_err(struct bpf_verifier_env *env,
3125 + const struct bpf_insn *insn, int reason,
3126 + const struct bpf_reg_state *off_reg,
3127 +@@ -7974,14 +8009,28 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
3128 + if (err)
3129 + return err;
3130 + }
3131 ++
3132 + if (pred == 1) {
3133 +- /* only follow the goto, ignore fall-through */
3134 ++ /* Only follow the goto, ignore fall-through. If needed, push
3135 ++ * the fall-through branch for simulation under speculative
3136 ++ * execution.
3137 ++ */
3138 ++ if (!env->bypass_spec_v1 &&
3139 ++ !sanitize_speculative_path(env, insn, *insn_idx + 1,
3140 ++ *insn_idx))
3141 ++ return -EFAULT;
3142 + *insn_idx += insn->off;
3143 + return 0;
3144 + } else if (pred == 0) {
3145 +- /* only follow fall-through branch, since
3146 +- * that's where the program will go
3147 ++ /* Only follow the fall-through branch, since that's where the
3148 ++ * program will go. If needed, push the goto branch for
3149 ++ * simulation under speculative execution.
3150 + */
3151 ++ if (!env->bypass_spec_v1 &&
3152 ++ !sanitize_speculative_path(env, insn,
3153 ++ *insn_idx + insn->off + 1,
3154 ++ *insn_idx))
3155 ++ return -EFAULT;
3156 + return 0;
3157 + }
3158 +
3159 +@@ -9811,7 +9860,7 @@ static int do_check(struct bpf_verifier_env *env)
3160 + }
3161 +
3162 + regs = cur_regs(env);
3163 +- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
3164 ++ sanitize_mark_insn_seen(env);
3165 + prev_insn_idx = env->insn_idx;
3166 +
3167 + if (class == BPF_ALU || class == BPF_ALU64) {
3168 +@@ -10031,7 +10080,7 @@ process_bpf_exit:
3169 + return err;
3170 +
3171 + env->insn_idx++;
3172 +- env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
3173 ++ sanitize_mark_insn_seen(env);
3174 + } else {
3175 + verbose(env, "invalid BPF_LD mode\n");
3176 + return -EINVAL;
3177 +@@ -10439,6 +10488,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
3178 + {
3179 + struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
3180 + struct bpf_insn *insn = new_prog->insnsi;
3181 ++ u32 old_seen = old_data[off].seen;
3182 + u32 prog_len;
3183 + int i;
3184 +
3185 +@@ -10459,7 +10509,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
3186 + memcpy(new_data + off + cnt - 1, old_data + off,
3187 + sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
3188 + for (i = off; i < off + cnt - 1; i++) {
3189 +- new_data[i].seen = env->pass_cnt;
3190 ++ /* Expand insni[off]'s seen count to the patched range. */
3191 ++ new_data[i].seen = old_seen;
3192 + new_data[i].zext_dst = insn_has_def32(env, insn + i);
3193 + }
3194 + env->insn_aux_data = new_data;
3195 +@@ -11703,6 +11754,9 @@ static void free_states(struct bpf_verifier_env *env)
3196 + * insn_aux_data was touched. These variables are compared to clear temporary
3197 + * data from failed pass. For testing and experiments do_check_common() can be
3198 + * run multiple times even when prior attempt to verify is unsuccessful.
3199 ++ *
3200 ++ * Note that special handling is needed on !env->bypass_spec_v1 if this is
3201 ++ * ever called outside of error path with subsequent program rejection.
3202 + */
3203 + static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
3204 + {
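The verifier change above pushes the branch that is architecturally dead for simulation under speculative execution (Spectre v1 hardening), with the compared registers marked unknown on that path. A heavily simplified sketch of the control-flow idea only, using a toy worklist; this is an illustration, not the BPF verifier:

    #include <stdio.h>

    /* When a branch predicate is statically known, the dead edge is
     * still queued for verification, flagged speculative so its state
     * can be treated (and sanitized) differently. */
    struct path { int insn; int speculative; };

    int main(void)
    {
        struct path stack[8];
        int top = 0;
        int branch_insn = 3, taken_target = 7;
        int pred = 1;   /* branch statically known taken */

        if (pred == 1) {
            stack[top++] = (struct path){ taken_target, 0 };     /* goto */
            stack[top++] = (struct path){ branch_insn + 1, 1 };  /* fall-through */
        } else if (pred == 0) {
            stack[top++] = (struct path){ branch_insn + 1, 0 };
            stack[top++] = (struct path){ taken_target, 1 };
        }

        while (top > 0) {
            struct path p = stack[--top];
            printf("verify from insn %d%s\n", p.insn,
                   p.speculative ? " (speculative, dst regs unknown)" : "");
        }
        return 0;
    }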
3205 +diff --git a/kernel/crash_core.c b/kernel/crash_core.c
3206 +index 106e4500fd53d..4a5fed2f497b8 100644
3207 +--- a/kernel/crash_core.c
3208 ++++ b/kernel/crash_core.c
3209 +@@ -463,6 +463,7 @@ static int __init crash_save_vmcoreinfo_init(void)
3210 + VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
3211 + VMCOREINFO_STRUCT_SIZE(mem_section);
3212 + VMCOREINFO_OFFSET(mem_section, section_mem_map);
3213 ++ VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
3214 + VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
3215 + #endif
3216 + VMCOREINFO_STRUCT_SIZE(page);
3217 +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3218 +index ff8a172a69ca9..d6e1c90de570a 100644
3219 +--- a/kernel/sched/fair.c
3220 ++++ b/kernel/sched/fair.c
3221 +@@ -3767,11 +3767,17 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
3222 + */
3223 + static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
3224 + {
3225 ++ /*
3226 ++ * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
3227 ++ * See ___update_load_avg() for details.
3228 ++ */
3229 ++ u32 divider = get_pelt_divider(&cfs_rq->avg);
3230 ++
3231 + dequeue_load_avg(cfs_rq, se);
3232 + sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
3233 +- sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
3234 ++ cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
3235 + sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
3236 +- sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
3237 ++ cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
3238 +
3239 + add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
3240 +
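The fair.c hunk replaces independent sub_positive() updates of the *_sum fields with a recomputation from the matching *_avg, because subtracting sums and averages separately lets rounding drift leave them inconsistent. A toy arithmetic sketch of the invariant (all numbers illustrative, not real PELT state):

    #include <stdio.h>

    int main(void)
    {
        unsigned long divider = 47742;   /* illustrative PELT divider */
        unsigned long cfs_avg = 100, cfs_sum = cfs_avg * divider + 123;
        unsigned long se_avg = 40, se_sum = se_avg * divider + 4000;

        /* old style: subtract both fields independently */
        unsigned long avg1 = cfs_avg - se_avg;
        unsigned long sum1 = cfs_sum - se_sum;

        /* fixed style: derive the sum from the new avg */
        unsigned long avg2 = cfs_avg - se_avg;
        unsigned long sum2 = avg2 * divider;

        printf("independent: sum=%lu vs avg*divider=%lu (drift %ld)\n",
               sum1, avg1 * divider, (long)(sum1 - avg1 * divider));
        printf("derived:     sum=%lu == avg*divider=%lu\n",
               sum2, avg2 * divider);
        return 0;
    }

The first line shows a nonzero drift between sum and avg * divider; the second restores the sum == avg * divider relationship the scheduler expects after detaching an entity.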
3241 +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
3242 +index b2c141eaca020..b09c598065019 100644
3243 +--- a/kernel/trace/trace.c
3244 ++++ b/kernel/trace/trace.c
3245 +@@ -2195,9 +2195,6 @@ struct saved_cmdlines_buffer {
3246 + };
3247 + static struct saved_cmdlines_buffer *savedcmd;
3248 +
3249 +-/* temporary disable recording */
3250 +-static atomic_t trace_record_taskinfo_disabled __read_mostly;
3251 +-
3252 + static inline char *get_saved_cmdlines(int idx)
3253 + {
3254 + return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
3255 +@@ -2483,8 +2480,6 @@ static bool tracing_record_taskinfo_skip(int flags)
3256 + {
3257 + if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
3258 + return true;
3259 +- if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
3260 +- return true;
3261 + if (!__this_cpu_read(trace_taskinfo_save))
3262 + return true;
3263 + return false;
3264 +@@ -3685,9 +3680,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
3265 + return ERR_PTR(-EBUSY);
3266 + #endif
3267 +
3268 +- if (!iter->snapshot)
3269 +- atomic_inc(&trace_record_taskinfo_disabled);
3270 +-
3271 + if (*pos != iter->pos) {
3272 + iter->ent = NULL;
3273 + iter->cpu = 0;
3274 +@@ -3730,9 +3722,6 @@ static void s_stop(struct seq_file *m, void *p)
3275 + return;
3276 + #endif
3277 +
3278 +- if (!iter->snapshot)
3279 +- atomic_dec(&trace_record_taskinfo_disabled);
3280 +-
3281 + trace_access_unlock(iter->cpu_file);
3282 + trace_event_read_unlock();
3283 + }
3284 +diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
3285 +index c1637f90c8a38..4702efb00ff21 100644
3286 +--- a/kernel/trace/trace_clock.c
3287 ++++ b/kernel/trace/trace_clock.c
3288 +@@ -115,9 +115,9 @@ u64 notrace trace_clock_global(void)
3289 + prev_time = READ_ONCE(trace_clock_struct.prev_time);
3290 + now = sched_clock_cpu(this_cpu);
3291 +
3292 +- /* Make sure that now is always greater than prev_time */
3293 ++ /* Make sure that now is always greater than or equal to prev_time */
3294 + if ((s64)(now - prev_time) < 0)
3295 +- now = prev_time + 1;
3296 ++ now = prev_time;
3297 +
3298 + /*
3299 + * If in an NMI context then dont risk lockups and simply return
3300 +@@ -131,7 +131,7 @@ u64 notrace trace_clock_global(void)
3301 + /* Reread prev_time in case it was already updated */
3302 + prev_time = READ_ONCE(trace_clock_struct.prev_time);
3303 + if ((s64)(now - prev_time) < 0)
3304 +- now = prev_time + 1;
3305 ++ now = prev_time;
3306 +
3307 + trace_clock_struct.prev_time = now;
3308 +
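The trace_clock_global() fix clamps a lagging reader to prev_time instead of prev_time + 1: the clock must never go backwards, but bumping by one on every lagging CPU could push the global clock ever further ahead of all local clocks. A standalone model of the clamp (toy single-threaded code, not the locked kernel path):

    #include <stdio.h>

    static unsigned long long prev_time;

    static unsigned long long global_clock(unsigned long long local_now)
    {
        /* make sure now is greater than or equal to prev_time */
        if ((long long)(local_now - prev_time) < 0)
            local_now = prev_time;      /* was: prev_time + 1 */
        prev_time = local_now;
        return local_now;
    }

    int main(void)
    {
        unsigned long long cpu_clocks[] = { 100, 98, 99, 105 };

        for (int i = 0; i < 4; i++)
            printf("read %d -> %llu\n", i, global_clock(cpu_clocks[i]));
        return 0;
    }

The reads return 100, 100, 100, 105: monotonic, and never ahead of the largest local clock actually observed.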
3309 +diff --git a/mm/memory-failure.c b/mm/memory-failure.c
3310 +index 2d7a667f8e609..25fb82320e3d5 100644
3311 +--- a/mm/memory-failure.c
3312 ++++ b/mm/memory-failure.c
3313 +@@ -1445,7 +1445,12 @@ int memory_failure(unsigned long pfn, int flags)
3314 + return 0;
3315 + }
3316 +
3317 +- if (!PageTransTail(p) && !PageLRU(p))
3318 ++ /*
3319 ++ * __munlock_pagevec may clear a writeback page's LRU flag without
3320 ++ * page_lock. We need wait writeback completion for this page or it
3321 ++ * may trigger vfs BUG while evict inode.
3322 ++ */
3323 ++ if (!PageTransTail(p) && !PageLRU(p) && !PageWriteback(p))
3324 + goto identify_page_state;
3325 +
3326 + /*
3327 +diff --git a/mm/slab_common.c b/mm/slab_common.c
3328 +index 8f27ccf9f7f35..ec832904f4084 100644
3329 +--- a/mm/slab_common.c
3330 ++++ b/mm/slab_common.c
3331 +@@ -87,8 +87,7 @@ EXPORT_SYMBOL(kmem_cache_size);
3332 + #ifdef CONFIG_DEBUG_VM
3333 + static int kmem_cache_sanity_check(const char *name, unsigned int size)
3334 + {
3335 +- if (!name || in_interrupt() || size < sizeof(void *) ||
3336 +- size > KMALLOC_MAX_SIZE) {
3337 ++ if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
3338 + pr_err("kmem_cache_create(%s) integrity check failed\n", name);
3339 + return -EINVAL;
3340 + }
3341 +diff --git a/mm/slub.c b/mm/slub.c
3342 +index 05a501b67cd59..f5fc44208bdc3 100644
3343 +--- a/mm/slub.c
3344 ++++ b/mm/slub.c
3345 +@@ -15,6 +15,7 @@
3346 + #include <linux/module.h>
3347 + #include <linux/bit_spinlock.h>
3348 + #include <linux/interrupt.h>
3349 ++#include <linux/swab.h>
3350 + #include <linux/bitops.h>
3351 + #include <linux/slab.h>
3352 + #include "slab.h"
3353 +@@ -698,15 +699,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
3354 + p, p - addr, get_freepointer(s, p));
3355 +
3356 + if (s->flags & SLAB_RED_ZONE)
3357 +- print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
3358 ++ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
3359 + s->red_left_pad);
3360 + else if (p > addr + 16)
3361 + print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
3362 +
3363 +- print_section(KERN_ERR, "Object ", p,
3364 ++ print_section(KERN_ERR, "Object ", p,
3365 + min_t(unsigned int, s->object_size, PAGE_SIZE));
3366 + if (s->flags & SLAB_RED_ZONE)
3367 +- print_section(KERN_ERR, "Redzone ", p + s->object_size,
3368 ++ print_section(KERN_ERR, "Redzone ", p + s->object_size,
3369 + s->inuse - s->object_size);
3370 +
3371 + off = get_info_end(s);
3372 +@@ -718,7 +719,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
3373 +
3374 + if (off != size_from_object(s))
3375 + /* Beginning of the filler is the free pointer */
3376 +- print_section(KERN_ERR, "Padding ", p + off,
3377 ++ print_section(KERN_ERR, "Padding ", p + off,
3378 + size_from_object(s) - off);
3379 +
3380 + dump_stack();
3381 +@@ -895,11 +896,11 @@ static int check_object(struct kmem_cache *s, struct page *page,
3382 + u8 *endobject = object + s->object_size;
3383 +
3384 + if (s->flags & SLAB_RED_ZONE) {
3385 +- if (!check_bytes_and_report(s, page, object, "Redzone",
3386 ++ if (!check_bytes_and_report(s, page, object, "Left Redzone",
3387 + object - s->red_left_pad, val, s->red_left_pad))
3388 + return 0;
3389 +
3390 +- if (!check_bytes_and_report(s, page, object, "Redzone",
3391 ++ if (!check_bytes_and_report(s, page, object, "Right Redzone",
3392 + endobject, val, s->inuse - s->object_size))
3393 + return 0;
3394 + } else {
3395 +@@ -914,7 +915,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
3396 + if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
3397 + (!check_bytes_and_report(s, page, p, "Poison", p,
3398 + POISON_FREE, s->object_size - 1) ||
3399 +- !check_bytes_and_report(s, page, p, "Poison",
3400 ++ !check_bytes_and_report(s, page, p, "End Poison",
3401 + p + s->object_size - 1, POISON_END, 1)))
3402 + return 0;
3403 + /*
3404 +@@ -3639,7 +3640,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
3405 + {
3406 + slab_flags_t flags = s->flags;
3407 + unsigned int size = s->object_size;
3408 +- unsigned int freepointer_area;
3409 + unsigned int order;
3410 +
3411 + /*
3412 +@@ -3648,13 +3648,6 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
3413 + * the possible location of the free pointer.
3414 + */
3415 + size = ALIGN(size, sizeof(void *));
3416 +- /*
3417 +- * This is the area of the object where a freepointer can be
3418 +- * safely written. If redzoning adds more to the inuse size, we
3419 +- * can't use that portion for writing the freepointer, so
3420 +- * s->offset must be limited within this for the general case.
3421 +- */
3422 +- freepointer_area = size;
3423 +
3424 + #ifdef CONFIG_SLUB_DEBUG
3425 + /*
3426 +@@ -3680,19 +3673,21 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
3427 +
3428 + /*
3429 + * With that we have determined the number of bytes in actual use
3430 +- * by the object. This is the potential offset to the free pointer.
3431 ++ * by the object and redzoning.
3432 + */
3433 + s->inuse = size;
3434 +
3435 +- if (((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3436 +- s->ctor)) {
3437 ++ if ((flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)) ||
3438 ++ ((flags & SLAB_RED_ZONE) && s->object_size < sizeof(void *)) ||
3439 ++ s->ctor) {
3440 + /*
3441 + * Relocate free pointer after the object if it is not
3442 + * permitted to overwrite the first word of the object on
3443 + * kmem_cache_free.
3444 + *
3445 + * This is the case if we do RCU, have a constructor or
3446 +- * destructor or are poisoning the objects.
3447 ++ * destructor, are poisoning the objects, or are
3448 ++ * redzoning an object smaller than sizeof(void *).
3449 + *
3450 + * The assumption that s->offset >= s->inuse means free
3451 + * pointer is outside of the object is used in the
3452 +@@ -3701,13 +3696,13 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
3453 + */
3454 + s->offset = size;
3455 + size += sizeof(void *);
3456 +- } else if (freepointer_area > sizeof(void *)) {
3457 ++ } else {
3458 + /*
3459 + * Store freelist pointer near middle of object to keep
3460 + * it away from the edges of the object to avoid small
3461 + * sized over/underflows from neighboring allocations.
3462 + */
3463 +- s->offset = ALIGN(freepointer_area / 2, sizeof(void *));
3464 ++ s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
3465 + }
3466 +
3467 + #ifdef CONFIG_SLUB_DEBUG
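The calculate_sizes() change places the free pointer near the middle of the object with ALIGN_DOWN(object_size / 2, sizeof(void *)), and relocates it past the object when redzoning an object smaller than a pointer (the real code also relocates for RCU, poisoning, and constructors). A toy sketch of just the placement rule, with simplified inputs and no real kmem_cache:

    #include <stdio.h>

    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    static void place(unsigned int object_size, int redzone)
    {
        unsigned int inuse = object_size, offset;

        if (redzone && object_size < sizeof(void *)) {
            /* a mid-object pointer would overwrite the redzone */
            offset = inuse;             /* store it after the object */
        } else {
            offset = ALIGN_DOWN(object_size / 2, sizeof(void *));
        }
        printf("size %2u, redzone %d -> freepointer at %u\n",
               object_size, redzone, offset);
    }

    int main(void)
    {
        place(64, 0);
        place(64, 1);
        place(4, 1);    /* the case this fix handles: 4 < sizeof(void *) */
        return 0;
    }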
3468 +diff --git a/mm/swapfile.c b/mm/swapfile.c
3469 +index 5256c10049b0f..5af6b0f770de6 100644
3470 +--- a/mm/swapfile.c
3471 ++++ b/mm/swapfile.c
3472 +@@ -1903,7 +1903,7 @@ unsigned int count_swap_pages(int type, int free)
3473 +
3474 + static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
3475 + {
3476 +- return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
3477 ++ return pte_same(pte_swp_clear_flags(pte), swp_pte);
3478 + }
3479 +
3480 + /*
3481 +diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
3482 +index 206d0b424712e..c0aa54d21c649 100644
3483 +--- a/net/batman-adv/bat_iv_ogm.c
3484 ++++ b/net/batman-adv/bat_iv_ogm.c
3485 +@@ -410,8 +410,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
3486 + if (WARN_ON(!forw_packet->if_outgoing))
3487 + return;
3488 +
3489 +- if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
3490 ++ if (forw_packet->if_outgoing->soft_iface != soft_iface) {
3491 ++ pr_warn("%s: soft interface switch for queued OGM\n", __func__);
3492 + return;
3493 ++ }
3494 +
3495 + if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
3496 + return;
3497 +diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
3498 +index 8424464186a6b..5e5726048a1af 100644
3499 +--- a/net/bridge/br_private.h
3500 ++++ b/net/bridge/br_private.h
3501 +@@ -98,8 +98,8 @@ struct br_vlan_stats {
3502 + };
3503 +
3504 + struct br_tunnel_info {
3505 +- __be64 tunnel_id;
3506 +- struct metadata_dst *tunnel_dst;
3507 ++ __be64 tunnel_id;
3508 ++ struct metadata_dst __rcu *tunnel_dst;
3509 + };
3510 +
3511 + /* private vlan flags */
3512 +diff --git a/net/bridge/br_vlan_tunnel.c b/net/bridge/br_vlan_tunnel.c
3513 +index 169e005fbda29..debe167202782 100644
3514 +--- a/net/bridge/br_vlan_tunnel.c
3515 ++++ b/net/bridge/br_vlan_tunnel.c
3516 +@@ -41,26 +41,33 @@ static struct net_bridge_vlan *br_vlan_tunnel_lookup(struct rhashtable *tbl,
3517 + br_vlan_tunnel_rht_params);
3518 + }
3519 +
3520 ++static void vlan_tunnel_info_release(struct net_bridge_vlan *vlan)
3521 ++{
3522 ++ struct metadata_dst *tdst = rtnl_dereference(vlan->tinfo.tunnel_dst);
3523 ++
3524 ++ WRITE_ONCE(vlan->tinfo.tunnel_id, 0);
3525 ++ RCU_INIT_POINTER(vlan->tinfo.tunnel_dst, NULL);
3526 ++ dst_release(&tdst->dst);
3527 ++}
3528 ++
3529 + void vlan_tunnel_info_del(struct net_bridge_vlan_group *vg,
3530 + struct net_bridge_vlan *vlan)
3531 + {
3532 +- if (!vlan->tinfo.tunnel_dst)
3533 ++ if (!rcu_access_pointer(vlan->tinfo.tunnel_dst))
3534 + return;
3535 + rhashtable_remove_fast(&vg->tunnel_hash, &vlan->tnode,
3536 + br_vlan_tunnel_rht_params);
3537 +- vlan->tinfo.tunnel_id = 0;
3538 +- dst_release(&vlan->tinfo.tunnel_dst->dst);
3539 +- vlan->tinfo.tunnel_dst = NULL;
3540 ++ vlan_tunnel_info_release(vlan);
3541 + }
3542 +
3543 + static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
3544 + struct net_bridge_vlan *vlan, u32 tun_id)
3545 + {
3546 +- struct metadata_dst *metadata = NULL;
3547 ++ struct metadata_dst *metadata = rtnl_dereference(vlan->tinfo.tunnel_dst);
3548 + __be64 key = key32_to_tunnel_id(cpu_to_be32(tun_id));
3549 + int err;
3550 +
3551 +- if (vlan->tinfo.tunnel_dst)
3552 ++ if (metadata)
3553 + return -EEXIST;
3554 +
3555 + metadata = __ip_tun_set_dst(0, 0, 0, 0, 0, TUNNEL_KEY,
3556 +@@ -69,8 +76,8 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
3557 + return -EINVAL;
3558 +
3559 + metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_BRIDGE;
3560 +- vlan->tinfo.tunnel_dst = metadata;
3561 +- vlan->tinfo.tunnel_id = key;
3562 ++ rcu_assign_pointer(vlan->tinfo.tunnel_dst, metadata);
3563 ++ WRITE_ONCE(vlan->tinfo.tunnel_id, key);
3564 +
3565 + err = rhashtable_lookup_insert_fast(&vg->tunnel_hash, &vlan->tnode,
3566 + br_vlan_tunnel_rht_params);
3567 +@@ -79,9 +86,7 @@ static int __vlan_tunnel_info_add(struct net_bridge_vlan_group *vg,
3568 +
3569 + return 0;
3570 + out:
3571 +- dst_release(&vlan->tinfo.tunnel_dst->dst);
3572 +- vlan->tinfo.tunnel_dst = NULL;
3573 +- vlan->tinfo.tunnel_id = 0;
3574 ++ vlan_tunnel_info_release(vlan);
3575 +
3576 + return err;
3577 + }
3578 +@@ -182,12 +187,15 @@ int br_handle_ingress_vlan_tunnel(struct sk_buff *skb,
3579 + int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
3580 + struct net_bridge_vlan *vlan)
3581 + {
3582 ++ struct metadata_dst *tunnel_dst;
3583 ++ __be64 tunnel_id;
3584 + int err;
3585 +
3586 +- if (!vlan || !vlan->tinfo.tunnel_id)
3587 ++ if (!vlan)
3588 + return 0;
3589 +
3590 +- if (unlikely(!skb_vlan_tag_present(skb)))
3591 ++ tunnel_id = READ_ONCE(vlan->tinfo.tunnel_id);
3592 ++ if (!tunnel_id || unlikely(!skb_vlan_tag_present(skb)))
3593 + return 0;
3594 +
3595 + skb_dst_drop(skb);
3596 +@@ -195,7 +203,9 @@ int br_handle_egress_vlan_tunnel(struct sk_buff *skb,
3597 + if (err)
3598 + return err;
3599 +
3600 +- skb_dst_set(skb, dst_clone(&vlan->tinfo.tunnel_dst->dst));
3601 ++ tunnel_dst = rcu_dereference(vlan->tinfo.tunnel_dst);
3602 ++ if (tunnel_dst && dst_hold_safe(&tunnel_dst->dst))
3603 ++ skb_dst_set(skb, &tunnel_dst->dst);
3604 +
3605 + return 0;
3606 + }
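Converting tunnel_dst to an RCU-annotated pointer means the egress path must load it once and take a reference with dst_hold_safe() before using it, rather than cloning through a pointer that may be cleared concurrently. A userspace analogue of the publish / load-once / hold-if-alive pattern using C11 atomics (illustrative only; this is not kernel RCU):

    #include <stdio.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct dst { atomic_int refcnt; int payload; };

    static _Atomic(struct dst *) tunnel_dst;

    /* take a reference only if the object is still alive */
    static int dst_hold_safe(struct dst *d)
    {
        int r = atomic_load(&d->refcnt);

        while (r > 0)
            if (atomic_compare_exchange_weak(&d->refcnt, &r, r + 1))
                return 1;
        return 0;   /* already dying, don't touch it */
    }

    int main(void)
    {
        struct dst *d = malloc(sizeof(*d));

        atomic_init(&d->refcnt, 1);
        d->payload = 42;
        /* publisher: release-store the fully initialized object */
        atomic_store_explicit(&tunnel_dst, d, memory_order_release);

        /* reader: one acquire-load, then hold-safe before any use */
        struct dst *t = atomic_load_explicit(&tunnel_dst,
                                             memory_order_acquire);
        if (t && dst_hold_safe(t))
            printf("payload %d\n", t->payload);

        free(d);
        return 0;
    }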
3607 +diff --git a/net/can/bcm.c b/net/can/bcm.c
3608 +index 909b9e684e043..f3e4d9528fa38 100644
3609 +--- a/net/can/bcm.c
3610 ++++ b/net/can/bcm.c
3611 +@@ -125,7 +125,7 @@ struct bcm_sock {
3612 + struct sock sk;
3613 + int bound;
3614 + int ifindex;
3615 +- struct notifier_block notifier;
3616 ++ struct list_head notifier;
3617 + struct list_head rx_ops;
3618 + struct list_head tx_ops;
3619 + unsigned long dropped_usr_msgs;
3620 +@@ -133,6 +133,10 @@ struct bcm_sock {
3621 + char procname [32]; /* inode number in decimal with \0 */
3622 + };
3623 +
3624 ++static LIST_HEAD(bcm_notifier_list);
3625 ++static DEFINE_SPINLOCK(bcm_notifier_lock);
3626 ++static struct bcm_sock *bcm_busy_notifier;
3627 ++
3628 + static inline struct bcm_sock *bcm_sk(const struct sock *sk)
3629 + {
3630 + return (struct bcm_sock *)sk;
3631 +@@ -402,6 +406,7 @@ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
3632 + if (!op->count && (op->flags & TX_COUNTEVT)) {
3633 +
3634 + /* create notification to user */
3635 ++ memset(&msg_head, 0, sizeof(msg_head));
3636 + msg_head.opcode = TX_EXPIRED;
3637 + msg_head.flags = op->flags;
3638 + msg_head.count = op->count;
3639 +@@ -439,6 +444,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
3640 + /* this element is not throttled anymore */
3641 + data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
3642 +
3643 ++ memset(&head, 0, sizeof(head));
3644 + head.opcode = RX_CHANGED;
3645 + head.flags = op->flags;
3646 + head.count = op->count;
3647 +@@ -560,6 +566,7 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
3648 + }
3649 +
3650 + /* create notification to user */
3651 ++ memset(&msg_head, 0, sizeof(msg_head));
3652 + msg_head.opcode = RX_TIMEOUT;
3653 + msg_head.flags = op->flags;
3654 + msg_head.count = op->count;
3655 +@@ -1378,20 +1385,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
3656 + /*
3657 + * notification handler for netdevice status changes
3658 + */
3659 +-static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
3660 +- void *ptr)
3661 ++static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
3662 ++ struct net_device *dev)
3663 + {
3664 +- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3665 +- struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
3666 + struct sock *sk = &bo->sk;
3667 + struct bcm_op *op;
3668 + int notify_enodev = 0;
3669 +
3670 + if (!net_eq(dev_net(dev), sock_net(sk)))
3671 +- return NOTIFY_DONE;
3672 +-
3673 +- if (dev->type != ARPHRD_CAN)
3674 +- return NOTIFY_DONE;
3675 ++ return;
3676 +
3677 + switch (msg) {
3678 +
3679 +@@ -1426,7 +1428,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
3680 + sk->sk_error_report(sk);
3681 + }
3682 + }
3683 ++}
3684 +
3685 ++static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
3686 ++ void *ptr)
3687 ++{
3688 ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3689 ++
3690 ++ if (dev->type != ARPHRD_CAN)
3691 ++ return NOTIFY_DONE;
3692 ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
3693 ++ return NOTIFY_DONE;
3694 ++ if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
3695 ++ return NOTIFY_DONE;
3696 ++
3697 ++ spin_lock(&bcm_notifier_lock);
3698 ++ list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
3699 ++ spin_unlock(&bcm_notifier_lock);
3700 ++ bcm_notify(bcm_busy_notifier, msg, dev);
3701 ++ spin_lock(&bcm_notifier_lock);
3702 ++ }
3703 ++ bcm_busy_notifier = NULL;
3704 ++ spin_unlock(&bcm_notifier_lock);
3705 + return NOTIFY_DONE;
3706 + }
3707 +
3708 +@@ -1446,9 +1469,9 @@ static int bcm_init(struct sock *sk)
3709 + INIT_LIST_HEAD(&bo->rx_ops);
3710 +
3711 + /* set notifier */
3712 +- bo->notifier.notifier_call = bcm_notifier;
3713 +-
3714 +- register_netdevice_notifier(&bo->notifier);
3715 ++ spin_lock(&bcm_notifier_lock);
3716 ++ list_add_tail(&bo->notifier, &bcm_notifier_list);
3717 ++ spin_unlock(&bcm_notifier_lock);
3718 +
3719 + return 0;
3720 + }
3721 +@@ -1471,7 +1494,14 @@ static int bcm_release(struct socket *sock)
3722 +
3723 + /* remove bcm_ops, timer, rx_unregister(), etc. */
3724 +
3725 +- unregister_netdevice_notifier(&bo->notifier);
3726 ++ spin_lock(&bcm_notifier_lock);
3727 ++ while (bcm_busy_notifier == bo) {
3728 ++ spin_unlock(&bcm_notifier_lock);
3729 ++ schedule_timeout_uninterruptible(1);
3730 ++ spin_lock(&bcm_notifier_lock);
3731 ++ }
3732 ++ list_del(&bo->notifier);
3733 ++ spin_unlock(&bcm_notifier_lock);
3734 +
3735 + lock_sock(sk);
3736 +
3737 +@@ -1692,6 +1722,10 @@ static struct pernet_operations canbcm_pernet_ops __read_mostly = {
3738 + .exit = canbcm_pernet_exit,
3739 + };
3740 +
3741 ++static struct notifier_block canbcm_notifier = {
3742 ++ .notifier_call = bcm_notifier
3743 ++};
3744 ++
3745 + static int __init bcm_module_init(void)
3746 + {
3747 + int err;
3748 +@@ -1705,12 +1739,14 @@ static int __init bcm_module_init(void)
3749 + }
3750 +
3751 + register_pernet_subsys(&canbcm_pernet_ops);
3752 ++ register_netdevice_notifier(&canbcm_notifier);
3753 + return 0;
3754 + }
3755 +
3756 + static void __exit bcm_module_exit(void)
3757 + {
3758 + can_proto_unregister(&bcm_can_proto);
3759 ++ unregister_netdevice_notifier(&canbcm_notifier);
3760 + unregister_pernet_subsys(&canbcm_pernet_ops);
3761 + }
3762 +
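The bcm change (and the matching isotp and raw changes below) replaces one registered netdevice notifier per socket with a single module-wide notifier that walks a socket list; a "busy" pointer records which entry the walker is processing so the lock can be dropped around the callback, and release() spins until its socket is no longer in flight. A toy pthread sketch of that handshake (illustrative, not net/can/bcm.c):

    #include <stdio.h>
    #include <pthread.h>
    #include <sched.h>

    struct sock_entry { int id; struct sock_entry *next; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct sock_entry *list, *busy;

    static void notify_all(void)
    {
        pthread_mutex_lock(&lock);
        for (busy = list; busy; busy = busy->next) {
            pthread_mutex_unlock(&lock);
            printf("notify socket %d (lock dropped)\n", busy->id);
            pthread_mutex_lock(&lock);
        }
        busy = NULL;
        pthread_mutex_unlock(&lock);
    }

    static void release_entry(struct sock_entry *e)
    {
        pthread_mutex_lock(&lock);
        while (busy == e) {         /* wait out an in-flight callback */
            pthread_mutex_unlock(&lock);
            sched_yield();
            pthread_mutex_lock(&lock);
        }
        /* unlink e from the list here (omitted) */
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        struct sock_entry a = { 1, NULL }, b = { 2, &a };

        list = &b;
        notify_all();
        release_entry(&a);
        printf("released socket %d\n", a.id);
        return 0;
    }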
3763 +diff --git a/net/can/isotp.c b/net/can/isotp.c
3764 +index d5780ab29e098..1adefb14527d8 100644
3765 +--- a/net/can/isotp.c
3766 ++++ b/net/can/isotp.c
3767 +@@ -143,10 +143,14 @@ struct isotp_sock {
3768 + u32 force_tx_stmin;
3769 + u32 force_rx_stmin;
3770 + struct tpcon rx, tx;
3771 +- struct notifier_block notifier;
3772 ++ struct list_head notifier;
3773 + wait_queue_head_t wait;
3774 + };
3775 +
3776 ++static LIST_HEAD(isotp_notifier_list);
3777 ++static DEFINE_SPINLOCK(isotp_notifier_lock);
3778 ++static struct isotp_sock *isotp_busy_notifier;
3779 ++
3780 + static inline struct isotp_sock *isotp_sk(const struct sock *sk)
3781 + {
3782 + return (struct isotp_sock *)sk;
3783 +@@ -1008,7 +1012,14 @@ static int isotp_release(struct socket *sock)
3784 + /* wait for complete transmission of current pdu */
3785 + wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
3786 +
3787 +- unregister_netdevice_notifier(&so->notifier);
3788 ++ spin_lock(&isotp_notifier_lock);
3789 ++ while (isotp_busy_notifier == so) {
3790 ++ spin_unlock(&isotp_notifier_lock);
3791 ++ schedule_timeout_uninterruptible(1);
3792 ++ spin_lock(&isotp_notifier_lock);
3793 ++ }
3794 ++ list_del(&so->notifier);
3795 ++ spin_unlock(&isotp_notifier_lock);
3796 +
3797 + lock_sock(sk);
3798 +
3799 +@@ -1284,21 +1295,16 @@ static int isotp_getsockopt(struct socket *sock, int level, int optname,
3800 + return 0;
3801 + }
3802 +
3803 +-static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
3804 +- void *ptr)
3805 ++static void isotp_notify(struct isotp_sock *so, unsigned long msg,
3806 ++ struct net_device *dev)
3807 + {
3808 +- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3809 +- struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
3810 + struct sock *sk = &so->sk;
3811 +
3812 + if (!net_eq(dev_net(dev), sock_net(sk)))
3813 +- return NOTIFY_DONE;
3814 +-
3815 +- if (dev->type != ARPHRD_CAN)
3816 +- return NOTIFY_DONE;
3817 ++ return;
3818 +
3819 + if (so->ifindex != dev->ifindex)
3820 +- return NOTIFY_DONE;
3821 ++ return;
3822 +
3823 + switch (msg) {
3824 + case NETDEV_UNREGISTER:
3825 +@@ -1324,7 +1330,28 @@ static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
3826 + sk->sk_error_report(sk);
3827 + break;
3828 + }
3829 ++}
3830 +
3831 ++static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
3832 ++ void *ptr)
3833 ++{
3834 ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3835 ++
3836 ++ if (dev->type != ARPHRD_CAN)
3837 ++ return NOTIFY_DONE;
3838 ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
3839 ++ return NOTIFY_DONE;
3840 ++ if (unlikely(isotp_busy_notifier)) /* Check for reentrant bug. */
3841 ++ return NOTIFY_DONE;
3842 ++
3843 ++ spin_lock(&isotp_notifier_lock);
3844 ++ list_for_each_entry(isotp_busy_notifier, &isotp_notifier_list, notifier) {
3845 ++ spin_unlock(&isotp_notifier_lock);
3846 ++ isotp_notify(isotp_busy_notifier, msg, dev);
3847 ++ spin_lock(&isotp_notifier_lock);
3848 ++ }
3849 ++ isotp_busy_notifier = NULL;
3850 ++ spin_unlock(&isotp_notifier_lock);
3851 + return NOTIFY_DONE;
3852 + }
3853 +
3854 +@@ -1361,8 +1388,9 @@ static int isotp_init(struct sock *sk)
3855 +
3856 + init_waitqueue_head(&so->wait);
3857 +
3858 +- so->notifier.notifier_call = isotp_notifier;
3859 +- register_netdevice_notifier(&so->notifier);
3860 ++ spin_lock(&isotp_notifier_lock);
3861 ++ list_add_tail(&so->notifier, &isotp_notifier_list);
3862 ++ spin_unlock(&isotp_notifier_lock);
3863 +
3864 + return 0;
3865 + }
3866 +@@ -1409,6 +1437,10 @@ static const struct can_proto isotp_can_proto = {
3867 + .prot = &isotp_proto,
3868 + };
3869 +
3870 ++static struct notifier_block canisotp_notifier = {
3871 ++ .notifier_call = isotp_notifier
3872 ++};
3873 ++
3874 + static __init int isotp_module_init(void)
3875 + {
3876 + int err;
3877 +@@ -1418,6 +1450,8 @@ static __init int isotp_module_init(void)
3878 + err = can_proto_register(&isotp_can_proto);
3879 + if (err < 0)
3880 + pr_err("can: registration of isotp protocol failed\n");
3881 ++ else
3882 ++ register_netdevice_notifier(&canisotp_notifier);
3883 +
3884 + return err;
3885 + }
3886 +@@ -1425,6 +1459,7 @@ static __init int isotp_module_init(void)
3887 + static __exit void isotp_module_exit(void)
3888 + {
3889 + can_proto_unregister(&isotp_can_proto);
3890 ++ unregister_netdevice_notifier(&canisotp_notifier);
3891 + }
3892 +
3893 + module_init(isotp_module_init);
3894 +diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
3895 +index e09d087ba2409..c3946c3558826 100644
3896 +--- a/net/can/j1939/transport.c
3897 ++++ b/net/can/j1939/transport.c
3898 +@@ -330,6 +330,9 @@ static void j1939_session_skb_drop_old(struct j1939_session *session)
3899 +
3900 + if ((do_skcb->offset + do_skb->len) < offset_start) {
3901 + __skb_unlink(do_skb, &session->skb_queue);
3902 ++ /* drop ref taken in j1939_session_skb_queue() */
3903 ++ skb_unref(do_skb);
3904 ++
3905 + kfree_skb(do_skb);
3906 + }
3907 + spin_unlock_irqrestore(&session->skb_queue.lock, flags);
3908 +@@ -349,12 +352,13 @@ void j1939_session_skb_queue(struct j1939_session *session,
3909 +
3910 + skcb->flags |= J1939_ECU_LOCAL_SRC;
3911 +
3912 ++ skb_get(skb);
3913 + skb_queue_tail(&session->skb_queue, skb);
3914 + }
3915 +
3916 + static struct
3917 +-sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
3918 +- unsigned int offset_start)
3919 ++sk_buff *j1939_session_skb_get_by_offset(struct j1939_session *session,
3920 ++ unsigned int offset_start)
3921 + {
3922 + struct j1939_priv *priv = session->priv;
3923 + struct j1939_sk_buff_cb *do_skcb;
3924 +@@ -371,6 +375,10 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
3925 + skb = do_skb;
3926 + }
3927 + }
3928 ++
3929 ++ if (skb)
3930 ++ skb_get(skb);
3931 ++
3932 + spin_unlock_irqrestore(&session->skb_queue.lock, flags);
3933 +
3934 + if (!skb)
3935 +@@ -381,12 +389,12 @@ sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
3936 + return skb;
3937 + }
3938 +
3939 +-static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
3940 ++static struct sk_buff *j1939_session_skb_get(struct j1939_session *session)
3941 + {
3942 + unsigned int offset_start;
3943 +
3944 + offset_start = session->pkt.dpo * 7;
3945 +- return j1939_session_skb_find_by_offset(session, offset_start);
3946 ++ return j1939_session_skb_get_by_offset(session, offset_start);
3947 + }
3948 +
3949 + /* see if we are receiver
3950 +@@ -776,7 +784,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
3951 + int ret = 0;
3952 + u8 dat[8];
3953 +
3954 +- se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
3955 ++ se_skb = j1939_session_skb_get_by_offset(session, session->pkt.tx * 7);
3956 + if (!se_skb)
3957 + return -ENOBUFS;
3958 +
3959 +@@ -801,7 +809,8 @@ static int j1939_session_tx_dat(struct j1939_session *session)
3960 + netdev_err_once(priv->ndev,
3961 + "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
3962 + __func__, session, skcb->offset, se_skb->len , session->pkt.tx);
3963 +- return -EOVERFLOW;
3964 ++ ret = -EOVERFLOW;
3965 ++ goto out_free;
3966 + }
3967 +
3968 + if (!len) {
3969 +@@ -835,6 +844,12 @@ static int j1939_session_tx_dat(struct j1939_session *session)
3970 + if (pkt_done)
3971 + j1939_tp_set_rxtimeout(session, 250);
3972 +
3973 ++ out_free:
3974 ++ if (ret)
3975 ++ kfree_skb(se_skb);
3976 ++ else
3977 ++ consume_skb(se_skb);
3978 ++
3979 + return ret;
3980 + }
3981 +
3982 +@@ -1007,7 +1022,7 @@ static int j1939_xtp_txnext_receiver(struct j1939_session *session)
3983 + static int j1939_simple_txnext(struct j1939_session *session)
3984 + {
3985 + struct j1939_priv *priv = session->priv;
3986 +- struct sk_buff *se_skb = j1939_session_skb_find(session);
3987 ++ struct sk_buff *se_skb = j1939_session_skb_get(session);
3988 + struct sk_buff *skb;
3989 + int ret;
3990 +
3991 +@@ -1015,8 +1030,10 @@ static int j1939_simple_txnext(struct j1939_session *session)
3992 + return 0;
3993 +
3994 + skb = skb_clone(se_skb, GFP_ATOMIC);
3995 +- if (!skb)
3996 +- return -ENOMEM;
3997 ++ if (!skb) {
3998 ++ ret = -ENOMEM;
3999 ++ goto out_free;
4000 ++ }
4001 +
4002 + can_skb_set_owner(skb, se_skb->sk);
4003 +
4004 +@@ -1024,12 +1041,18 @@ static int j1939_simple_txnext(struct j1939_session *session)
4005 +
4006 + ret = j1939_send_one(priv, skb);
4007 + if (ret)
4008 +- return ret;
4009 ++ goto out_free;
4010 +
4011 + j1939_sk_errqueue(session, J1939_ERRQUEUE_SCHED);
4012 + j1939_sk_queue_activate_next(session);
4013 +
4014 +- return 0;
4015 ++ out_free:
4016 ++ if (ret)
4017 ++ kfree_skb(se_skb);
4018 ++ else
4019 ++ consume_skb(se_skb);
4020 ++
4021 ++ return ret;
4022 + }
4023 +
4024 + static bool j1939_session_deactivate_locked(struct j1939_session *session)
4025 +@@ -1170,9 +1193,10 @@ static void j1939_session_completed(struct j1939_session *session)
4026 + struct sk_buff *skb;
4027 +
4028 + if (!session->transmission) {
4029 +- skb = j1939_session_skb_find(session);
4030 ++ skb = j1939_session_skb_get(session);
4031 + /* distribute among j1939 receivers */
4032 + j1939_sk_recv(session->priv, skb);
4033 ++ consume_skb(skb);
4034 + }
4035 +
4036 + j1939_session_deactivate_activate_next(session);
4037 +@@ -1744,7 +1768,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4038 + {
4039 + struct j1939_priv *priv = session->priv;
4040 + struct j1939_sk_buff_cb *skcb;
4041 +- struct sk_buff *se_skb;
4042 ++ struct sk_buff *se_skb = NULL;
4043 + const u8 *dat;
4044 + u8 *tpdat;
4045 + int offset;
4046 +@@ -1786,7 +1810,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4047 + goto out_session_cancel;
4048 + }
4049 +
4050 +- se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
4051 ++ se_skb = j1939_session_skb_get_by_offset(session, packet * 7);
4052 + if (!se_skb) {
4053 + netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
4054 + session);
4055 +@@ -1848,11 +1872,13 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
4056 + j1939_tp_set_rxtimeout(session, 250);
4057 + }
4058 + session->last_cmd = 0xff;
4059 ++ consume_skb(se_skb);
4060 + j1939_session_put(session);
4061 +
4062 + return;
4063 +
4064 + out_session_cancel:
4065 ++ kfree_skb(se_skb);
4066 + j1939_session_timers_cancel(session);
4067 + j1939_session_cancel(session, J1939_XTP_ABORT_FAULT);
4068 + j1939_session_put(session);
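The j1939 hunks rename the skb lookup helpers from _find to _get: the lookup now takes a reference, and every caller must drop it on every path (kfree_skb() on error, consume_skb() on success), closing the leaks the old early returns caused. A toy refcounting sketch of that discipline (illustrative buffer type, not struct sk_buff):

    #include <stdio.h>

    struct buf { int refs; const char *data; };

    static struct buf *buf_get(struct buf *b) { b->refs++; return b; }

    static void buf_put(struct buf *b)
    {
        if (--b->refs == 0)
            printf("freeing '%s'\n", b->data);
    }

    static int tx_one(struct buf *queued, int inject_error)
    {
        struct buf *b = buf_get(queued);  /* like j1939_session_skb_get() */
        int ret = 0;

        if (inject_error) {
            ret = -1;
            goto out_free;                /* an early return leaked here */
        }
        printf("sent '%s'\n", b->data);
    out_free:
        buf_put(b);                       /* kfree_skb()/consume_skb() */
        return ret;
    }

    int main(void)
    {
        struct buf b = { 1, "payload" };

        tx_one(&b, 1);
        tx_one(&b, 0);
        printf("refs left: %d\n", b.refs);    /* back to 1: no leak */
        return 0;
    }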
4069 +diff --git a/net/can/raw.c b/net/can/raw.c
4070 +index 95113b0898b24..4a7c063deb6ce 100644
4071 +--- a/net/can/raw.c
4072 ++++ b/net/can/raw.c
4073 +@@ -83,7 +83,7 @@ struct raw_sock {
4074 + struct sock sk;
4075 + int bound;
4076 + int ifindex;
4077 +- struct notifier_block notifier;
4078 ++ struct list_head notifier;
4079 + int loopback;
4080 + int recv_own_msgs;
4081 + int fd_frames;
4082 +@@ -95,6 +95,10 @@ struct raw_sock {
4083 + struct uniqframe __percpu *uniq;
4084 + };
4085 +
4086 ++static LIST_HEAD(raw_notifier_list);
4087 ++static DEFINE_SPINLOCK(raw_notifier_lock);
4088 ++static struct raw_sock *raw_busy_notifier;
4089 ++
4090 + /* Return pointer to store the extra msg flags for raw_recvmsg().
4091 + * We use the space of one unsigned int beyond the 'struct sockaddr_can'
4092 + * in skb->cb.
4093 +@@ -263,21 +267,16 @@ static int raw_enable_allfilters(struct net *net, struct net_device *dev,
4094 + return err;
4095 + }
4096 +
4097 +-static int raw_notifier(struct notifier_block *nb,
4098 +- unsigned long msg, void *ptr)
4099 ++static void raw_notify(struct raw_sock *ro, unsigned long msg,
4100 ++ struct net_device *dev)
4101 + {
4102 +- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4103 +- struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
4104 + struct sock *sk = &ro->sk;
4105 +
4106 + if (!net_eq(dev_net(dev), sock_net(sk)))
4107 +- return NOTIFY_DONE;
4108 +-
4109 +- if (dev->type != ARPHRD_CAN)
4110 +- return NOTIFY_DONE;
4111 ++ return;
4112 +
4113 + if (ro->ifindex != dev->ifindex)
4114 +- return NOTIFY_DONE;
4115 ++ return;
4116 +
4117 + switch (msg) {
4118 + case NETDEV_UNREGISTER:
4119 +@@ -305,7 +304,28 @@ static int raw_notifier(struct notifier_block *nb,
4120 + sk->sk_error_report(sk);
4121 + break;
4122 + }
4123 ++}
4124 ++
4125 ++static int raw_notifier(struct notifier_block *nb, unsigned long msg,
4126 ++ void *ptr)
4127 ++{
4128 ++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4129 ++
4130 ++ if (dev->type != ARPHRD_CAN)
4131 ++ return NOTIFY_DONE;
4132 ++ if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
4133 ++ return NOTIFY_DONE;
4134 ++ if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
4135 ++ return NOTIFY_DONE;
4136 +
4137 ++ spin_lock(&raw_notifier_lock);
4138 ++ list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
4139 ++ spin_unlock(&raw_notifier_lock);
4140 ++ raw_notify(raw_busy_notifier, msg, dev);
4141 ++ spin_lock(&raw_notifier_lock);
4142 ++ }
4143 ++ raw_busy_notifier = NULL;
4144 ++ spin_unlock(&raw_notifier_lock);
4145 + return NOTIFY_DONE;
4146 + }
4147 +
4148 +@@ -334,9 +354,9 @@ static int raw_init(struct sock *sk)
4149 + return -ENOMEM;
4150 +
4151 + /* set notifier */
4152 +- ro->notifier.notifier_call = raw_notifier;
4153 +-
4154 +- register_netdevice_notifier(&ro->notifier);
4155 ++ spin_lock(&raw_notifier_lock);
4156 ++ list_add_tail(&ro->notifier, &raw_notifier_list);
4157 ++ spin_unlock(&raw_notifier_lock);
4158 +
4159 + return 0;
4160 + }
4161 +@@ -351,7 +371,14 @@ static int raw_release(struct socket *sock)
4162 +
4163 + ro = raw_sk(sk);
4164 +
4165 +- unregister_netdevice_notifier(&ro->notifier);
4166 ++ spin_lock(&raw_notifier_lock);
4167 ++ while (raw_busy_notifier == ro) {
4168 ++ spin_unlock(&raw_notifier_lock);
4169 ++ schedule_timeout_uninterruptible(1);
4170 ++ spin_lock(&raw_notifier_lock);
4171 ++ }
4172 ++ list_del(&ro->notifier);
4173 ++ spin_unlock(&raw_notifier_lock);
4174 +
4175 + lock_sock(sk);
4176 +
4177 +@@ -881,6 +908,10 @@ static const struct can_proto raw_can_proto = {
4178 + .prot = &raw_proto,
4179 + };
4180 +
4181 ++static struct notifier_block canraw_notifier = {
4182 ++ .notifier_call = raw_notifier
4183 ++};
4184 ++
4185 + static __init int raw_module_init(void)
4186 + {
4187 + int err;
4188 +@@ -890,6 +921,8 @@ static __init int raw_module_init(void)
4189 + err = can_proto_register(&raw_can_proto);
4190 + if (err < 0)
4191 + pr_err("can: registration of raw protocol failed\n");
4192 ++ else
4193 ++ register_netdevice_notifier(&canraw_notifier);
4194 +
4195 + return err;
4196 + }
4197 +@@ -897,6 +930,7 @@ static __init int raw_module_init(void)
4198 + static __exit void raw_module_exit(void)
4199 + {
4200 + can_proto_unregister(&raw_can_proto);
4201 ++ unregister_netdevice_notifier(&canraw_notifier);
4202 + }
4203 +
4204 + module_init(raw_module_init);
4205 +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
4206 +index dbc66b896287a..5c9d95f30be60 100644
4207 +--- a/net/core/net_namespace.c
4208 ++++ b/net/core/net_namespace.c
4209 +@@ -650,6 +650,18 @@ void __put_net(struct net *net)
4210 + }
4211 + EXPORT_SYMBOL_GPL(__put_net);
4212 +
4213 ++/**
4214 ++ * get_net_ns - increment the refcount of the network namespace
4215 ++ * @ns: common namespace (net)
4216 ++ *
4217 ++ * Returns the net's common namespace.
4218 ++ */
4219 ++struct ns_common *get_net_ns(struct ns_common *ns)
4220 ++{
4221 ++ return &get_net(container_of(ns, struct net, ns))->ns;
4222 ++}
4223 ++EXPORT_SYMBOL_GPL(get_net_ns);
4224 ++
4225 + struct net *get_net_ns_by_fd(int fd)
4226 + {
4227 + struct file *file;
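get_net_ns() recovers the struct net embedding the ns_common, bumps its refcount via get_net(), and hands back the embedded member. A toy sketch of the container_of pattern behind it (toy types and a plain counter standing in for get_net(); not the kernel's struct net):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ns_common { unsigned int inum; };
    struct net { int refcnt; struct ns_common ns; };

    static struct ns_common *toy_get_net_ns(struct ns_common *ns)
    {
        struct net *net = container_of(ns, struct net, ns);

        net->refcnt++;              /* stand-in for get_net() */
        return &net->ns;
    }

    int main(void)
    {
        struct net init_net = { .refcnt = 1, .ns = { .inum = 0xf0000075 } };
        struct ns_common *ns = toy_get_net_ns(&init_net.ns);

        printf("inum %#x, refcnt now %d\n", ns->inum, init_net.refcnt);
        return 0;
    }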
4228 +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4229 +index 83894723ebeea..dd46592464058 100644
4230 +--- a/net/core/rtnetlink.c
4231 ++++ b/net/core/rtnetlink.c
4232 +@@ -4842,10 +4842,12 @@ static int rtnl_bridge_notify(struct net_device *dev)
4233 + if (err < 0)
4234 + goto errout;
4235 +
4236 +- if (!skb->len) {
4237 +- err = -EINVAL;
4238 ++ /* Notification info is only filled for bridge ports, not the bridge
4239 ++ * device itself. Therefore, a zero notification length is valid and
4240 ++ * should not result in an error.
4241 ++ */
4242 ++ if (!skb->len)
4243 + goto errout;
4244 +- }
4245 +
4246 + rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
4247 + return 0;
4248 +diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
4249 +index c3a5489964cde..9908b922cce8d 100644
4250 +--- a/net/ethtool/strset.c
4251 ++++ b/net/ethtool/strset.c
4252 +@@ -328,6 +328,8 @@ static int strset_reply_size(const struct ethnl_req_info *req_base,
4253 + int len = 0;
4254 + int ret;
4255 +
4256 ++ len += nla_total_size(0); /* ETHTOOL_A_STRSET_STRINGSETS */
4257 ++
4258 + for (i = 0; i < ETH_SS_COUNT; i++) {
4259 + const struct strset_info *set_info = &data->sets[i];
4260 +
4261 +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
4262 +index be09c7669a799..ca217a6f488f6 100644
4263 +--- a/net/ipv4/cipso_ipv4.c
4264 ++++ b/net/ipv4/cipso_ipv4.c
4265 +@@ -472,6 +472,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
4266 + kfree(doi_def->map.std->lvl.local);
4267 + kfree(doi_def->map.std->cat.cipso);
4268 + kfree(doi_def->map.std->cat.local);
4269 ++ kfree(doi_def->map.std);
4270 + break;
4271 + }
4272 + kfree(doi_def);
4273 +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
4274 +index ff3818333fcfb..b71b836cc7d19 100644
4275 +--- a/net/ipv4/icmp.c
4276 ++++ b/net/ipv4/icmp.c
4277 +@@ -759,6 +759,13 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
4278 + icmp_param.data_len = room;
4279 + icmp_param.head_len = sizeof(struct icmphdr);
4280 +
4281 ++ /* if we don't have a source address at this point, fall back to the
4282 ++ * dummy address instead of sending out a packet with a source address
4283 ++ * of 0.0.0.0
4284 ++ */
4285 ++ if (!fl4.saddr)
4286 ++ fl4.saddr = htonl(INADDR_DUMMY);
4287 ++
4288 + icmp_push_reply(&icmp_param, &fl4, &ipc, &rt);
4289 + ende:
4290 + ip_rt_put(rt);
4291 +diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
4292 +index 7b272bbed2b43..6b3c558a4f232 100644
4293 +--- a/net/ipv4/igmp.c
4294 ++++ b/net/ipv4/igmp.c
4295 +@@ -1801,6 +1801,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
4296 + while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
4297 + in_dev->mc_list = i->next_rcu;
4298 + in_dev->mc_count--;
4299 ++ ip_mc_clear_src(i);
4300 + ip_ma_put(i);
4301 + }
4302 + }
4303 +diff --git a/net/ipv4/route.c b/net/ipv4/route.c
4304 +index 798dc85bde5b7..e968bb47d5bd8 100644
4305 +--- a/net/ipv4/route.c
4306 ++++ b/net/ipv4/route.c
4307 +@@ -2076,6 +2076,19 @@ martian_source:
4308 + return err;
4309 + }
4310 +
4311 ++/* get device for dst_alloc with local routes */
4312 ++static struct net_device *ip_rt_get_dev(struct net *net,
4313 ++ const struct fib_result *res)
4314 ++{
4315 ++ struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
4316 ++ struct net_device *dev = NULL;
4317 ++
4318 ++ if (nhc)
4319 ++ dev = l3mdev_master_dev_rcu(nhc->nhc_dev);
4320 ++
4321 ++ return dev ? : net->loopback_dev;
4322 ++}
4323 ++
4324 + /*
4325 + * NOTE. We drop all the packets that has local source
4326 + * addresses, because every properly looped back packet
4327 +@@ -2232,7 +2245,7 @@ local_input:
4328 + }
4329 + }
4330 +
4331 +- rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
4332 ++ rth = rt_dst_alloc(ip_rt_get_dev(net, res),
4333 + flags | RTCF_LOCAL, res->type,
4334 + IN_DEV_CONF_GET(in_dev, NOPOLICY), false);
4335 + if (!rth)
4336 +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
4337 +index 9d28b2778e8fe..fbb9a11fe4a37 100644
4338 +--- a/net/ipv4/udp.c
4339 ++++ b/net/ipv4/udp.c
4340 +@@ -2569,6 +2569,9 @@ void udp_destroy_sock(struct sock *sk)
4341 + {
4342 + struct udp_sock *up = udp_sk(sk);
4343 + bool slow = lock_sock_fast(sk);
4344 ++
4345 ++ /* protects from races with udp_abort() */
4346 ++ sock_set_flag(sk, SOCK_DEAD);
4347 + udp_flush_pending_frames(sk);
4348 + unlock_sock_fast(sk, slow);
4349 + if (static_branch_unlikely(&udp_encap_needed_key)) {
4350 +@@ -2819,10 +2822,17 @@ int udp_abort(struct sock *sk, int err)
4351 + {
4352 + lock_sock(sk);
4353 +
4354 ++ /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
4355 ++ * with close()
4356 ++ */
4357 ++ if (sock_flag(sk, SOCK_DEAD))
4358 ++ goto out;
4359 ++
4360 + sk->sk_err = err;
4361 + sk->sk_error_report(sk);
4362 + __udp_disconnect(sk, 0);
4363 +
4364 ++out:
4365 + release_sock(sk);
4366 +
4367 + return 0;
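The UDP fix serializes destroy against abort: udp_destroy_sock() sets SOCK_DEAD while holding the socket lock, and udp_abort() re-checks the flag under the same lock and bails out instead of touching a socket mid-teardown. A toy mutex sketch of the pattern (illustrative types, not net/ipv4/udp.c):

    #include <stdio.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct toy_sock {
        pthread_mutex_t lock;
        bool dead;
        int err;
    };

    static void toy_destroy(struct toy_sock *sk)
    {
        pthread_mutex_lock(&sk->lock);
        sk->dead = true;        /* like sock_set_flag(sk, SOCK_DEAD) */
        /* flush pending frames here */
        pthread_mutex_unlock(&sk->lock);
    }

    static int toy_abort(struct toy_sock *sk, int err)
    {
        pthread_mutex_lock(&sk->lock);
        if (sk->dead)
            goto out;           /* lost the race with close() */
        sk->err = err;
        /* report error, disconnect */
    out:
        pthread_mutex_unlock(&sk->lock);
        return 0;
    }

    int main(void)
    {
        struct toy_sock sk = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

        toy_destroy(&sk);
        toy_abort(&sk, 111);
        printf("err after abort-on-dead: %d\n", sk.err);  /* still 0 */
        return 0;
    }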
4368 +diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
4369 +index e204163c7036c..92f3235fa2874 100644
4370 +--- a/net/ipv6/netfilter/nft_fib_ipv6.c
4371 ++++ b/net/ipv6/netfilter/nft_fib_ipv6.c
4372 +@@ -135,6 +135,17 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
4373 + }
4374 + EXPORT_SYMBOL_GPL(nft_fib6_eval_type);
4375 +
4376 ++static bool nft_fib_v6_skip_icmpv6(const struct sk_buff *skb, u8 next, const struct ipv6hdr *iph)
4377 ++{
4378 ++ if (likely(next != IPPROTO_ICMPV6))
4379 ++ return false;
4380 ++
4381 ++ if (ipv6_addr_type(&iph->saddr) != IPV6_ADDR_ANY)
4382 ++ return false;
4383 ++
4384 ++ return ipv6_addr_type(&iph->daddr) & IPV6_ADDR_LINKLOCAL;
4385 ++}
4386 ++
4387 + void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
4388 + const struct nft_pktinfo *pkt)
4389 + {
4390 +@@ -163,10 +174,13 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
4391 +
4392 + lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif, iph);
4393 +
4394 +- if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
4395 +- nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
4396 +- nft_fib_store_result(dest, priv, nft_in(pkt));
4397 +- return;
4398 ++ if (nft_hook(pkt) == NF_INET_PRE_ROUTING ||
4399 ++ nft_hook(pkt) == NF_INET_INGRESS) {
4400 ++ if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)) ||
4401 ++ nft_fib_v6_skip_icmpv6(pkt->skb, pkt->tprot, iph)) {
4402 ++ nft_fib_store_result(dest, priv, nft_in(pkt));
4403 ++ return;
4404 ++ }
4405 + }
4406 +
4407 + *dest = 0;
4408 +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
4409 +index 29d9691359b9c..e2de58d6cdce2 100644
4410 +--- a/net/ipv6/udp.c
4411 ++++ b/net/ipv6/udp.c
4412 +@@ -1596,6 +1596,9 @@ void udpv6_destroy_sock(struct sock *sk)
4413 + {
4414 + struct udp_sock *up = udp_sk(sk);
4415 + lock_sock(sk);
4416 ++
4417 ++ /* protects from races with udp_abort() */
4418 ++ sock_set_flag(sk, SOCK_DEAD);
4419 + udp_v6_flush_pending_frames(sk);
4420 + release_sock(sk);
4421 +
4422 +diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
4423 +index d4cc9ac2d7033..6b50cb5e0e3cc 100644
4424 +--- a/net/mac80211/scan.c
4425 ++++ b/net/mac80211/scan.c
4426 +@@ -251,13 +251,24 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
4427 + struct ieee80211_mgmt *mgmt = (void *)skb->data;
4428 + struct ieee80211_bss *bss;
4429 + struct ieee80211_channel *channel;
4430 ++ size_t min_hdr_len = offsetof(struct ieee80211_mgmt,
4431 ++ u.probe_resp.variable);
4432 ++
4433 ++ if (!ieee80211_is_probe_resp(mgmt->frame_control) &&
4434 ++ !ieee80211_is_beacon(mgmt->frame_control) &&
4435 ++ !ieee80211_is_s1g_beacon(mgmt->frame_control))
4436 ++ return;
4437 +
4438 + if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
4439 +- if (skb->len < 15)
4440 +- return;
4441 +- } else if (skb->len < 24 ||
4442 +- (!ieee80211_is_probe_resp(mgmt->frame_control) &&
4443 +- !ieee80211_is_beacon(mgmt->frame_control)))
4444 ++ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
4445 ++ min_hdr_len = offsetof(struct ieee80211_ext,
4446 ++ u.s1g_short_beacon.variable);
4447 ++ else
4448 ++ min_hdr_len = offsetof(struct ieee80211_ext,
4449 ++ u.s1g_beacon);
4450 ++ }
4451 ++
4452 ++ if (skb->len < min_hdr_len)
4453 + return;
4454 +
4455 + sdata1 = rcu_dereference(local->scan_sdata);
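
The rewritten check above swaps the magic lengths 24 and 15 for offsetof() into the actual frame layouts: everything before the variable-length information elements must be present before the frame is parsed. The pattern, shown on a toy frame struct (hypothetical layout, not the real ieee80211_mgmt definition, which is packed):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy probe-response layout; 'variable' marks where the IEs begin. */
    struct toy_probe_resp {
        uint16_t frame_control;
        uint16_t duration;
        uint8_t  addrs[18];
        uint64_t timestamp;
        uint16_t beacon_int;
        uint16_t capab;
        uint8_t  variable[];    /* variable-length IEs */
    };

    int main(void)
    {
        /* The fixed part is everything before the flexible member. */
        size_t min_hdr_len = offsetof(struct toy_probe_resp, variable);
        size_t frame_len = 20; /* pretend this is skb->len */

        if (frame_len < min_hdr_len)
            printf("drop: %zu < %zu\n", frame_len, min_hdr_len);
        return 0;
    }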
4456 +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
4457 +index 1d8526d89505f..20b3581a1c43f 100644
4458 +--- a/net/mac80211/tx.c
4459 ++++ b/net/mac80211/tx.c
4460 +@@ -2030,6 +2030,26 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
4461 + ieee80211_tx(sdata, sta, skb, false);
4462 + }
4463 +
4464 ++static bool ieee80211_validate_radiotap_len(struct sk_buff *skb)
4465 ++{
4466 ++ struct ieee80211_radiotap_header *rthdr =
4467 ++ (struct ieee80211_radiotap_header *)skb->data;
4468 ++
4469 ++ /* check for not even having the fixed radiotap header part */
4470 ++ if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
4471 ++ return false; /* too short to be possibly valid */
4472 ++
4473 ++ /* is it a header version we can trust to find length from? */
4474 ++ if (unlikely(rthdr->it_version))
4475 ++ return false; /* only version 0 is supported */
4476 ++
4477 ++ /* does the skb contain enough to deliver on the alleged length? */
4478 ++ if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
4479 ++ return false; /* skb too short for claimed rt header extent */
4480 ++
4481 ++ return true;
4482 ++}
4483 ++
4484 + bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
4485 + struct net_device *dev)
4486 + {
4487 +@@ -2038,8 +2058,6 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
4488 + struct ieee80211_radiotap_header *rthdr =
4489 + (struct ieee80211_radiotap_header *) skb->data;
4490 + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4491 +- struct ieee80211_supported_band *sband =
4492 +- local->hw.wiphy->bands[info->band];
4493 + int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
4494 + NULL);
4495 + u16 txflags;
4496 +@@ -2052,17 +2070,8 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
4497 + u8 vht_mcs = 0, vht_nss = 0;
4498 + int i;
4499 +
4500 +- /* check for not even having the fixed radiotap header part */
4501 +- if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
4502 +- return false; /* too short to be possibly valid */
4503 +-
4504 +- /* is it a header version we can trust to find length from? */
4505 +- if (unlikely(rthdr->it_version))
4506 +- return false; /* only version 0 is supported */
4507 +-
4508 +- /* does the skb contain enough to deliver on the alleged length? */
4509 +- if (unlikely(skb->len < ieee80211_get_radiotap_len(skb->data)))
4510 +- return false; /* skb too short for claimed rt header extent */
4511 ++ if (!ieee80211_validate_radiotap_len(skb))
4512 ++ return false;
4513 +
4514 + info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
4515 + IEEE80211_TX_CTL_DONTFRAG;
4516 +@@ -2186,6 +2195,9 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
4517 + return false;
4518 +
4519 + if (rate_found) {
4520 ++ struct ieee80211_supported_band *sband =
4521 ++ local->hw.wiphy->bands[info->band];
4522 ++
4523 + info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
4524 +
4525 + for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
4526 +@@ -2199,7 +2211,7 @@ bool ieee80211_parse_tx_radiotap(struct sk_buff *skb,
4527 + } else if (rate_flags & IEEE80211_TX_RC_VHT_MCS) {
4528 + ieee80211_rate_set_vht(info->control.rates, vht_mcs,
4529 + vht_nss);
4530 +- } else {
4531 ++ } else if (sband) {
4532 + for (i = 0; i < sband->n_bitrates; i++) {
4533 + if (rate * 5 != sband->bitrates[i].bitrate)
4534 + continue;
4535 +@@ -2236,8 +2248,8 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
4536 + info->flags = IEEE80211_TX_CTL_REQ_TX_STATUS |
4537 + IEEE80211_TX_CTL_INJECTED;
4538 +
4539 +- /* Sanity-check and process the injection radiotap header */
4540 +- if (!ieee80211_parse_tx_radiotap(skb, dev))
4541 ++ /* Sanity-check the length of the radiotap header */
4542 ++ if (!ieee80211_validate_radiotap_len(skb))
4543 + goto fail;
4544 +
4545 + /* we now know there is a radiotap header with a length we can use */
4546 +@@ -2353,6 +2365,14 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
4547 +
4548 + info->band = chandef->chan->band;
4549 +
4550 ++ /*
4551 ++ * Process the radiotap header. This will now take into account the
4552 ++ * selected chandef above to accurately set injection rates and
4553 ++ * retransmissions.
4554 ++ */
4555 ++ if (!ieee80211_parse_tx_radiotap(skb, dev))
4556 ++ goto fail_rcu;
4557 ++
4558 + /* remove the injection radiotap header */
4559 + skb_pull(skb, len_rthdr);
4560 +
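
This tx.c change splits validation from parsing: ieee80211_monitor_start_xmit() only needs ieee80211_validate_radiotap_len() up front to learn how many bytes to pull, while the full ieee80211_parse_tx_radiotap() now runs after the channel definition (and so info->band) is known — which is also why the sband lookup moves inside the rate_found branch and gains a NULL check. The three length checks, sketched on a plain buffer (assumes a little-endian host; field layout per the radiotap spec):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Fixed part of a radiotap header. */
    struct radiotap_hdr {
        uint8_t  it_version;    /* always 0 */
        uint8_t  it_pad;
        uint16_t it_len;        /* little-endian total header length */
        uint32_t it_present;
    };

    static bool validate_radiotap_len(const uint8_t *buf, size_t len)
    {
        struct radiotap_hdr hdr;

        if (len < sizeof(hdr))
            return false;       /* too short for the fixed part */
        memcpy(&hdr, buf, sizeof(hdr));
        if (hdr.it_version != 0)
            return false;       /* only version 0 is defined */
        if (len < hdr.it_len)   /* assumes little-endian host */
            return false;       /* buffer shorter than claimed extent */
        return true;
    }

    int main(void)
    {
        const uint8_t frame[8] = { 0, 0, 8, 0, 0, 0, 0, 0 };

        printf("valid: %d\n", validate_radiotap_len(frame, sizeof(frame)));
        return 0;
    }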
4561 +diff --git a/net/mptcp/options.c b/net/mptcp/options.c
4562 +index 91034a221983c..ac0233c9cd349 100644
4563 +--- a/net/mptcp/options.c
4564 ++++ b/net/mptcp/options.c
4565 +@@ -314,6 +314,8 @@ void mptcp_get_options(const struct sk_buff *skb,
4566 + length--;
4567 + continue;
4568 + default:
4569 ++ if (length < 2)
4570 ++ return;
4571 + opsize = *ptr++;
4572 + if (opsize < 2) /* "silly options" */
4573 + return;
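
The two added lines fix a classic bug in TCP-style option walks: after consuming the kind byte, the parser must confirm a length byte is still within bounds before reading it, or a truncated option at the end of the buffer reads one byte past the data. The same guard lands in nf_synproxy_core.c and sch_cake.c further down in this patch. A self-contained version of the safe loop (toy option buffer, not the kernel parser):

    #include <stdint.h>
    #include <stdio.h>

    #define OPT_EOL 0   /* end of option list */
    #define OPT_NOP 1   /* padding */

    /* Walk TCP-style options: 1-byte kind, 1-byte total length, payload. */
    static void walk_options(const uint8_t *ptr, int length)
    {
        while (length > 0) {
            uint8_t kind = *ptr++;
            uint8_t opsize;

            if (kind == OPT_EOL)
                return;
            if (kind == OPT_NOP) {
                length--;
                continue;
            }
            if (length < 2)   /* the fix: no room left for a length byte */
                return;
            opsize = *ptr++;
            if (opsize < 2 || opsize > length)
                return;       /* malformed ("silly") option */
            printf("option kind=%u size=%u\n", kind, opsize);
            ptr += opsize - 2;
            length -= opsize;
        }
    }

    int main(void)
    {
        const uint8_t opts[] = { OPT_NOP, 2, 4, 0x12, 0x34 };

        walk_options(opts, (int)sizeof(opts));
        return 0;
    }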
4574 +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
4575 +index 7832b20baac2e..3ca8b359e399a 100644
4576 +--- a/net/mptcp/protocol.c
4577 ++++ b/net/mptcp/protocol.c
4578 +@@ -276,11 +276,13 @@ static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
4579 +
4580 + /* try to fetch required memory from subflow */
4581 + if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
4582 +- if (ssk->sk_forward_alloc < skb->truesize)
4583 +- goto drop;
4584 +- __sk_mem_reclaim(ssk, skb->truesize);
4585 +- if (!sk_rmem_schedule(sk, skb, skb->truesize))
4586 ++ int amount = sk_mem_pages(skb->truesize) << SK_MEM_QUANTUM_SHIFT;
4587 ++
4588 ++ if (ssk->sk_forward_alloc < amount)
4589 + goto drop;
4590 ++
4591 ++ ssk->sk_forward_alloc -= amount;
4592 ++ sk->sk_forward_alloc += amount;
4593 + }
4594 +
4595 + /* the skb map_seq accounts for the skb offset:
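
Rather than reclaiming and retrying, the hunk above moves forward allocation directly: the skb's truesize is rounded up to whole accounting pages, and that amount migrates from the subflow socket to the MPTCP socket. A worked example of the rounding, assuming the usual 4 KiB SK_MEM_QUANTUM:

    #include <stdio.h>

    #define SK_MEM_QUANTUM        4096  /* one accounting page (typical) */
    #define SK_MEM_QUANTUM_SHIFT  12

    /* Round a byte count up to whole accounting pages (sk_mem_pages()). */
    static int mem_pages(int bytes)
    {
        return (bytes + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
    }

    int main(void)
    {
        int truesize = 5000;    /* an skb->truesize */
        int amount = mem_pages(truesize) << SK_MEM_QUANTUM_SHIFT;

        /* 5000 bytes round up to 2 pages, so 8192 bytes of forward
         * allocation move from the subflow to the parent socket. */
        printf("amount=%d\n", amount);
        return 0;
    }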
4596 +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
4597 +index 96b6aca9d0ae7..851fb3d8c791d 100644
4598 +--- a/net/mptcp/subflow.c
4599 ++++ b/net/mptcp/subflow.c
4600 +@@ -655,10 +655,10 @@ static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq)
4601 + return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32));
4602 + }
4603 +
4604 +-static void warn_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
4605 ++static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn)
4606 + {
4607 +- WARN_ONCE(1, "Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
4608 +- ssn, subflow->map_subflow_seq, subflow->map_data_len);
4609 ++ pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d",
4610 ++ ssn, subflow->map_subflow_seq, subflow->map_data_len);
4611 + }
4612 +
4613 + static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
4614 +@@ -683,13 +683,13 @@ static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
4615 + /* Mapping covers data later in the subflow stream,
4616 + * currently unsupported.
4617 + */
4618 +- warn_bad_map(subflow, ssn);
4619 ++ dbg_bad_map(subflow, ssn);
4620 + return false;
4621 + }
4622 + if (unlikely(!before(ssn, subflow->map_subflow_seq +
4623 + subflow->map_data_len))) {
4624 + /* Mapping covers past subflow data, invalid */
4625 +- warn_bad_map(subflow, ssn + skb->len);
4626 ++ dbg_bad_map(subflow, ssn);
4627 + return false;
4628 + }
4629 + return true;
4630 +diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
4631 +index d7d34a62d3bf5..2fc4ae960769d 100644
4632 +--- a/net/netfilter/nf_synproxy_core.c
4633 ++++ b/net/netfilter/nf_synproxy_core.c
4634 +@@ -31,6 +31,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
4635 + int length = (th->doff * 4) - sizeof(*th);
4636 + u8 buf[40], *ptr;
4637 +
4638 ++ if (unlikely(length < 0))
4639 ++ return false;
4640 ++
4641 + ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
4642 + if (ptr == NULL)
4643 + return false;
4644 +@@ -47,6 +50,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
4645 + length--;
4646 + continue;
4647 + default:
4648 ++ if (length < 2)
4649 ++ return true;
4650 + opsize = *ptr++;
4651 + if (opsize < 2)
4652 + return true;
4653 +diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
4654 +index 93a7edcff11e7..0d9baddb9cd49 100644
4655 +--- a/net/qrtr/qrtr.c
4656 ++++ b/net/qrtr/qrtr.c
4657 +@@ -429,7 +429,7 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
4658 + struct qrtr_sock *ipc;
4659 + struct sk_buff *skb;
4660 + struct qrtr_cb *cb;
4661 +- unsigned int size;
4662 ++ size_t size;
4663 + unsigned int ver;
4664 + size_t hdrlen;
4665 +
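
The qrtr one-liner widens a local from unsigned int to size_t. The hazard with mixed-width size arithmetic is silent truncation: a size_t value stored into a 32-bit variable can wrap to something small and defeat later bounds checks. A minimal demonstration on an LP64 host (illustrative values only, not the actual qrtr code path):

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t len = 0x100000004ULL;    /* oversized, attacker-influenced */
        size_t hdrlen = 4;

        unsigned int narrow = (unsigned int)(len - hdrlen); /* wraps to 0 */
        size_t wide = len - hdrlen;                         /* true value */

        printf("narrow=%u wide=%zu\n", narrow, wide);
        return 0;
    }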
4666 +diff --git a/net/rds/recv.c b/net/rds/recv.c
4667 +index aba4afe4dfedc..967d115f97efd 100644
4668 +--- a/net/rds/recv.c
4669 ++++ b/net/rds/recv.c
4670 +@@ -714,7 +714,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
4671 +
4672 + if (rds_cmsg_recv(inc, msg, rs)) {
4673 + ret = -EFAULT;
4674 +- goto out;
4675 ++ break;
4676 + }
4677 + rds_recvmsg_zcookie(rs, msg);
4678 +
4679 +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
4680 +index 315a5b2f3add8..7ef074c6dd160 100644
4681 +--- a/net/sched/act_ct.c
4682 ++++ b/net/sched/act_ct.c
4683 +@@ -900,14 +900,19 @@ static int tcf_ct_act_nat(struct sk_buff *skb,
4684 + }
4685 +
4686 + err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
4687 +- if (err == NF_ACCEPT &&
4688 +- ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) {
4689 +- if (maniptype == NF_NAT_MANIP_SRC)
4690 +- maniptype = NF_NAT_MANIP_DST;
4691 +- else
4692 +- maniptype = NF_NAT_MANIP_SRC;
4693 +-
4694 +- err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
4695 ++ if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
4696 ++ if (ct->status & IPS_SRC_NAT) {
4697 ++ if (maniptype == NF_NAT_MANIP_SRC)
4698 ++ maniptype = NF_NAT_MANIP_DST;
4699 ++ else
4700 ++ maniptype = NF_NAT_MANIP_SRC;
4701 ++
4702 ++ err = ct_nat_execute(skb, ct, ctinfo, range,
4703 ++ maniptype);
4704 ++ } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
4705 ++ err = ct_nat_execute(skb, ct, ctinfo, NULL,
4706 ++ NF_NAT_MANIP_SRC);
4707 ++ }
4708 + }
4709 + return err;
4710 + #else
4711 +diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
4712 +index 7d37638ee1c7a..5c15968b5155b 100644
4713 +--- a/net/sched/sch_cake.c
4714 ++++ b/net/sched/sch_cake.c
4715 +@@ -943,7 +943,7 @@ static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
4716 + }
4717 +
4718 + tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
4719 +- if (!tcph)
4720 ++ if (!tcph || tcph->doff < 5)
4721 + return NULL;
4722 +
4723 + return skb_header_pointer(skb, offset,
4724 +@@ -967,6 +967,8 @@ static const void *cake_get_tcpopt(const struct tcphdr *tcph,
4725 + length--;
4726 + continue;
4727 + }
4728 ++ if (length < 2)
4729 ++ break;
4730 + opsize = *ptr++;
4731 + if (opsize < 2 || opsize > length)
4732 + break;
4733 +@@ -1104,6 +1106,8 @@ static bool cake_tcph_may_drop(const struct tcphdr *tcph,
4734 + length--;
4735 + continue;
4736 + }
4737 ++ if (length < 2)
4738 ++ break;
4739 + opsize = *ptr++;
4740 + if (opsize < 2 || opsize > length)
4741 + break;
4742 +diff --git a/net/socket.c b/net/socket.c
4743 +index 6e6cccc2104f7..002d5952ae5d8 100644
4744 +--- a/net/socket.c
4745 ++++ b/net/socket.c
4746 +@@ -1080,19 +1080,6 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
4747 + * what to do with it - that's up to the protocol still.
4748 + */
4749 +
4750 +-/**
4751 +- * get_net_ns - increment the refcount of the network namespace
4752 +- * @ns: common namespace (net)
4753 +- *
4754 +- * Returns the net's common namespace.
4755 +- */
4756 +-
4757 +-struct ns_common *get_net_ns(struct ns_common *ns)
4758 +-{
4759 +- return &get_net(container_of(ns, struct net, ns))->ns;
4760 +-}
4761 +-EXPORT_SYMBOL_GPL(get_net_ns);
4762 +-
4763 + static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
4764 + {
4765 + struct socket *sock;
4766 +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
4767 +index 41c3303c33577..39be4b52329b5 100644
4768 +--- a/net/unix/af_unix.c
4769 ++++ b/net/unix/af_unix.c
4770 +@@ -535,12 +535,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
4771 + u->path.mnt = NULL;
4772 + state = sk->sk_state;
4773 + sk->sk_state = TCP_CLOSE;
4774 ++
4775 ++ skpair = unix_peer(sk);
4776 ++ unix_peer(sk) = NULL;
4777 ++
4778 + unix_state_unlock(sk);
4779 +
4780 + wake_up_interruptible_all(&u->peer_wait);
4781 +
4782 +- skpair = unix_peer(sk);
4783 +-
4784 + if (skpair != NULL) {
4785 + if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
4786 + unix_state_lock(skpair);
4787 +@@ -555,7 +557,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
4788 +
4789 + unix_dgram_peer_wake_disconnect(sk, skpair);
4790 + sock_put(skpair); /* It may now die */
4791 +- unix_peer(sk) = NULL;
4792 + }
4793 +
4794 + /* Try to flush out this socket. Throw out buffers at least */
4795 +diff --git a/net/wireless/Makefile b/net/wireless/Makefile
4796 +index 2eee93985ab0d..af590ae606b69 100644
4797 +--- a/net/wireless/Makefile
4798 ++++ b/net/wireless/Makefile
4799 +@@ -28,7 +28,7 @@ $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.hex)
4800 + @$(kecho) " GEN $@"
4801 + @(echo '#include "reg.h"'; \
4802 + echo 'const u8 shipped_regdb_certs[] = {'; \
4803 +- cat $^ ; \
4804 ++ echo | cat - $^ ; \
4805 + echo '};'; \
4806 + echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \
4807 + ) > $@
4808 +diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c
4809 +index a95c79d183492..a817d8e3e4b36 100644
4810 +--- a/net/wireless/pmsr.c
4811 ++++ b/net/wireless/pmsr.c
4812 +@@ -324,6 +324,7 @@ void cfg80211_pmsr_complete(struct wireless_dev *wdev,
4813 + gfp_t gfp)
4814 + {
4815 + struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
4816 ++ struct cfg80211_pmsr_request *tmp, *prev, *to_free = NULL;
4817 + struct sk_buff *msg;
4818 + void *hdr;
4819 +
4820 +@@ -354,9 +355,20 @@ free_msg:
4821 + nlmsg_free(msg);
4822 + free_request:
4823 + spin_lock_bh(&wdev->pmsr_lock);
4824 +- list_del(&req->list);
4825 ++ /*
4826 ++ * cfg80211_pmsr_process_abort() may have already moved this request
4827 ++ * to the free list, and will free it later. In this case, don't free
4828 ++ * it here.
4829 ++ */
4830 ++ list_for_each_entry_safe(tmp, prev, &wdev->pmsr_list, list) {
4831 ++ if (tmp == req) {
4832 ++ list_del(&req->list);
4833 ++ to_free = req;
4834 ++ break;
4835 ++ }
4836 ++ }
4837 + spin_unlock_bh(&wdev->pmsr_lock);
4838 +- kfree(req);
4839 ++ kfree(to_free);
4840 + }
4841 + EXPORT_SYMBOL_GPL(cfg80211_pmsr_complete);
4842 +
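
The pmsr fix is the "free it only if you unlinked it" idiom: cfg80211_pmsr_complete() scans wdev->pmsr_list and frees the request only when it removes it itself; if cfg80211_pmsr_process_abort() already moved the request to its own free list, that path owns the kfree(). The shape of the idiom on a toy singly linked list (stub types, printing instead of freeing, locking omitted):

    #include <stdio.h>

    struct req {
        struct req *next;
        const char *name;
    };

    /* Unlink 'req' if it is still listed and report ownership of the
     * free; if it is gone, some other path has already claimed it. */
    static void complete_req(struct req **head, struct req *req)
    {
        struct req **pp;

        for (pp = head; *pp; pp = &(*pp)->next) {
            if (*pp == req) {
                *pp = req->next;    /* unlink: we own the free */
                printf("freeing %s\n", req->name);
                return;
            }
        }
        printf("%s already unlinked; not freeing\n", req->name);
    }

    int main(void)
    {
        struct req b = { NULL, "b" };
        struct req a = { &b, "a" };
        struct req *head = &a;

        complete_req(&head, &a);    /* unlinks and frees */
        complete_req(&head, &a);    /* second completion: no double free */
        return 0;
    }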
4843 +diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
4844 +index 91a4ef7f620ca..a9b079d56fd69 100644
4845 +--- a/sound/soc/codecs/rt5659.c
4846 ++++ b/sound/soc/codecs/rt5659.c
4847 +@@ -2433,13 +2433,18 @@ static int set_dmic_power(struct snd_soc_dapm_widget *w,
4848 + return 0;
4849 + }
4850 +
4851 +-static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
4852 ++static const struct snd_soc_dapm_widget rt5659_particular_dapm_widgets[] = {
4853 + SND_SOC_DAPM_SUPPLY("LDO2", RT5659_PWR_ANLG_3, RT5659_PWR_LDO2_BIT, 0,
4854 + NULL, 0),
4855 +- SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
4856 +- NULL, 0),
4857 ++ SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
4858 ++ 0, NULL, 0),
4859 + SND_SOC_DAPM_SUPPLY("Mic Det Power", RT5659_PWR_VOL,
4860 + RT5659_PWR_MIC_DET_BIT, 0, NULL, 0),
4861 ++};
4862 ++
4863 ++static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
4864 ++ SND_SOC_DAPM_SUPPLY("PLL", RT5659_PWR_ANLG_3, RT5659_PWR_PLL_BIT, 0,
4865 ++ NULL, 0),
4866 + SND_SOC_DAPM_SUPPLY("Mono Vref", RT5659_PWR_ANLG_1,
4867 + RT5659_PWR_VREF3_BIT, 0, NULL, 0),
4868 +
4869 +@@ -2464,8 +2469,6 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = {
4870 + RT5659_ADC_MONO_R_ASRC_SFT, 0, NULL, 0),
4871 +
4872 + /* Input Side */
4873 +- SND_SOC_DAPM_SUPPLY("MICBIAS1", RT5659_PWR_ANLG_2, RT5659_PWR_MB1_BIT,
4874 +- 0, NULL, 0),
4875 + SND_SOC_DAPM_SUPPLY("MICBIAS2", RT5659_PWR_ANLG_2, RT5659_PWR_MB2_BIT,
4876 + 0, NULL, 0),
4877 + SND_SOC_DAPM_SUPPLY("MICBIAS3", RT5659_PWR_ANLG_2, RT5659_PWR_MB3_BIT,
4878 +@@ -3660,10 +3663,23 @@ static int rt5659_set_bias_level(struct snd_soc_component *component,
4879 +
4880 + static int rt5659_probe(struct snd_soc_component *component)
4881 + {
4882 ++ struct snd_soc_dapm_context *dapm =
4883 ++ snd_soc_component_get_dapm(component);
4884 + struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
4885 +
4886 + rt5659->component = component;
4887 +
4888 ++ switch (rt5659->pdata.jd_src) {
4889 ++ case RT5659_JD_HDA_HEADER:
4890 ++ break;
4891 ++
4892 ++ default:
4893 ++ snd_soc_dapm_new_controls(dapm,
4894 ++ rt5659_particular_dapm_widgets,
4895 ++ ARRAY_SIZE(rt5659_particular_dapm_widgets));
4896 ++ break;
4897 ++ }
4898 ++
4899 + return 0;
4900 + }
4901 +
4902 +diff --git a/sound/soc/codecs/rt5682-sdw.c b/sound/soc/codecs/rt5682-sdw.c
4903 +index 58fb13132602c..aa6c325faeab2 100644
4904 +--- a/sound/soc/codecs/rt5682-sdw.c
4905 ++++ b/sound/soc/codecs/rt5682-sdw.c
4906 +@@ -455,7 +455,8 @@ static int rt5682_io_init(struct device *dev, struct sdw_slave *slave)
4907 +
4908 + regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_2,
4909 + RT5682_EXT_JD_SRC, RT5682_EXT_JD_SRC_MANUAL);
4910 +- regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd042);
4911 ++ regmap_write(rt5682->regmap, RT5682_CBJ_CTRL_1, 0xd142);
4912 ++ regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_5, 0x0700, 0x0600);
4913 + regmap_update_bits(rt5682->regmap, RT5682_CBJ_CTRL_3,
4914 + RT5682_CBJ_IN_BUF_EN, RT5682_CBJ_IN_BUF_EN);
4915 + regmap_update_bits(rt5682->regmap, RT5682_SAR_IL_CMD_1,
4916 +diff --git a/sound/soc/codecs/tas2562.h b/sound/soc/codecs/tas2562.h
4917 +index 81866aeb3fbfa..55b2a1f52ca37 100644
4918 +--- a/sound/soc/codecs/tas2562.h
4919 ++++ b/sound/soc/codecs/tas2562.h
4920 +@@ -57,13 +57,13 @@
4921 + #define TAS2562_TDM_CFG0_RAMPRATE_MASK BIT(5)
4922 + #define TAS2562_TDM_CFG0_RAMPRATE_44_1 BIT(5)
4923 + #define TAS2562_TDM_CFG0_SAMPRATE_MASK GENMASK(3, 1)
4924 +-#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ 0x0
4925 +-#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ 0x1
4926 +-#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ 0x2
4927 +-#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ 0x3
4928 +-#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ 0x4
4929 +-#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ 0x5
4930 +-#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ 0x6
4931 ++#define TAS2562_TDM_CFG0_SAMPRATE_7305_8KHZ (0x0 << 1)
4932 ++#define TAS2562_TDM_CFG0_SAMPRATE_14_7_16KHZ (0x1 << 1)
4933 ++#define TAS2562_TDM_CFG0_SAMPRATE_22_05_24KHZ (0x2 << 1)
4934 ++#define TAS2562_TDM_CFG0_SAMPRATE_29_4_32KHZ (0x3 << 1)
4935 ++#define TAS2562_TDM_CFG0_SAMPRATE_44_1_48KHZ (0x4 << 1)
4936 ++#define TAS2562_TDM_CFG0_SAMPRATE_88_2_96KHZ (0x5 << 1)
4937 ++#define TAS2562_TDM_CFG0_SAMPRATE_176_4_192KHZ (0x6 << 1)
4938 +
4939 + #define TAS2562_TDM_CFG2_RIGHT_JUSTIFY BIT(6)
4940 +
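
The tas2562.h hunk is a bitfield placement fix: the sample-rate selector is a 3-bit field occupying bits 3:1 of TDM_CFG0 (GENMASK(3, 1)), but the old macros defined raw, unshifted values, so e.g. the 44.1/48 kHz value 0x4 landed in the wrong bits and decoded as a different rate. A quick check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define SAMPRATE_MASK   0x0e    /* GENMASK(3, 1) */
    #define SAMPRATE_SHIFT  1

    int main(void)
    {
        uint8_t rate_44_1_48k = 0x4;    /* field value for 44.1/48 kHz */
        uint8_t shifted = (uint8_t)(rate_44_1_48k << SAMPRATE_SHIFT);

        /* Unshifted, 0x04 decodes as field (0x04 & 0x0e) >> 1 = 0x2,
         * i.e. 22.05/24 kHz; shifted, it sits in bits 3:1 as intended. */
        printf("unshifted field=%u shifted reg=0x%02x\n",
               (rate_44_1_48k & SAMPRATE_MASK) >> SAMPRATE_SHIFT, shifted);
        return 0;
    }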
4941 +diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
4942 +index a2dd3b6b7fec1..7cd14d6b9436a 100644
4943 +--- a/sound/soc/fsl/fsl-asoc-card.c
4944 ++++ b/sound/soc/fsl/fsl-asoc-card.c
4945 +@@ -720,6 +720,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
4946 + /* Initialize sound card */
4947 + priv->pdev = pdev;
4948 + priv->card.dev = &pdev->dev;
4949 ++ priv->card.owner = THIS_MODULE;
4950 + ret = snd_soc_of_parse_card_name(&priv->card, "model");
4951 + if (ret) {
4952 + snprintf(priv->name, sizeof(priv->name), "%s-audio",
4953 +diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
4954 +index 7a30a12519a70..e620a62ef534f 100644
4955 +--- a/sound/soc/qcom/lpass-cpu.c
4956 ++++ b/sound/soc/qcom/lpass-cpu.c
4957 +@@ -93,8 +93,30 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream,
4958 + struct snd_soc_dai *dai)
4959 + {
4960 + struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
4961 ++ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
4962 ++ unsigned int id = dai->driver->id;
4963 +
4964 + clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]);
4965 ++ /*
4966 ++	 * Ensure LRCLK is disabled even in device node validation. This has
4967 ++	 * no effect if it was already disabled in the lpass_cpu_daiops_trigger()
4968 ++	 * suspend path.
4969 ++ */
4970 ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
4971 ++ regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_DISABLE);
4972 ++ else
4973 ++ regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_DISABLE);
4974 ++
4975 ++ /*
4976 ++	 * BCLK may not be enabled if lpass_cpu_daiops_prepare was never called
4977 ++	 * before lpass_cpu_daiops_shutdown. This clk_disable is paired with the
4978 ++	 * clk_enable in lpass_cpu_daiops_prepare.
4979 ++ */
4980 ++ if (drvdata->mi2s_was_prepared[dai->driver->id]) {
4981 ++ drvdata->mi2s_was_prepared[dai->driver->id] = false;
4982 ++ clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]);
4983 ++ }
4984 ++
4985 + clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]);
4986 + }
4987 +
4988 +@@ -275,6 +297,18 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
4989 + case SNDRV_PCM_TRIGGER_START:
4990 + case SNDRV_PCM_TRIGGER_RESUME:
4991 + case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
4992 ++ /*
4993 ++ * Ensure lpass BCLK/LRCLK is enabled during
4994 ++ * device resume as lpass_cpu_daiops_prepare() is not called
4995 ++ * after the device resumes. We don't check mi2s_was_prepared before
4996 ++	 * enabling/disabling BCLK in trigger events because:
4997 ++ * 1. These trigger events are paired, so the BCLK
4998 ++ * enable_count is balanced.
4999 ++ * 2. the BCLK can be shared (ex: headset and headset mic),
5000 ++ * we need to increase the enable_count so that we don't
5001 ++ * turn off the shared BCLK while other devices are using
5002 ++ * it.
5003 ++ */
5004 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
5005 + ret = regmap_fields_write(i2sctl->spken, id,
5006 + LPAIF_I2SCTL_SPKEN_ENABLE);
5007 +@@ -296,6 +330,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
5008 + case SNDRV_PCM_TRIGGER_STOP:
5009 + case SNDRV_PCM_TRIGGER_SUSPEND:
5010 + case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
5011 ++ /*
5012 ++	 * Ensure lpass BCLK/LRCLK is disabled during
5013 ++ * device suspend.
5014 ++ */
5015 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
5016 + ret = regmap_fields_write(i2sctl->spken, id,
5017 + LPAIF_I2SCTL_SPKEN_DISABLE);
5018 +@@ -315,12 +353,53 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
5019 + return ret;
5020 + }
5021 +
5022 ++static int lpass_cpu_daiops_prepare(struct snd_pcm_substream *substream,
5023 ++ struct snd_soc_dai *dai)
5024 ++{
5025 ++ struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai);
5026 ++ struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
5027 ++ unsigned int id = dai->driver->id;
5028 ++ int ret;
5029 ++
5030 ++ /*
5031 ++	 * Ensure the lpass BCLK/LRCLK enable bit is set before playback/capture
5032 ++	 * data flow starts. This gives the codec some lead time before the
5033 ++	 * data flow begins
5034 ++	 * (ex: to drop start-up pop noise before capture starts).
5035 ++ */
5036 ++ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
5037 ++ ret = regmap_fields_write(i2sctl->spken, id, LPAIF_I2SCTL_SPKEN_ENABLE);
5038 ++ else
5039 ++ ret = regmap_fields_write(i2sctl->micen, id, LPAIF_I2SCTL_MICEN_ENABLE);
5040 ++
5041 ++ if (ret) {
5042 ++ dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret);
5043 ++ return ret;
5044 ++ }
5045 ++
5046 ++ /*
5047 ++ * Check mi2s_was_prepared before enabling BCLK as lpass_cpu_daiops_prepare can
5048 ++ * be called multiple times. It's paired with the clk_disable in
5049 ++ * lpass_cpu_daiops_shutdown.
5050 ++ */
5051 ++ if (!drvdata->mi2s_was_prepared[dai->driver->id]) {
5052 ++ ret = clk_enable(drvdata->mi2s_bit_clk[id]);
5053 ++ if (ret) {
5054 ++ dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret);
5055 ++ return ret;
5056 ++ }
5057 ++ drvdata->mi2s_was_prepared[dai->driver->id] = true;
5058 ++ }
5059 ++ return 0;
5060 ++}
5061 ++
5062 + const struct snd_soc_dai_ops asoc_qcom_lpass_cpu_dai_ops = {
5063 + .set_sysclk = lpass_cpu_daiops_set_sysclk,
5064 + .startup = lpass_cpu_daiops_startup,
5065 + .shutdown = lpass_cpu_daiops_shutdown,
5066 + .hw_params = lpass_cpu_daiops_hw_params,
5067 + .trigger = lpass_cpu_daiops_trigger,
5068 ++ .prepare = lpass_cpu_daiops_prepare,
5069 + };
5070 + EXPORT_SYMBOL_GPL(asoc_qcom_lpass_cpu_dai_ops);
5071 +
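
Net effect of the lpass-cpu.c changes: the new .prepare op sets the SPKEN/MICEN enable bit and takes a single clk_enable() on the MI2S bit clock, recorded in mi2s_was_prepared; .shutdown drops that reference only if .prepare actually took it, while the trigger start/stop paths keep their own balanced enable/disable pairs. A toy model of the guarded pairing (stub clock, one port, not the driver code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub clock with an enable count, standing in for the MI2S bit clock. */
    struct clk_stub {
        int enable_count;
    };

    static void clk_enable(struct clk_stub *c)  { c->enable_count++; }
    static void clk_disable(struct clk_stub *c) { c->enable_count--; }

    static bool was_prepared; /* per-port mi2s_was_prepared[] in the driver */

    /* .prepare may run more than once per stream; enable the clock once. */
    static void dai_prepare(struct clk_stub *bclk)
    {
        if (!was_prepared) {
            clk_enable(bclk);
            was_prepared = true;
        }
    }

    /* .shutdown drops the reference only if .prepare actually took it. */
    static void dai_shutdown(struct clk_stub *bclk)
    {
        if (was_prepared) {
            was_prepared = false;
            clk_disable(bclk);
        }
    }

    int main(void)
    {
        struct clk_stub bclk = { 0 };

        dai_prepare(&bclk);
        dai_prepare(&bclk);     /* second call is a no-op */
        dai_shutdown(&bclk);
        printf("enable_count=%d\n", bclk.enable_count); /* balanced: 0 */
        return 0;
    }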
5072 +diff --git a/sound/soc/qcom/lpass.h b/sound/soc/qcom/lpass.h
5073 +index 1d926dd5f5900..0484ad39b3dce 100644
5074 +--- a/sound/soc/qcom/lpass.h
5075 ++++ b/sound/soc/qcom/lpass.h
5076 +@@ -67,6 +67,10 @@ struct lpass_data {
5077 + /* MI2S SD lines to use for playback/capture */
5078 + unsigned int mi2s_playback_sd_mode[LPASS_MAX_MI2S_PORTS];
5079 + unsigned int mi2s_capture_sd_mode[LPASS_MAX_MI2S_PORTS];
5080 ++
5081 ++	/* Tracks whether the MI2S prepare dai_op has been called */
5082 ++ bool mi2s_was_prepared[LPASS_MAX_MI2S_PORTS];
5083 ++
5084 + int hdmi_port_enable;
5085 +
5086 + /* low-power audio interface (LPAIF) registers */
5087 +diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h
5088 +index 7d6687618d808..d1b327036ae43 100644
5089 +--- a/tools/include/uapi/linux/in.h
5090 ++++ b/tools/include/uapi/linux/in.h
5091 +@@ -289,6 +289,9 @@ struct sockaddr_in {
5092 + /* Address indicating an error return. */
5093 + #define INADDR_NONE ((unsigned long int) 0xffffffff)
5094 +
5095 ++/* Dummy address for src of ICMP replies if no real address is set (RFC7600). */
5096 ++#define INADDR_DUMMY ((unsigned long int) 0xc0000008)
5097 ++
5098 + /* Network number for local host loopback. */
5099 + #define IN_LOOPBACKNET 127
5100 +
5101 +diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
5102 +index 7150e34cf2afb..3028f932e10c0 100644
5103 +--- a/tools/lib/bpf/xsk.c
5104 ++++ b/tools/lib/bpf/xsk.c
5105 +@@ -779,7 +779,7 @@ int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
5106 + goto out_put_ctx;
5107 + }
5108 + if (xsk->fd == umem->fd)
5109 +- umem->rx_ring_setup_done = true;
5110 ++ umem->tx_ring_setup_done = true;
5111 + }
5112 +
5113 + err = xsk_get_mmap_offsets(xsk->fd, &off);
5114 +diff --git a/tools/perf/trace/beauty/include/linux/socket.h b/tools/perf/trace/beauty/include/linux/socket.h
5115 +index e9cb30d8cbfb1..9aa530d497da8 100644
5116 +--- a/tools/perf/trace/beauty/include/linux/socket.h
5117 ++++ b/tools/perf/trace/beauty/include/linux/socket.h
5118 +@@ -437,6 +437,4 @@ extern int __sys_getpeername(int fd, struct sockaddr __user *usockaddr,
5119 + extern int __sys_socketpair(int family, int type, int protocol,
5120 + int __user *usockvec);
5121 + extern int __sys_shutdown(int fd, int how);
5122 +-
5123 +-extern struct ns_common *get_net_ns(struct ns_common *ns);
5124 + #endif /* _LINUX_SOCKET_H */
5125 +diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
5126 +index 2b5707738609e..6fad54c7ecb4a 100755
5127 +--- a/tools/testing/selftests/net/fib_tests.sh
5128 ++++ b/tools/testing/selftests/net/fib_tests.sh
5129 +@@ -1384,12 +1384,37 @@ ipv4_rt_replace()
5130 + ipv4_rt_replace_mpath
5131 + }
5132 +
5133 ++# checks that cached input route on VRF port is deleted
5134 ++# when VRF is deleted
5135 ++ipv4_local_rt_cache()
5136 ++{
5137 ++ run_cmd "ip addr add 10.0.0.1/32 dev lo"
5138 ++ run_cmd "ip netns add test-ns"
5139 ++ run_cmd "ip link add veth-outside type veth peer name veth-inside"
5140 ++ run_cmd "ip link add vrf-100 type vrf table 1100"
5141 ++ run_cmd "ip link set veth-outside master vrf-100"
5142 ++ run_cmd "ip link set veth-inside netns test-ns"
5143 ++ run_cmd "ip link set veth-outside up"
5144 ++ run_cmd "ip link set vrf-100 up"
5145 ++ run_cmd "ip route add 10.1.1.1/32 dev veth-outside table 1100"
5146 ++ run_cmd "ip netns exec test-ns ip link set veth-inside up"
5147 ++ run_cmd "ip netns exec test-ns ip addr add 10.1.1.1/32 dev veth-inside"
5148 ++ run_cmd "ip netns exec test-ns ip route add 10.0.0.1/32 dev veth-inside"
5149 ++ run_cmd "ip netns exec test-ns ip route add default via 10.0.0.1"
5150 ++ run_cmd "ip netns exec test-ns ping 10.0.0.1 -c 1 -i 1"
5151 ++ run_cmd "ip link delete vrf-100"
5152 ++
5153 ++	# if we do not hang, the test is a success
5154 ++ log_test $? 0 "Cached route removed from VRF port device"
5155 ++}
5156 ++
5157 + ipv4_route_test()
5158 + {
5159 + route_setup
5160 +
5161 + ipv4_rt_add
5162 + ipv4_rt_replace
5163 ++ ipv4_local_rt_cache
5164 +
5165 + route_cleanup
5166 + }
5167 +diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
5168 +index e927df83efb91..987a914ee0df2 100755
5169 +--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
5170 ++++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
5171 +@@ -195,9 +195,6 @@ ip -net "$ns4" link set ns4eth3 up
5172 + ip -net "$ns4" route add default via 10.0.3.2
5173 + ip -net "$ns4" route add default via dead:beef:3::2
5174 +
5175 +-# use TCP syn cookies, even if no flooding was detected.
5176 +-ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
5177 +-
5178 + set_ethtool_flags() {
5179 + local ns="$1"
5180 + local dev="$2"
5181 +@@ -666,6 +663,14 @@ for sender in $ns1 $ns2 $ns3 $ns4;do
5182 + exit $ret
5183 + fi
5184 +
5185 ++ # ns1<->ns2 is not subject to reordering/tc delays. Use it to test
5186 ++ # mptcp syncookie support.
5187 ++ if [ $sender = $ns1 ]; then
5188 ++ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=2
5189 ++ else
5190 ++ ip netns exec "$ns2" sysctl -q net.ipv4.tcp_syncookies=1
5191 ++ fi
5192 ++
5193 + run_tests "$ns2" $sender 10.0.1.2
5194 + run_tests "$ns2" $sender dead:beef:1::2
5195 + run_tests "$ns2" $sender 10.0.2.1